index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper/MemoizedList.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.write.objectmapper;
import java.util.ArrayList;
import java.util.Collection;
/**
* A MemoizedList is a java.util.ArrayList which is expected to be memoized during a producer cycle.
*
* When a HollowObjectMapper adds a MemoizedList to the HollowWriteStateEngine, it will tag it with the ordinal
* which the corresponding record is assigned. If the same MemoizedList instance is encountered during the same cycle,
* then it will short-circuit the process of serializing the list -- returning the previously memoized ordinal.
*/
public class MemoizedList<E> extends ArrayList<E> {
private static final long serialVersionUID = 4055358559110722153L;
public MemoizedList() {
super();
}
public MemoizedList(int initialCapacity) {
super(initialCapacity);
}
public MemoizedList(Collection<? extends E> c) {
super(c);
}
transient long __assigned_ordinal = -1L;
}
| 9,000 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper/HollowInline.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.write.objectmapper;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Indicates that a field of a POJO is inlined as a value rather than
* a reference.
* <p>
* The field's type must a be boxed primitive type or {@code String}.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target( {ElementType.FIELD})
public @interface HollowInline {
} | 9,001 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper/flatrecords/HollowSchemaIdentifierMapper.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.write.objectmapper.flatrecords;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
import com.netflix.hollow.core.schema.HollowSchema;
public interface HollowSchemaIdentifierMapper {
public HollowSchema getSchema(int identifier);
public FieldType[] getPrimaryKeyFieldTypes(int identifier);
public int getSchemaId(HollowSchema schema);
}
| 9,002 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper/flatrecords/FlatRecordWriter.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.write.objectmapper.flatrecords;
import com.netflix.hollow.core.HollowDataset;
import com.netflix.hollow.core.index.key.PrimaryKey;
import com.netflix.hollow.core.memory.ArrayByteData;
import com.netflix.hollow.core.memory.ByteDataArray;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.schema.HollowSchema.SchemaType;
import com.netflix.hollow.core.util.IntList;
import com.netflix.hollow.core.write.HollowHashableWriteRecord;
import com.netflix.hollow.core.write.HollowHashableWriteRecord.HashBehavior;
import com.netflix.hollow.core.write.HollowWriteRecord;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class FlatRecordWriter {
private final HollowDataset dataset;
private final HollowSchemaIdentifierMapper schemaIdMapper;
private final ByteDataArray buf;
private final Map<Integer, List<RecordLocation>> recordLocationsByHashCode;
private final IntList recordLocationsByOrdinal;
public FlatRecordWriter(HollowDataset dataset, HollowSchemaIdentifierMapper schemaIdMapper) {
this.dataset = dataset;
this.schemaIdMapper = schemaIdMapper;
this.buf = new ByteDataArray();
this.recordLocationsByOrdinal = new IntList();
this.recordLocationsByHashCode = new HashMap<>();
}
public int write(HollowSchema schema, HollowWriteRecord rec) {
int schemaOrdinal = schemaIdMapper.getSchemaId(schema);
int nextRecordOrdinal = recordLocationsByOrdinal.size();
int recStart = (int) buf.length();
VarInt.writeVInt(buf, schemaOrdinal);
if (rec instanceof HollowHashableWriteRecord)
((HollowHashableWriteRecord) rec).writeDataTo(buf, HashBehavior.IGNORED_HASHES);
else
rec.writeDataTo(buf);
int recLen = (int) (buf.length() - recStart);
Integer recordHashCode = HashCodes.hashCode(buf.getUnderlyingArray(), recStart, recLen);
List<RecordLocation> existingRecLocs = recordLocationsByHashCode.get(recordHashCode);
if (existingRecLocs == null) {
RecordLocation newRecordLocation = new RecordLocation(nextRecordOrdinal, recStart, recLen);
existingRecLocs = Collections.<RecordLocation>singletonList(newRecordLocation);
recordLocationsByHashCode.put(recordHashCode, existingRecLocs);
recordLocationsByOrdinal.add(recStart);
return newRecordLocation.ordinal;
} else {
for (RecordLocation existing : existingRecLocs) {
if (recLen == existing.len && buf.getUnderlyingArray().rangeEquals(recStart, buf.getUnderlyingArray(), existing.start, recLen)) {
buf.setPosition(recStart);
return existing.ordinal;
}
}
RecordLocation newRecordLocation = new RecordLocation(nextRecordOrdinal, recStart, recLen);
if (existingRecLocs.size() == 1) {
List<RecordLocation> newRecLocs = new ArrayList<>(2);
newRecLocs.add(existingRecLocs.get(0));
newRecLocs.add(newRecordLocation);
recordLocationsByHashCode.put(recordHashCode, newRecLocs);
} else {
existingRecLocs.add(newRecordLocation);
}
recordLocationsByOrdinal.add(recStart);
return newRecordLocation.ordinal;
}
}
public FlatRecord generateFlatRecord() {
try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
writeTo(baos);
byte[] arr = baos.toByteArray();
ArrayByteData recordData = new ArrayByteData(arr);
return new FlatRecord(recordData, schemaIdMapper);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
public void writeTo(OutputStream os) throws IOException {
if (recordLocationsByOrdinal.size() == 0)
throw new IOException("No data to write!");
int locationOfTopRecord = recordLocationsByOrdinal.get(recordLocationsByOrdinal.size() - 1);
int schemaIdOfTopRecord = VarInt.readVInt(buf.getUnderlyingArray(), locationOfTopRecord);
HollowSchema schemaOfTopRecord = schemaIdMapper.getSchema(schemaIdOfTopRecord);
VarInt.writeVInt(os, locationOfTopRecord);
int pkFieldValueLocations[] = null;
if (schemaOfTopRecord.getSchemaType() == SchemaType.OBJECT) {
PrimaryKey primaryKey = ((HollowObjectSchema) schemaOfTopRecord).getPrimaryKey();
if (primaryKey != null) {
pkFieldValueLocations = new int[primaryKey.numFields()];
/// encode the locations of the primary key fields
for (int i = 0; i < primaryKey.numFields(); i++) {
int[] fieldPathIndex = primaryKey.getFieldPathIndex(dataset, i);
pkFieldValueLocations[i] = locatePrimaryKeyField(locationOfTopRecord, fieldPathIndex, 0);
}
}
}
VarInt.writeVInt(os, (int) buf.length() - locationOfTopRecord);
buf.getUnderlyingArray().writeTo(os, 0, buf.length());
if (pkFieldValueLocations != null) {
for (int i = 0; i < pkFieldValueLocations.length; i++) {
VarInt.writeVInt(os, pkFieldValueLocations[i]);
}
}
}
private int locatePrimaryKeyField(int locationOfCurrentRecord, int[] fieldPathIndex, int idx) {
int schemaIdOfRecord = VarInt.readVInt(buf.getUnderlyingArray(), locationOfCurrentRecord);
HollowObjectSchema recordSchema = (HollowObjectSchema) schemaIdMapper.getSchema(schemaIdOfRecord);
locationOfCurrentRecord += VarInt.sizeOfVInt(schemaIdOfRecord);
int fieldOffset = navigateToField(recordSchema, fieldPathIndex[idx], locationOfCurrentRecord);
if (idx == fieldPathIndex.length - 1)
return fieldOffset;
int ordinalOfNextRecord = VarInt.readVInt(buf.getUnderlyingArray(), fieldOffset);
int offsetOfNextRecord = recordLocationsByOrdinal.get(ordinalOfNextRecord);
return locatePrimaryKeyField(offsetOfNextRecord, fieldPathIndex, idx + 1);
}
private int navigateToField(HollowObjectSchema schema, int fieldIdx, int offset) {
for (int i = 0; i < fieldIdx; i++) {
switch (schema.getFieldType(i)) {
case INT:
case LONG:
case REFERENCE:
offset += VarInt.nextVLongSize(buf.getUnderlyingArray(), offset);
break;
case BYTES:
case STRING:
int fieldLength = VarInt.readVInt(buf.getUnderlyingArray(), offset);
offset += VarInt.sizeOfVInt(fieldLength);
offset += fieldLength;
break;
case BOOLEAN:
offset++;
break;
case DOUBLE:
offset += 8;
break;
case FLOAT:
offset += 4;
break;
}
}
return offset;
}
public void reset() {
buf.reset();
recordLocationsByHashCode.clear();
recordLocationsByOrdinal.clear();
}
private static class RecordLocation {
private final int ordinal;
private final long start;
private final int len;
public RecordLocation(int ordinal, long start, int len) {
this.ordinal = ordinal;
this.start = start;
this.len = len;
}
}
}
| 9,003 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper/flatrecords/FlatRecordReader.java | package com.netflix.hollow.core.write.objectmapper.flatrecords;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.memory.encoding.ZigZag;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.write.HollowObjectWriteRecord;
public class FlatRecordReader {
private final FlatRecord record;
public int pointer;
public FlatRecordReader(FlatRecord record) {
this.record = record;
this.pointer = record.dataStartByte;
}
public void reset() {
this.pointer = record.dataStartByte;
}
public void resetTo(int position) {
this.pointer = position;
}
public boolean hasMore() {
return pointer < record.dataEndByte;
}
public HollowSchema readSchema() {
int schemaId = VarInt.readVInt(record.data, this.pointer);
this.pointer += VarInt.sizeOfVInt(schemaId);
return record.schemaIdMapper.getSchema(schemaId);
}
public int readCollectionSize() {
int size = VarInt.readVInt(record.data, this.pointer);
this.pointer += VarInt.sizeOfVInt(size);
return size;
}
public int readOrdinal() {
if (VarInt.readVNull(record.data, this.pointer)) {
this.pointer += 1;
return -1;
}
int value = VarInt.readVInt(record.data, this.pointer);
this.pointer += VarInt.sizeOfVInt(value);
return value;
}
public Boolean readBoolean() {
if(VarInt.readVNull(record.data, this.pointer)) {
this.pointer += 1;
return null;
}
int value = record.data.get(this.pointer);
this.pointer += 1;
return value == 1 ? Boolean.TRUE : Boolean.FALSE;
}
public int readInt() {
if (VarInt.readVNull(record.data, this.pointer)) {
this.pointer += 1;
return Integer.MIN_VALUE;
}
int value = VarInt.readVInt(record.data, this.pointer);
this.pointer += VarInt.sizeOfVInt(value);
return ZigZag.decodeInt(value);
}
public long readLong() {
if (VarInt.readVNull(record.data, this.pointer)) {
this.pointer += 1;
return Long.MIN_VALUE;
}
long value = VarInt.readVLong(record.data, this.pointer);
this.pointer += VarInt.sizeOfVLong(value);
return ZigZag.decodeLong(value);
}
public float readFloat() {
int value = record.data.readIntBits(this.pointer);
this.pointer += 4;
if (value == HollowObjectWriteRecord.NULL_FLOAT_BITS) {
return Float.NaN;
}
return Float.intBitsToFloat(value);
}
public double readDouble() {
long value = record.data.readLongBits(this.pointer);
this.pointer += 8;
if (value == HollowObjectWriteRecord.NULL_DOUBLE_BITS) {
return Double.NaN;
}
return Double.longBitsToDouble(value);
}
public String readString() {
if (VarInt.readVNull(record.data, this.pointer)) {
this.pointer += 1;
return null;
}
int length = VarInt.readVInt(record.data, this.pointer);
this.pointer += VarInt.sizeOfVInt(length);
int cLength = VarInt.countVarIntsInRange(record.data, this.pointer, length);
char[] s = new char[cLength];
for(int i=0;i<cLength;i++) {
int charValue = VarInt.readVInt(record.data, this.pointer);
s[i] = (char)charValue;
this.pointer += VarInt.sizeOfVInt(charValue);
}
return new String(s);
}
public byte[] readBytes() {
if (VarInt.readVNull(record.data, this.pointer)) {
this.pointer += 1;
return null;
}
int length = VarInt.readVInt(record.data, this.pointer);
this.pointer += VarInt.sizeOfVInt(length);
byte[] b = new byte[length];
for(int i=0;i<length;i++) {
b[i] = record.data.get(this.pointer++);
}
return b;
}
public void skipSchema(HollowSchema schema) {
switch (schema.getSchemaType()) {
case OBJECT: {
HollowObjectSchema objectSchema = (HollowObjectSchema) schema;
int numFields = objectSchema.numFields();
for (int i = 0; i < numFields; i++) {
skipField(objectSchema.getFieldType(i));
}
break;
}
case LIST:
case SET: {
int numElements = readCollectionSize();
for (int i = 0; i < numElements; i++) {
readOrdinal();
}
break;
}
case MAP: {
int numElements = readCollectionSize();
for (int i = 0; i < numElements; i++) {
readOrdinal(); // key
readOrdinal(); // value
}
break;
}
}
}
public void skipField(HollowObjectSchema.FieldType fieldType) {
switch(fieldType) {
case BOOLEAN:
readBoolean();
break;
case BYTES:
readBytes();
break;
case DOUBLE:
readDouble();
break;
case FLOAT:
readFloat();
break;
case INT:
readInt();
break;
case LONG:
readLong();
break;
case REFERENCE:
readOrdinal();
break;
case STRING:
readString();
break;
}
}
}
| 9,004 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper/flatrecords/FlatRecordDumper.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.write.objectmapper.flatrecords;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.memory.encoding.ZigZag;
import com.netflix.hollow.core.schema.HollowListSchema;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.schema.HollowSetSchema;
import com.netflix.hollow.core.write.HollowListWriteRecord;
import com.netflix.hollow.core.write.HollowMapWriteRecord;
import com.netflix.hollow.core.write.HollowObjectWriteRecord;
import com.netflix.hollow.core.write.HollowSetWriteRecord;
import com.netflix.hollow.core.write.HollowWriteRecord;
import com.netflix.hollow.core.write.HollowWriteStateEngine;
import java.util.HashMap;
import java.util.Map;
public class FlatRecordDumper {
private final Map<Integer, Integer> ordinalMapping;
private final Map<String, HollowWriteRecord> writeRecords;
private final HollowWriteStateEngine stateEngine;
private FlatRecord record;
public FlatRecordDumper(HollowWriteStateEngine dumpTo) {
this.ordinalMapping = new HashMap<>();
this.writeRecords = new HashMap<>();
this.stateEngine = dumpTo;
}
public void dump(FlatRecord record) {
this.record = record;
this.ordinalMapping.clear();
int currentRecordPointer = record.dataStartByte;
int currentRecordOrdinal = 0;
while(currentRecordPointer < record.dataEndByte) {
int currentSchemaId = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(currentSchemaId);
HollowSchema recordSchema = record.schemaIdMapper.getSchema(currentSchemaId);
HollowSchema engineSchema = stateEngine.getSchema(recordSchema.getName());
/// copy this record, then map the ordinal
/// if corresponding state is not available in state engine, skip the record.
currentRecordPointer = copyRecord(recordSchema, engineSchema, currentRecordPointer, currentRecordOrdinal++);
}
}
private int copyRecord(HollowSchema recordSchema, HollowSchema engineSchema, int currentRecordPointer, int currentRecordOrdinal) {
switch(recordSchema.getSchemaType()) {
case OBJECT:
return copyObjectRecord((HollowObjectSchema)recordSchema, (HollowObjectSchema)engineSchema, currentRecordPointer, currentRecordOrdinal);
case LIST:
return copyListRecord((HollowListSchema)engineSchema, currentRecordPointer, currentRecordOrdinal);
case SET:
return copySetRecord((HollowSetSchema)engineSchema, currentRecordPointer, currentRecordOrdinal);
case MAP:
return copyMapRecord((HollowMapSchema)engineSchema, currentRecordPointer, currentRecordOrdinal);
default:
throw new IllegalStateException("Unknown schema type: " + recordSchema.getSchemaType());
}
}
private int copyListRecord(HollowListSchema engineSchema, int currentRecordPointer, int currentRecordOrdinal) {
HollowListWriteRecord rec = engineSchema != null ? (HollowListWriteRecord)getWriteRecord(engineSchema) : null;
int numElements = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(numElements);
for(int i=0;i<numElements;i++) {
int unmappedElementOrdinal = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(unmappedElementOrdinal);
if(rec != null) {
int mappedElementOrdinal = ordinalMapping.get(unmappedElementOrdinal);
rec.addElement(mappedElementOrdinal);
}
}
if(engineSchema != null) {
int stateEngineOrdinal = stateEngine.add(engineSchema.getName(), rec);
ordinalMapping.put(currentRecordOrdinal, stateEngineOrdinal);
}
return currentRecordPointer;
}
private int copySetRecord(HollowSetSchema engineSchema, int currentRecordPointer, int currentRecordOrdinal) {
HollowSetWriteRecord rec = engineSchema != null ? (HollowSetWriteRecord)getWriteRecord(engineSchema) : null;
int numElements = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(numElements);
int unmappedOrdinal = 0;
for(int i=0;i<numElements;i++) {
int unmappedOrdinalDelta = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(unmappedOrdinalDelta);
unmappedOrdinal += unmappedOrdinalDelta;
if(rec != null) {
int mappedOrdinal = ordinalMapping.get(unmappedOrdinal);
rec.addElement(mappedOrdinal);
}
}
if(engineSchema != null) {
int stateEngineOrdinal = stateEngine.add(engineSchema.getName(), rec);
ordinalMapping.put(currentRecordOrdinal, stateEngineOrdinal);
}
return currentRecordPointer;
}
private int copyMapRecord(HollowMapSchema engineSchema, int currentRecordPointer, int currentRecordOrdinal) {
HollowMapWriteRecord rec = engineSchema != null ? (HollowMapWriteRecord)getWriteRecord(engineSchema) : null;
int numElements = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(numElements);
int unmappedKeyOrdinal = 0;
for(int i=0;i<numElements;i++) {
int unmappedKeyOrdinalDelta = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(unmappedKeyOrdinalDelta);
int unmappedValueOrdinal = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(unmappedValueOrdinal);
unmappedKeyOrdinal += unmappedKeyOrdinalDelta;
if(rec != null) {
int mappedKeyOrdinal = ordinalMapping.get(unmappedKeyOrdinal);
int mappedValueOrdinal = ordinalMapping.get(unmappedValueOrdinal);
rec.addEntry(mappedKeyOrdinal, mappedValueOrdinal);
}
}
if(engineSchema != null) {
int stateEngineOrdinal = stateEngine.add(engineSchema.getName(), rec);
ordinalMapping.put(currentRecordOrdinal, stateEngineOrdinal);
}
return currentRecordPointer;
}
private int copyObjectRecord(HollowObjectSchema recordSchema, HollowObjectSchema engineSchema, int currentRecordPointer, int currentRecordOrdinal) {
HollowObjectWriteRecord rec = engineSchema != null ? (HollowObjectWriteRecord)getWriteRecord(engineSchema) : null;
for(int i=0;i<recordSchema.numFields();i++) {
String fieldName = recordSchema.getFieldName(i);
FieldType fieldType = recordSchema.getFieldType(i);
boolean fieldExistsInEngine = engineSchema != null && engineSchema.getPosition(fieldName) != -1;
currentRecordPointer = copyObjectField(fieldExistsInEngine ? rec : null, fieldName, fieldType, currentRecordPointer);
}
if(engineSchema != null) {
int stateEngineOrdinal = stateEngine.add(engineSchema.getName(), rec);
ordinalMapping.put(currentRecordOrdinal, stateEngineOrdinal);
}
return currentRecordPointer;
}
private int copyObjectField(HollowObjectWriteRecord rec, String fieldName, FieldType fieldType, int currentRecordPointer) {
switch(fieldType) {
case BOOLEAN:
if(!VarInt.readVNull(record.data, currentRecordPointer)) {
boolean value = record.data.get(currentRecordPointer) == 1;
if(rec != null)
rec.setBoolean(fieldName, value);
}
return currentRecordPointer + 1;
case INT:
if(VarInt.readVNull(record.data, currentRecordPointer))
return currentRecordPointer + 1;
int ivalue = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(ivalue);
if(rec != null)
rec.setInt(fieldName, ZigZag.decodeInt(ivalue));
return currentRecordPointer;
case LONG:
if(VarInt.readVNull(record.data, currentRecordPointer))
return currentRecordPointer + 1;
long lvalue = VarInt.readVLong(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVLong(lvalue);
if(rec != null)
rec.setLong(fieldName, ZigZag.decodeLong(lvalue));
return currentRecordPointer;
case FLOAT:
int intBits = record.data.readIntBits(currentRecordPointer);
if(intBits != HollowObjectWriteRecord.NULL_FLOAT_BITS) {
float fvalue = Float.intBitsToFloat(intBits);
if(rec != null)
rec.setFloat(fieldName, fvalue);
}
return currentRecordPointer + 4;
case DOUBLE:
long longBits = record.data.readLongBits(currentRecordPointer);
if(longBits != HollowObjectWriteRecord.NULL_DOUBLE_BITS) {
double dvalue = Double.longBitsToDouble(longBits);
if(rec != null)
rec.setDouble(fieldName, dvalue);
}
return currentRecordPointer + 8;
case STRING:
if(VarInt.readVNull(record.data, currentRecordPointer))
return currentRecordPointer + 1;
int length = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(length);
int cLength = VarInt.countVarIntsInRange(record.data, currentRecordPointer, length);
char[] s = new char[cLength];
for(int i=0;i<cLength;i++) {
int charValue = VarInt.readVInt(record.data, currentRecordPointer);
s[i] = (char)charValue;
currentRecordPointer += VarInt.sizeOfVInt(charValue);
}
if(rec != null)
rec.setString(fieldName, new String(s));
return currentRecordPointer;
case BYTES:
if(VarInt.readVNull(record.data, currentRecordPointer))
return currentRecordPointer + 1;
length = VarInt.readVInt(record.data, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(length);
byte[] b = new byte[length];
for(int i=0;i<length;i++) {
b[i] = record.data.get(currentRecordPointer++);
}
if(rec != null)
rec.setBytes(fieldName, b);
return currentRecordPointer;
case REFERENCE:
if(VarInt.readVNull(record.data, currentRecordPointer))
return currentRecordPointer + 1;
int unmappedOrdinal = VarInt.readVInt(record.data, currentRecordPointer);
if(rec != null) {
int mappedOrdinal = ordinalMapping.get(unmappedOrdinal);
rec.setReference(fieldName, mappedOrdinal);
}
return currentRecordPointer + VarInt.sizeOfVInt(unmappedOrdinal);
default:
throw new IllegalArgumentException("Unknown field type: " + fieldType);
}
}
private HollowWriteRecord getWriteRecord(HollowSchema schema) {
HollowWriteRecord rec = writeRecords.get(schema.getName());
if(rec == null) {
switch(schema.getSchemaType()) {
case OBJECT:
rec = new HollowObjectWriteRecord((HollowObjectSchema)schema);
break;
case LIST:
rec = new HollowListWriteRecord();
break;
case SET:
rec = new HollowSetWriteRecord();
break;
case MAP:
rec = new HollowMapWriteRecord();
break;
}
writeRecords.put(schema.getName(), rec);
}
rec.reset();
return rec;
}
}
| 9,005 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper/flatrecords/FlatRecordExtractor.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.write.objectmapper.flatrecords;
import com.netflix.hollow.core.HollowConstants;
import com.netflix.hollow.core.read.engine.HollowReadStateEngine;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.engine.list.HollowListTypeReadState;
import com.netflix.hollow.core.read.engine.map.HollowMapTypeReadState;
import com.netflix.hollow.core.read.engine.object.HollowObjectTypeReadState;
import com.netflix.hollow.core.read.engine.set.HollowSetTypeReadState;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowListSchema;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
import com.netflix.hollow.core.schema.HollowSetSchema;
import com.netflix.hollow.core.write.HollowWriteRecord;
import com.netflix.hollow.core.write.copy.HollowRecordCopier;
import com.netflix.hollow.tools.combine.OrdinalRemapper;
import java.util.HashMap;
import java.util.Map;
public class FlatRecordExtractor {
private final HollowReadStateEngine extractFrom;
private final FlatRecordWriter writer;
private final ExtractorOrdinalRemapper ordinalRemapper;
private final Map<String, HollowRecordCopier> recordCopiersByType;
public FlatRecordExtractor(HollowReadStateEngine extractFrom, HollowSchemaIdentifierMapper schemaIdMapper) {
this.extractFrom = extractFrom;
this.writer = new FlatRecordWriter(extractFrom, schemaIdMapper);
this.ordinalRemapper = new ExtractorOrdinalRemapper();
this.recordCopiersByType = new HashMap<>();
}
public synchronized FlatRecord extract(String type, int ordinal) {
ordinalRemapper.clear();
writer.reset();
HollowTypeReadState typeState = extractFrom.getTypeState(type);
extractHollowRecord(typeState, ordinal);
return writer.generateFlatRecord();
}
private void extractHollowRecord(HollowTypeReadState typeState, int ordinal) {
if(ordinal == -1)
return;
traverse(typeState, ordinal);
String type = typeState.getSchema().getName();
HollowRecordCopier recordCopier = recordCopier(type);
HollowWriteRecord rec = recordCopier.copy(ordinal);
int flatOrdinal = writer.write(typeState.getSchema(), rec);
ordinalRemapper.remapOrdinal(type, ordinal, flatOrdinal);
}
private void traverse(HollowTypeReadState typeState, int ordinal) {
switch(typeState.getSchema().getSchemaType()) {
case OBJECT:
traverseObject((HollowObjectTypeReadState)typeState, ordinal);
break;
case LIST:
traverseList((HollowListTypeReadState)typeState, ordinal);
break;
case SET:
traverseSet((HollowSetTypeReadState)typeState, ordinal);
break;
case MAP:
traverseMap((HollowMapTypeReadState)typeState, ordinal);
break;
}
}
private void traverseObject(HollowObjectTypeReadState typeState, int ordinal) {
HollowObjectSchema schema = typeState.getSchema();
for(int i=0;i<schema.numFields();i++) {
if(schema.getFieldType(i) == FieldType.REFERENCE) {
HollowTypeReadState refTypeState = schema.getReferencedTypeState(i);
int refOrdinal = typeState.readOrdinal(ordinal, i);
extractHollowRecord(refTypeState, refOrdinal);
}
}
}
private void traverseList(HollowListTypeReadState typeState, int ordinal) {
HollowListSchema schema = typeState.getSchema();
int size = typeState.size(ordinal);
for(int i=0;i<size;i++) {
int refOrdinal = typeState.getElementOrdinal(ordinal, i);
if(refOrdinal != HollowConstants.ORDINAL_NONE)
extractHollowRecord(schema.getElementTypeState(), refOrdinal);
}
}
private void traverseSet(HollowSetTypeReadState typeState, int ordinal) {
HollowSetSchema schema = typeState.getSchema();
HollowOrdinalIterator iter = typeState.ordinalIterator(ordinal);
int refOrdinal = iter.next();
while(refOrdinal != HollowOrdinalIterator.NO_MORE_ORDINALS) {
if(refOrdinal != HollowConstants.ORDINAL_NONE)
extractHollowRecord(schema.getElementTypeState(), refOrdinal);
refOrdinal = iter.next();
}
}
private void traverseMap(HollowMapTypeReadState typeState, int ordinal) {
HollowMapSchema schema = typeState.getSchema();
HollowMapEntryOrdinalIterator iter = typeState.ordinalIterator(ordinal);
while(iter.next()) {
if(iter.getKey() != HollowConstants.ORDINAL_NONE)
extractHollowRecord(schema.getKeyTypeState(), iter.getKey());
if(iter.getValue() != HollowConstants.ORDINAL_NONE)
extractHollowRecord(schema.getValueTypeState(), iter.getValue());
}
}
private HollowRecordCopier recordCopier(String type) {
HollowRecordCopier recordCopier = recordCopiersByType.get(type);
if(recordCopier == null) {
recordCopier = HollowRecordCopier.createCopier(extractFrom.getTypeState(type), ordinalRemapper, false);
recordCopiersByType.put(type, recordCopier);
}
return recordCopier;
}
private static class ExtractorOrdinalRemapper implements OrdinalRemapper {
private final Map<TypedOrdinal, Integer> mappedFlatOrdinals = new HashMap<>();
@Override
public int getMappedOrdinal(String type, int originalOrdinal) {
return mappedFlatOrdinals.get(new TypedOrdinal(type, originalOrdinal));
}
@Override
public void remapOrdinal(String type, int originalOrdinal, int mappedOrdinal) {
mappedFlatOrdinals.put(new TypedOrdinal(type, originalOrdinal), mappedOrdinal);
}
@Override
public boolean ordinalIsMapped(String type, int originalOrdinal) {
throw new UnsupportedOperationException();
}
public void clear() {
mappedFlatOrdinals.clear();
}
private static class TypedOrdinal {
private final String type;
private final int ordinal;
public TypedOrdinal(String type, int ordinal) {
this.type = type;
this.ordinal = ordinal;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ordinal;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TypedOrdinal other = (TypedOrdinal) obj;
if (ordinal != other.ordinal)
return false;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
}
}
| 9,006 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/write/objectmapper/flatrecords/FlatRecord.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.write.objectmapper.flatrecords;
import com.netflix.hollow.core.index.key.PrimaryKey;
import com.netflix.hollow.core.memory.ByteData;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.memory.encoding.ZigZag;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.schema.HollowSchema.SchemaType;
import com.netflix.hollow.core.write.objectmapper.RecordPrimaryKey;
import java.util.Arrays;
public class FlatRecord {
final HollowSchemaIdentifierMapper schemaIdMapper;
final ByteData data;
final int dataStartByte;
final int dataEndByte;
final RecordPrimaryKey recordPrimaryKey;
public FlatRecord(ByteData recordData, HollowSchemaIdentifierMapper schemaIdMapper) {
this.data = recordData;
this.schemaIdMapper = schemaIdMapper;
int currentRecordPointer = 0;
int locationOfTopRecord = VarInt.readVInt(recordData, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(locationOfTopRecord);
int end = VarInt.readVInt(recordData, currentRecordPointer);
currentRecordPointer += VarInt.sizeOfVInt(end);
this.dataStartByte = currentRecordPointer;
this.dataEndByte = end + dataStartByte + locationOfTopRecord;
int topRecordSchemaId = VarInt.readVInt(recordData, dataStartByte + locationOfTopRecord);
HollowSchema topRecordSchema = schemaIdMapper.getSchema(topRecordSchemaId);
if (topRecordSchema.getSchemaType() == SchemaType.OBJECT) {
PrimaryKey primaryKey = ((HollowObjectSchema) topRecordSchema).getPrimaryKey();
if (primaryKey != null) {
Object[] recordPrimaryKey = new Object[primaryKey.numFields()];
FieldType[] primaryKeyFieldTypes = schemaIdMapper.getPrimaryKeyFieldTypes(topRecordSchemaId);
int primaryKeyRecordPointer = dataEndByte;
for (int i = 0; i < recordPrimaryKey.length; i++) {
int locationOfField = VarInt.readVInt(recordData, primaryKeyRecordPointer);
primaryKeyRecordPointer += VarInt.sizeOfVInt(locationOfField);
recordPrimaryKey[i] = readPrimaryKeyField(locationOfField + dataStartByte, primaryKeyFieldTypes[i]);
}
this.recordPrimaryKey = new RecordPrimaryKey(topRecordSchema.getName(), recordPrimaryKey);
} else {
this.recordPrimaryKey = null;
}
} else {
this.recordPrimaryKey = null;
}
}
public byte[] toArray() {
byte[] arr = new byte[(int)data.length()];
for(int i=0;i<arr.length;i++) {
arr[i] = data.get(i);
}
return arr;
}
private Object readPrimaryKeyField(int location, FieldType fieldType) {
/// assumption: primary key fields are never null
switch (fieldType) {
case BOOLEAN:
return data.get(location) == 1;
case INT:
return ZigZag.decodeInt(VarInt.readVInt(data, location));
case LONG:
return ZigZag.decodeLong(VarInt.readVLong(data, location));
case DOUBLE:
long longBits = data.readLongBits(location);
return Double.longBitsToDouble(longBits);
case FLOAT:
int intBits = data.readIntBits(location);
return Float.intBitsToFloat(intBits);
case STRING:
int length = VarInt.readVInt(data, location);
location += VarInt.sizeOfVInt(length);
int endLocation = location + length;
char[] s = new char[length];
int cnt = 0;
while(location < endLocation) {
int c = VarInt.readVInt(data, location);
s[cnt] = (char)c;
location += VarInt.sizeOfVInt(c);
cnt++;
}
if(cnt < s.length)
s = Arrays.copyOf(s, cnt);
return new String(s);
case BYTES:
length = VarInt.readVInt(data, location);
location += VarInt.sizeOfVInt(length);
byte[] b = new byte[length];
for(int i=0;i<b.length;i++) {
b[i] = data.get(location++);
}
return b;
case REFERENCE:
default:
throw new IllegalStateException("Should not have encoded primary key with REFERENCE type fields.");
}
}
public RecordPrimaryKey getRecordPrimaryKey() {
return recordPrimaryKey;
}
}
| 9,007 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/FreeOrdinalTracker.java | /*
* Copyright 2016-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
import java.util.Arrays;
/**
* A stack of unused ordinals.<p>
*
* This data structure is used by the {@link ByteArrayOrdinalMap} to track and assign unused ordinals to new records.
*
* The goal is to ensure the "holes" generated by removing unused ordinals during server processing are reused in subsequent cycles,
* instead of growing the "ordinal space" indefinitely.
*
* @author dkoszewnik
*
*/
public class FreeOrdinalTracker {
private int freeOrdinals[];
private int size;
private int nextEmptyOrdinal;
public FreeOrdinalTracker() {
this(0);
}
private FreeOrdinalTracker(int nextEmptyOrdinal) {
this.freeOrdinals = new int[64];
this.nextEmptyOrdinal = nextEmptyOrdinal;
this.size = 0;
}
/**
* @return either an ordinal which was previously deallocated, or the next empty, previously unallocated ordinal in the sequence 0-n
*/
public int getFreeOrdinal() {
if(size == 0)
return nextEmptyOrdinal++;
return freeOrdinals[--size];
}
/**
* Return an ordinal to the pool after the object to which it was assigned is discarded.
*
* @param ordinal the ordinal
*/
public void returnOrdinalToPool(int ordinal) {
if(size == freeOrdinals.length) {
freeOrdinals = Arrays.copyOf(freeOrdinals, freeOrdinals.length * 3 / 2);
}
freeOrdinals[size] = ordinal;
size++;
}
/**
* Specify the next ordinal to return after the reusable pool is exhausted
*
* @param nextEmptyOrdinal the next empty ordinal
*/
public void setNextEmptyOrdinal(int nextEmptyOrdinal) {
this.nextEmptyOrdinal = nextEmptyOrdinal;
}
/**
* Ensure that all future ordinals are returned in ascending order.
*/
public void sort() {
Arrays.sort(freeOrdinals, 0, size);
reverseFreeOrdinalPool();
}
/**
* Focus returned ordinal holes in as few shards as possible.
* Within each shard, return ordinals in ascending order.
*/
public void sort(int numShards) {
int shardNumberMask = numShards - 1;
Shard shards[] = new Shard[numShards];
for(int i=0;i<shards.length;i++)
shards[i] = new Shard();
for(int i=0;i<size;i++)
shards[freeOrdinals[i] & shardNumberMask].freeOrdinalCount++;
Shard orderedShards[] = Arrays.copyOf(shards, shards.length);
Arrays.sort(orderedShards, (s1, s2) -> s2.freeOrdinalCount - s1.freeOrdinalCount);
for(int i=1;i<numShards;i++)
orderedShards[i].currentPos = orderedShards[i-1].currentPos + orderedShards[i-1].freeOrdinalCount;
/// each shard will receive the ordinals in ascending order.
Arrays.sort(freeOrdinals, 0, size);
int newFreeOrdinals[] = new int[freeOrdinals.length];
for(int i=0;i<size;i++) {
Shard shard = shards[freeOrdinals[i] & shardNumberMask];
newFreeOrdinals[shard.currentPos] = freeOrdinals[i];
shard.currentPos++;
}
freeOrdinals = newFreeOrdinals;
reverseFreeOrdinalPool();
}
private static class Shard {
private int freeOrdinalCount;
private int currentPos;
}
private void reverseFreeOrdinalPool() {
int midpoint = size / 2;
for(int i=0;i<midpoint;i++) {
int temp = freeOrdinals[i];
freeOrdinals[i] = freeOrdinals[size-i-1];
freeOrdinals[size-i-1] = temp;
}
}
/**
* Resets the FreeOrdinalTracker to its initial state.
*/
public void reset() {
size = 0;
nextEmptyOrdinal = 0;
}
}
| 9,008 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/VariableLengthData.java | package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
/**
* Conceptually this can be thought of as a single byte array or buffer of undefined length. It will grow automatically
* when a byte is written to an index greater than the currently allocated array/buffer.
* *
*/
public interface VariableLengthData extends ByteData {
/**
* Load <i>length</i> bytes of data from the supplied {@code HollowBlobInput}
*
* @param in the {@code HollowBlobInput}
* @param length the length of the data to load
* @throws IOException if data could not be loaded
*/
void loadFrom(HollowBlobInput in, long length) throws IOException;
/**
* Copy bytes from another {@code VariableLengthData} object.
*
* @param src the source {@code VariableLengthData}
* @param srcPos position in source data to begin copying from
* @param destPos position in destination data to begin copying to
* @param length length of data to copy in bytes
*/
void copy(ByteData src, long srcPos, long destPos, long length);
/**
* Copies data from the provided source into destination, guaranteeing that if the update is seen
* by another thread, then all other writes prior to this call are also visible to that thread.
*
* @param src the source data
* @param srcPos position in source data to begin copying from
* @param destPos position in destination to begin copying to
* @param length length of data to copy in bytes
*/
void orderedCopy(VariableLengthData src, long srcPos, long destPos, long length);
/**
* Data size in bytes
* @return size in bytes
*/
long size();
}
| 9,009 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/VariableLengthDataFactory.java | package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import java.util.logging.Logger;
public class VariableLengthDataFactory {
private static final Logger LOG = Logger.getLogger(VariableLengthDataFactory.class.getName());
public static VariableLengthData get(MemoryMode memoryMode, ArraySegmentRecycler memoryRecycler) {
if (memoryMode.equals(MemoryMode.ON_HEAP)) {
return new SegmentedByteArray(memoryRecycler);
} else if (memoryMode.equals(MemoryMode.SHARED_MEMORY_LAZY)) {
/// list pointer array
return new EncodedByteBuffer();
} else {
throw new UnsupportedOperationException("Memory mode " + memoryMode.name() + " not supported");
}
}
public static void destroy(VariableLengthData vld) {
if (vld instanceof SegmentedByteArray) {
((SegmentedByteArray) vld).destroy();
} else if (vld instanceof EncodedByteBuffer) {
LOG.warning("Destroy operation is a no-op in shared memory mode");
} else {
throw new UnsupportedOperationException("Unknown type");
}
}
}
| 9,010 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/MemoryMode.java | package com.netflix.hollow.core.memory;
public enum MemoryMode {
ON_HEAP, // eager load into main memory, on JVM heap
SHARED_MEMORY_LAZY; // map to virtual memory and lazy load into main memory, off heap
// SHARED_MEMORY_EAGER // (in future) map to virtual memory and eager load into main memory, off heap
/*
* Returns whether a memory mode is supported by Hollow consumer
*/
public boolean consumerSupported() {
return this.equals(ON_HEAP) || this.equals(SHARED_MEMORY_LAZY);
}
/*
* Returns whether a memory mode supports type filtering
*/
public boolean supportsFiltering() {
return this.equals(ON_HEAP);
}
}
| 9,011 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/ThreadSafeBitSet.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.BitSet;
import java.util.concurrent.atomic.AtomicLongArray;
import java.util.concurrent.atomic.AtomicReference;
/**
* This is a lock-free, thread-safe version of a {@link java.util.BitSet}.<p>
*
* Instead of a long array to hold the bits, this implementation uses an AtomicLongArray, then
* does the appropriate compare-and-swap operations when setting the bits.
*
* @author dkoszewnik
*
*/
public class ThreadSafeBitSet {
public static final int DEFAULT_LOG2_SEGMENT_SIZE_IN_BITS = 14;
private final int numLongsPerSegment;
private final int log2SegmentSize;
private final int segmentMask;
private final AtomicReference<ThreadSafeBitSetSegments> segments;
public ThreadSafeBitSet() {
this(DEFAULT_LOG2_SEGMENT_SIZE_IN_BITS); /// 16384 bits, 2048 bytes, 256 longs per segment
}
public ThreadSafeBitSet(int log2SegmentSizeInBits) {
this(log2SegmentSizeInBits, 0);
}
public ThreadSafeBitSet(int log2SegmentSizeInBits, int numBitsToPreallocate) {
if(log2SegmentSizeInBits < 6)
throw new IllegalArgumentException("Cannot specify fewer than 64 bits in each segment!");
this.log2SegmentSize = log2SegmentSizeInBits;
this.numLongsPerSegment = (1 << (log2SegmentSizeInBits - 6));
this.segmentMask = numLongsPerSegment - 1;
long numBitsPerSegment = numLongsPerSegment * 64;
int numSegmentsToPreallocate = numBitsToPreallocate == 0 ? 1 : (int)(((numBitsToPreallocate - 1) / numBitsPerSegment) + 1);
segments = new AtomicReference<ThreadSafeBitSetSegments>();
segments.set(new ThreadSafeBitSetSegments(numSegmentsToPreallocate, numLongsPerSegment));
}
public void set(int position) {
int segmentPosition = position >>> log2SegmentSize; /// which segment -- div by num bits per segment
int longPosition = (position >>> 6) & segmentMask; /// which long in the segment -- remainder of div by num bits per segment
int bitPosition = position & 0x3F; /// which bit in the long -- remainder of div by num bits in long (64)
AtomicLongArray segment = getSegment(segmentPosition);
long mask = 1L << bitPosition;
// Thread safety: we need to loop until we win the race to set the long value.
while(true) {
// determine what the new long value will be after we set the appropriate bit.
long currentLongValue = segment.get(longPosition);
long newLongValue = currentLongValue | mask;
// if no other thread has modified the value since we read it, we won the race and we are done.
if(segment.compareAndSet(longPosition, currentLongValue, newLongValue))
break;
}
}
public void clear(int position) {
int segmentPosition = position >>> log2SegmentSize; /// which segment -- div by num bits per segment
int longPosition = (position >>> 6) & segmentMask; /// which long in the segment -- remainder of div by num bits per segment
int bitPosition = position & 0x3F; /// which bit in the long -- remainder of div by num bits in long (64)
AtomicLongArray segment = getSegment(segmentPosition);
long mask = ~(1L << bitPosition);
// Thread safety: we need to loop until we win the race to set the long value.
while(true) {
// determine what the new long value will be after we set the appropriate bit.
long currentLongValue = segment.get(longPosition);
long newLongValue = currentLongValue & mask;
// if no other thread has modified the value since we read it, we won the race and we are done.
if(segment.compareAndSet(longPosition, currentLongValue, newLongValue))
break;
}
}
public boolean get(int position) {
int segmentPosition = position >>> log2SegmentSize; /// which segment -- div by num bits per segment
int longPosition = (position >>> 6) & segmentMask; /// which long in the segment -- remainder of div by num bits per segment
int bitPosition = position & 0x3F; /// which bit in the long -- remainder of div by num bits in long (64)
AtomicLongArray segment = getSegment(segmentPosition);
long mask = 1L << bitPosition;
return ((segment.get(longPosition) & mask) != 0);
}
public long maxSetBit() {
ThreadSafeBitSetSegments segments = this.segments.get();
int segmentIdx = segments.numSegments() - 1;
for(;segmentIdx >= 0; segmentIdx--) {
AtomicLongArray segment = segments.getSegment(segmentIdx);
for(int longIdx=segment.length() - 1; longIdx >= 0; longIdx--) {
long l = segment.get(longIdx);
if(l != 0)
return (segmentIdx << log2SegmentSize) + (longIdx * 64) + (63 - Long.numberOfLeadingZeros(l));
}
}
return -1;
}
public int nextSetBit(int fromIndex) {
if (fromIndex < 0)
throw new IndexOutOfBoundsException("fromIndex < 0: " + fromIndex);
int segmentPosition = fromIndex >>> log2SegmentSize; /// which segment -- div by num bits per segment
ThreadSafeBitSetSegments segments = this.segments.get();
if(segmentPosition >= segments.numSegments())
return -1;
int longPosition = (fromIndex >>> 6) & segmentMask; /// which long in the segment -- remainder of div by num bits per segment
int bitPosition = fromIndex & 0x3F; /// which bit in the long -- remainder of div by num bits in long (64)
AtomicLongArray segment = segments.getSegment(segmentPosition);
long word = segment.get(longPosition) & (0xffffffffffffffffL << bitPosition);
while (true) {
if (word != 0)
return (segmentPosition << (log2SegmentSize)) + (longPosition << 6) + Long.numberOfTrailingZeros(word);
if (++longPosition > segmentMask) {
segmentPosition++;
if(segmentPosition >= segments.numSegments())
return -1;
segment = segments.getSegment(segmentPosition);
longPosition = 0;
}
word = segment.get(longPosition);
}
}
/**
* @return the number of bits which are set in this bit set.
*/
public int cardinality() {
ThreadSafeBitSetSegments segments = this.segments.get();
int numSetBits = 0;
for(int i=0;i<segments.numSegments();i++) {
AtomicLongArray segment = segments.getSegment(i);
for(int j=0;j<segment.length();j++) {
numSetBits += Long.bitCount(segment.get(j));
}
}
return numSetBits;
}
/**
* @return the number of bits which are current specified by this bit set. This is the maximum value
* to which you might need to iterate, if you were to iterate over all bits in this set.
*/
public int currentCapacity() {
return segments.get().numSegments() * (1 << log2SegmentSize);
}
/**
* Clear all bits to 0.
*/
public void clearAll() {
ThreadSafeBitSetSegments segments = this.segments.get();
for(int i=0;i<segments.numSegments();i++) {
AtomicLongArray segment = segments.getSegment(i);
for(int j=0;j<segment.length();j++) {
segment.set(j, 0L);
}
}
}
/**
* Return a new bit set which contains all bits which are contained in this bit set, and which are NOT contained in the <code>other</code> bit set.<p>
*
* In other words, return a new bit set, which is a bitwise and with the bitwise not of the other bit set.
*
* @param other the other bit set
* @return the resulting bit set
*/
public ThreadSafeBitSet andNot(ThreadSafeBitSet other) {
if(other.log2SegmentSize != log2SegmentSize)
throw new IllegalArgumentException("Segment sizes must be the same");
ThreadSafeBitSetSegments thisSegments = this.segments.get();
ThreadSafeBitSetSegments otherSegments = other.segments.get();
ThreadSafeBitSetSegments newSegments = new ThreadSafeBitSetSegments(thisSegments.numSegments(), numLongsPerSegment);
for(int i=0;i<thisSegments.numSegments();i++) {
AtomicLongArray thisArray = thisSegments.getSegment(i);
AtomicLongArray otherArray = (i < otherSegments.numSegments()) ? otherSegments.getSegment(i) : null;
AtomicLongArray newArray = newSegments.getSegment(i);
for(int j=0;j<thisArray.length();j++) {
long thisLong = thisArray.get(j);
long otherLong = (otherArray == null) ? 0 : otherArray.get(j);
newArray.set(j, thisLong & ~otherLong);
}
}
ThreadSafeBitSet andNot = new ThreadSafeBitSet(log2SegmentSize);
andNot.segments.set(newSegments);
return andNot;
}
/**
* Return a new bit set which contains all bits which are contained in *any* of the specified bit sets.
*
* @param bitSets the other bit sets
* @return the resulting bit set
*/
public static ThreadSafeBitSet orAll(ThreadSafeBitSet... bitSets) {
if(bitSets.length == 0)
return new ThreadSafeBitSet();
int log2SegmentSize = bitSets[0].log2SegmentSize;
int numLongsPerSegment = bitSets[0].numLongsPerSegment;
ThreadSafeBitSetSegments segments[] = new ThreadSafeBitSetSegments[bitSets.length];
int maxNumSegments = 0;
for(int i=0;i<bitSets.length;i++) {
if(bitSets[i].log2SegmentSize != log2SegmentSize)
throw new IllegalArgumentException("Segment sizes must be the same");
segments[i] = bitSets[i].segments.get();
if(segments[i].numSegments() > maxNumSegments)
maxNumSegments = segments[i].numSegments();
}
ThreadSafeBitSetSegments newSegments = new ThreadSafeBitSetSegments(maxNumSegments, numLongsPerSegment);
AtomicLongArray segment[] = new AtomicLongArray[segments.length];
for(int i=0;i<maxNumSegments;i++) {
for(int j=0;j<segments.length;j++) {
segment[j] = i < segments[j].numSegments() ? segments[j].getSegment(i) : null;
}
AtomicLongArray newSegment = newSegments.getSegment(i);
for(int j=0;j<numLongsPerSegment;j++) {
long value = 0;
for(int k=0;k<segments.length;k++) {
if(segment[k] != null)
value |= segment[k].get(j);
}
newSegment.set(j, value);
}
}
ThreadSafeBitSet or = new ThreadSafeBitSet(log2SegmentSize);
or.segments.set(newSegments);
return or;
}
/**
* Get the segment at <code>segmentIndex</code>. If this segment does not yet exist, create it.
*
* @param segmentIndex the segment index
* @return the segment
*/
private AtomicLongArray getSegment(int segmentIndex) {
ThreadSafeBitSetSegments visibleSegments = segments.get();
while(visibleSegments.numSegments() <= segmentIndex) {
/// Thread safety: newVisibleSegments contains all of the segments from the currently visible segments, plus extra.
/// all of the segments in the currently visible segments are canonical and will not change.
ThreadSafeBitSetSegments newVisibleSegments = new ThreadSafeBitSetSegments(visibleSegments, segmentIndex + 1, numLongsPerSegment);
/// because we are using a compareAndSet, if this thread "wins the race" and successfully sets this variable, then the segments
/// which are newly defined in newVisibleSegments become canonical.
if(segments.compareAndSet(visibleSegments, newVisibleSegments)) {
visibleSegments = newVisibleSegments;
} else {
/// If we "lose the race" and are growing the ThreadSafeBitSet segments larger,
/// then we will gather the new canonical sets from the update which we missed on the next iteration of this loop.
/// Newly defined segments in newVisibleSegments will be discarded, they do not get to become canonical.
visibleSegments = segments.get();
}
}
return visibleSegments.getSegment(segmentIndex);
}
private static class ThreadSafeBitSetSegments {
private final AtomicLongArray segments[];
private ThreadSafeBitSetSegments(int numSegments, int segmentLength) {
AtomicLongArray segments[] = new AtomicLongArray[numSegments];
for(int i=0;i<numSegments;i++) {
segments[i] = new AtomicLongArray(segmentLength);
}
/// Thread safety: Because this.segments is final, the preceding operations in this constructor are guaranteed to be visible to any
/// other thread which accesses this.segments.
this.segments = segments;
}
private ThreadSafeBitSetSegments(ThreadSafeBitSetSegments copyFrom, int numSegments, int segmentLength) {
AtomicLongArray segments[] = new AtomicLongArray[numSegments];
for(int i=0;i<numSegments;i++) {
segments[i] = i < copyFrom.numSegments() ? copyFrom.getSegment(i) : new AtomicLongArray(segmentLength);
}
/// see above re: thread-safety of this assignment
this.segments = segments;
}
public int numSegments() {
return segments.length;
}
public AtomicLongArray getSegment(int index) {
return segments[index];
}
}
public void serializeBitsTo(DataOutputStream os) throws IOException {
ThreadSafeBitSetSegments segments = this.segments.get();
os.writeInt(segments.numSegments() * numLongsPerSegment);
for(int i=0;i<segments.numSegments();i++) {
AtomicLongArray arr = segments.getSegment(i);
for(int j=0;j<arr.length();j++) {
os.writeLong(arr.get(j));
}
}
}
@Override
public boolean equals(Object obj) {
if(!(obj instanceof ThreadSafeBitSet))
return false;
ThreadSafeBitSet other = (ThreadSafeBitSet)obj;
if(other.log2SegmentSize != log2SegmentSize)
throw new IllegalArgumentException("Segment sizes must be the same");
ThreadSafeBitSetSegments thisSegments = this.segments.get();
ThreadSafeBitSetSegments otherSegments = other.segments.get();
for(int i=0;i<thisSegments.numSegments();i++) {
AtomicLongArray thisArray = thisSegments.getSegment(i);
AtomicLongArray otherArray = (i < otherSegments.numSegments()) ? otherSegments.getSegment(i) : null;
for(int j=0;j<thisArray.length();j++) {
long thisLong = thisArray.get(j);
long otherLong = (otherArray == null) ? 0 : otherArray.get(j);
if(thisLong != otherLong)
return false;
}
}
for(int i=thisSegments.numSegments();i<otherSegments.numSegments();i++) {
AtomicLongArray otherArray = otherSegments.getSegment(i);
for(int j=0;j<otherArray.length();j++) {
long l = otherArray.get(j);
if(l != 0)
return false;
}
}
return true;
}
@Override
public int hashCode() {
int result = log2SegmentSize;
result = 31 * result + Arrays.hashCode(segments.get().segments);
return result;
}
/**
* @return a new BitSet with same bits set
*/
public BitSet toBitSet() {
BitSet resultSet = new BitSet();
int ordinal = this.nextSetBit(0);
while(ordinal!=-1) {
resultSet.set(ordinal);
ordinal = this.nextSetBit(ordinal + 1);
}
return resultSet;
}
@Override
public String toString() {
return toBitSet().toString();
}
}
| 9,012 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/ByteData.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
/**
* This interface is used to hide the underlying implementation of a range of bytes.
*
* This is useful because Hollow often uses pooled arrays to back range of bytes.
*
* @see SegmentedByteArray
* @see EncodedByteBuffer
*
* @author dkoszewnik
*
*/
public interface ByteData {
default long readLongBits(long position) {
long longBits = (long) (get(position++) & 0xFF) << 56;
longBits |= (long) (get(position++) & 0xFF) << 48;
longBits |= (long) (get(position++) & 0xFF) << 40;
longBits |= (long) (get(position++) & 0xFF) << 32;
longBits |= (long) (get(position++) & 0xFF) << 24;
longBits |= (get(position++) & 0xFF) << 16;
longBits |= (get(position++) & 0xFF) << 8;
longBits |= (get(position) & 0xFF);
return longBits;
}
default int readIntBits(long position) {
int intBits = (get(position++) & 0xFF) << 24;
intBits |= (get(position++) & 0xFF) << 16;
intBits |= (get(position++) & 0xFF) << 8;
intBits |= (get(position) & 0xFF);
return intBits;
}
default long length() {
throw new UnsupportedOperationException();
}
/**
* Get the value of the byte at the specified position.
* @param index the position (in byte units)
* @return the byte value
*/
byte get(long index);
}
| 9,013 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/FixedLengthData.java | package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
/**
* <p>
* Each record in Hollow begins with a fixed-length number of bits. At the lowest level, these bits are held in
* {@code FixedLengthData} data structures which can be either backed by long arrays or ByteBuffers.
* For example, if an EncodedLongBuffer was queried for the 6-bit value starting at bit 7 in the following example
* range of bits:
* </p>
* <pre>
* 0001000100100001101000010100101001111010101010010010101
* </pre>
* <p> the value 100100 in binary, or 36 in base 10, would be returned. </p>
* <p>
* As a result there two ways to obtain an element value from the bit string at a given bit index. The first,
* using {@link #getElementValue} for values less than 59 bits in length and the second, using
* {@link #getLargeElementValue} that is recommended for values of upto 64 bits in length.
* </p>
*/
public interface FixedLengthData {
/**
* Gets an element value, comprising of {@code bitsPerElement} bits, at the given
* bit {@code index}. {@code bitsPerElement} should be less than 59 bits.
*
* @param index the bit index
* @param bitsPerElement bits per element, must be less than 59 otherwise
* the result is undefined
* @return the element value
*/
long getElementValue(long index, int bitsPerElement);
/**
* Gets a masked element value, comprising of {@code bitsPerElement} bits, at the given
* bit {@code index}.
*
* @param index the bit index
* @param bitsPerElement bits per element, must be less than 59 otherwise
* the result is undefined
* @param mask the mask to apply to an element value before it is returned.
* The mask should be less than or equal to {@code (1L << bitsPerElement) - 1} to
* guarantee that one or more (possibly) partial element values occurring
* before and after the desired element value are not included in the returned value.
* @return the masked element value
*/
long getElementValue(long index, int bitsPerElement, long mask);
/**
* Gets a large element value, comprising of {@code bitsPerElement} bits, at the given
* bit {@code index}.
* <p>
* This method should be utilized if the {@code bitsPerElement} may exceed {@code 58} bits,
* otherwise the method {@link #getLargeElementValue(long, int)} can be utilized instead.
*
* @param index the bit index
* @param bitsPerElement bits per element, may be greater than 58
* @return the large element value
*/
long getLargeElementValue(long index, int bitsPerElement);
/**
* Gets a masked large element value, comprising of {@code bitsPerElement} bits, at the given
* bit {@code index}.
* <p>
* This method should be utilized if the {@code bitsPerElement} may exceed {@code 58} bits,
* otherwise the method {@link #getLargeElementValue(long, int, long)} can be utilized instead.
*
* @param index the bit index
* @param bitsPerElement bits per element, may be greater than 58
* @param mask the mask to apply to an element value before it is returned.
* The mask should be less than or equal to {@code (1L << bitsPerElement) - 1} to
* guarantee that one or more (possibly) partial element values occurring
* before and after the desired element value are not included in the returned value.
* @return the masked large element value
*/
long getLargeElementValue(long index, int bitsPerElement, long mask);
void setElementValue(long index, int bitsPerElement, long value);
void copyBits(FixedLengthData copyFrom, long sourceStartBit, long destStartBit, long numBits);
void incrementMany(long startBit, long increment, long bitsBetweenIncrements, int numIncrements);
void clearElementValue(long index, int bitsPerElement);
/**
* Discard fixed length data from input. The input contains the number of longs to discard.
*
* @param in Hollow Blob Input to discard data from
*/
static void discardFrom(HollowBlobInput in) throws IOException {
long numLongs = VarInt.readVLong(in);
long bytesToSkip = numLongs * 8;
while(bytesToSkip > 0) {
bytesToSkip -= in.skipBytes(bytesToSkip);
}
}
static int bitsRequiredToRepresentValue(long value) {
if(value == 0)
return 1;
return 64 - Long.numberOfLeadingZeros(value);
}
}
| 9,014 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/FixedLengthDataFactory.java | package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.memory.encoding.EncodedLongBuffer;
import com.netflix.hollow.core.memory.encoding.FixedLengthElementArray;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
import java.util.logging.Logger;
public class FixedLengthDataFactory {
private static final Logger LOG = Logger.getLogger(FixedLengthDataFactory.class.getName());
public static FixedLengthData get(HollowBlobInput in, MemoryMode memoryMode, ArraySegmentRecycler memoryRecycler) throws IOException {
if (memoryMode.equals(MemoryMode.ON_HEAP)) {
return FixedLengthElementArray.newFrom(in, memoryRecycler);
} else if (memoryMode.equals(MemoryMode.SHARED_MEMORY_LAZY)) {
return EncodedLongBuffer.newFrom(in);
} else {
throw new UnsupportedOperationException("Memory mode " + memoryMode.name() + " not supported");
}
}
public static FixedLengthData get(long numBits, MemoryMode memoryMode, ArraySegmentRecycler memoryRecycler) {
if (memoryMode.equals(MemoryMode.ON_HEAP)) {
return new FixedLengthElementArray(memoryRecycler, numBits);
} else {
throw new UnsupportedOperationException("Memory mode " + memoryMode.name() + " not supported");
}
}
public static void destroy(FixedLengthData fld, ArraySegmentRecycler memoryRecycler) {
if (fld instanceof FixedLengthElementArray) {
((FixedLengthElementArray) fld).destroy(memoryRecycler);
} else if (fld instanceof EncodedLongBuffer) {
LOG.warning("Destroy operation is a no-op in shared memory mode");
} else {
throw new UnsupportedOperationException("Unknown type");
}
}
}
| 9,015 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/SegmentedByteArray.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import sun.misc.Unsafe;
/**
* A segmented byte array backs the {@link ByteData} interface with array segments, which potentially come from a pool of reusable memory.<p>
*
* This ByteData can grow without allocating successively larger blocks and copying memory.<p>
*
* Segment length is always a power of two so that the location of a given index can be found with mask and shift operations.<p>
*
* Conceptually this can be thought of as a single byte array of undefined length. The currently allocated buffer will always be
* a multiple of the size of the segments. The buffer will grow automatically when a byte is written to an index greater than the
* currently allocated buffer.
*
* @see ArraySegmentRecycler
*
* @author dkoszewnik
*
*/
@SuppressWarnings("restriction")
public class SegmentedByteArray implements VariableLengthData {
private static final Unsafe unsafe = HollowUnsafeHandle.getUnsafe();
private byte[][] segments;
private final int log2OfSegmentSize;
private final int bitmask;
private final ArraySegmentRecycler memoryRecycler;
public SegmentedByteArray(ArraySegmentRecycler memoryRecycler) {
this.segments = new byte[2][];
this.log2OfSegmentSize = memoryRecycler.getLog2OfByteSegmentSize();
this.bitmask = (1 << log2OfSegmentSize) - 1;
this.memoryRecycler = memoryRecycler;
}
/**
* Set the byte at the given index to the specified value
* @param index the index
* @param value the byte value
*/
public void set(long index, byte value) {
int segmentIndex = (int)(index >> log2OfSegmentSize);
ensureCapacity(segmentIndex);
segments[segmentIndex][(int)(index & bitmask)] = value;
}
/**
* Get the value of the byte at the specified index.
* @param index the index
* @return the byte value
*/
@Override
public byte get(long index) {
return segments[(int)(index >>> log2OfSegmentSize)][(int)(index & bitmask)];
}
@Override
public void copy(ByteData src, long srcPos, long destPos, long length) {
for(long i=0;i<length;i++) {
set(destPos++, src.get(srcPos++));
}
}
/**
* For a SegmentedByteArray, this is a faster copy implementation.
*
* @param src the source data
* @param srcPos the position to begin copying from the source data
* @param destPos the position to begin writing in this array
* @param length the length of the data to copy
*/
public void copy(SegmentedByteArray src, long srcPos, long destPos, long length) {
int segmentLength = 1 << log2OfSegmentSize;
int currentSegment = (int)(destPos >>> log2OfSegmentSize);
int segmentStartPos = (int)(destPos & bitmask);
int remainingBytesInSegment = segmentLength - segmentStartPos;
while(length > 0) {
int bytesToCopyFromSegment = (int) Math.min(remainingBytesInSegment, length);
ensureCapacity(currentSegment);
int copiedBytes = src.copy(srcPos, segments[currentSegment], segmentStartPos, bytesToCopyFromSegment);
srcPos += copiedBytes;
length -= copiedBytes;
segmentStartPos = 0;
remainingBytesInSegment = segmentLength;
currentSegment++;
}
}
/**
* copies exactly data.length bytes from this SegmentedByteArray into the provided byte array
*
* @param srcPos the position to begin copying from the source data
* @param data the source data
* @param destPos the position to begin writing in this array
* @param length the length of the data to copy
* @return the number of bytes copied
*/
public int copy(long srcPos, byte[] data, int destPos, int length) {
int segmentSize = 1 << log2OfSegmentSize;
int remainingBytesInSegment = (int)(segmentSize - (srcPos & bitmask));
int dataPosition = destPos;
while(length > 0) {
byte[] segment = segments[(int)(srcPos >>> log2OfSegmentSize)];
int bytesToCopyFromSegment = Math.min(remainingBytesInSegment, length);
System.arraycopy(segment, (int)(srcPos & bitmask), data, dataPosition, bytesToCopyFromSegment);
dataPosition += bytesToCopyFromSegment;
srcPos += bytesToCopyFromSegment;
remainingBytesInSegment = segmentSize - (int)(srcPos & bitmask);
length -= bytesToCopyFromSegment;
}
return dataPosition - destPos;
}
/**
* checks equality for a specified range of bytes in two arrays
*
* @param rangeStart the start position of the comparison range in this array
* @param compareTo the other array to compare
* @param cmpStart the start position of the comparison range in the other array
* @param length the length of the comparison range
* @return
*/
public boolean rangeEquals(long rangeStart, SegmentedByteArray compareTo, long cmpStart, int length) {
for(int i=0;i<length;i++)
if(get(rangeStart + i) != compareTo.get(cmpStart + i))
return false;
return true;
}
@Override
public void orderedCopy(VariableLengthData src, long srcPos, long destPos, long length) {
int segmentLength = 1 << log2OfSegmentSize;
int currentSegment = (int)(destPos >>> log2OfSegmentSize);
int segmentStartPos = (int)(destPos & bitmask);
int remainingBytesInSegment = segmentLength - segmentStartPos;
while(length > 0) {
int bytesToCopyFromSegment = (int) Math.min(remainingBytesInSegment, length);
ensureCapacity(currentSegment);
int copiedBytes = ((SegmentedByteArray) src).orderedCopy(srcPos, segments[currentSegment], segmentStartPos, bytesToCopyFromSegment);
srcPos += copiedBytes;
length -= copiedBytes;
segmentStartPos = 0;
remainingBytesInSegment = segmentLength;
currentSegment++;
}
}
/**
* copies exactly data.length bytes from this SegmentedByteArray into the provided byte array,
* guaranteeing that if the update is seen by another thread, then all other writes prior to
* this call are also visible to that thread.
*
* @param srcPos the position to begin copying from the source data
* @param data the source data
* @param destPos the position to begin writing in this array
* @param length the length of the data to copy
* @return the number of bytes copied
*/
private int orderedCopy(long srcPos, byte[] data, int destPos, int length) {
int segmentSize = 1 << log2OfSegmentSize;
int remainingBytesInSegment = (int)(segmentSize - (srcPos & bitmask));
int dataPosition = destPos;
while(length > 0) {
byte[] segment = segments[(int)(srcPos >>> log2OfSegmentSize)];
int bytesToCopyFromSegment = Math.min(remainingBytesInSegment, length);
orderedCopy(segment, (int)(srcPos & bitmask), data, dataPosition, bytesToCopyFromSegment);
dataPosition += bytesToCopyFromSegment;
srcPos += bytesToCopyFromSegment;
remainingBytesInSegment = segmentSize - (int)(srcPos & bitmask);
length -= bytesToCopyFromSegment;
}
return dataPosition - destPos;
}
@Override
public void loadFrom(HollowBlobInput is, long length) throws IOException {
int segmentSize = 1 << log2OfSegmentSize;
int segment = 0;
byte scratch[] = new byte[segmentSize];
while(length > 0) {
ensureCapacity(segment);
long bytesToCopy = Math.min(segmentSize, length);
long bytesCopied = 0;
while(bytesCopied < bytesToCopy) {
bytesCopied += is.read(scratch, (int)bytesCopied, (int)(bytesToCopy - bytesCopied));
}
orderedCopy(scratch, 0, segments[segment++], 0, (int)bytesCopied);
length -= bytesCopied;
}
}
/**
* Write a portion of this data to an OutputStream.
*
* @param os the output stream to write to
* @param startPosition the position to begin copying from this array
* @param len the length of the data to copy
* @throws IOException if the write to the output stream could not be performed
*/
public void writeTo(OutputStream os, long startPosition, long len) throws IOException {
int segmentSize = 1 << log2OfSegmentSize;
int remainingBytesInSegment = segmentSize - (int)(startPosition & bitmask);
long remainingBytesInCopy = len;
while(remainingBytesInCopy > 0) {
long bytesToCopyFromSegment = Math.min(remainingBytesInSegment, remainingBytesInCopy);
os.write(segments[(int)(startPosition >>> log2OfSegmentSize)], (int)(startPosition & bitmask), (int)bytesToCopyFromSegment);
startPosition += bytesToCopyFromSegment;
remainingBytesInSegment = segmentSize - (int)(startPosition & bitmask);
remainingBytesInCopy -= bytesToCopyFromSegment;
}
}
private void orderedCopy(byte[] src, int srcPos, byte[] dest, int destPos, int length) {
int endSrcPos = srcPos + length;
destPos += Unsafe.ARRAY_BYTE_BASE_OFFSET;
while(srcPos < endSrcPos) {
unsafe.putByteVolatile(dest, destPos++, src[srcPos++]);
}
}
/**
* Ensures that the segment at segmentIndex exists
*
* @param segmentIndex the segment index
*/
private void ensureCapacity(int segmentIndex) {
while(segmentIndex >= segments.length) {
segments = Arrays.copyOf(segments, segments.length * 3 / 2);
}
if(segments[segmentIndex] == null) {
segments[segmentIndex] = memoryRecycler.getByteArray();
}
}
public void destroy() {
for(int i=0;i<segments.length;i++) {
if(segments[i] != null)
memoryRecycler.recycleByteArray(segments[i]);
}
}
@Override
public long size() {
long size = 0;
for(int i=0;i<segments.length;i++) {
if(segments[i] != null)
size += segments[i].length;
}
return size;
}
}
| 9,016 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/ByteArrayOrdinalMap.java | /*
* Copyright 2016-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.memory.pool.WastefulRecycler;
import java.util.Arrays;
import java.util.BitSet;
import java.util.concurrent.atomic.AtomicLongArray;
/**
* This data structure maps byte sequences to ordinals. This is a hash table.
* <p>
* The <code>pointersAndOrdinals</code> AtomicLongArray contains keys, and the {@link ByteDataArray}
* contains values. Each key has two components.
* <p>
* The high 29 bits in the key represents the ordinal. The low 35 bits represents the pointer to the start position
* of the byte sequence in the ByteDataBuffer. Each byte sequence is preceded by a variable-length integer
* (see {@link VarInt}), indicating the length of the sequence.<p>
*
* @author dkoszewnik
*/
public class ByteArrayOrdinalMap {
private static final long EMPTY_BUCKET_VALUE = -1L;
private static final int BITS_PER_ORDINAL = 29;
private static final int BITS_PER_POINTER = Long.SIZE - BITS_PER_ORDINAL;
private static final long POINTER_MASK = (1L << BITS_PER_POINTER) - 1;
private static final long ORDINAL_MASK = (1L << BITS_PER_ORDINAL) - 1;
private static final long MAX_BYTE_DATA_LENGTH = 1L << BITS_PER_POINTER;
/// Thread safety: We need volatile access semantics to the individual elements in the
/// pointersAndOrdinals array.
/// Ordinal is the high 29 bits. Pointer to byte data is the low 35 bits.
/// In addition need volatile access to the reference when resize occurs
private volatile AtomicLongArray pointersAndOrdinals;
private final ByteDataArray byteData;
private final FreeOrdinalTracker freeOrdinalTracker;
private int size;
private int sizeBeforeGrow;
private BitSet unusedPreviousOrdinals;
private long[] pointersByOrdinal;
/**
* Creates a byte array ordinal map with a an initial capacity of 256 elements,
* and a load factor of 70%.
*/
public ByteArrayOrdinalMap() {
this(256);
}
/**
* Creates a byte array ordinal map with an initial capacity of a given size
* rounded up to the nearest power of two, and a load factor of 70%.
*/
public ByteArrayOrdinalMap(int size) {
size = bucketSize(size);
this.freeOrdinalTracker = new FreeOrdinalTracker();
this.byteData = new ByteDataArray(WastefulRecycler.DEFAULT_INSTANCE);
this.pointersAndOrdinals = emptyKeyArray(size);
this.sizeBeforeGrow = (int) (((float) size) * 0.7); /// 70% load factor
this.size = 0;
}
private static int bucketSize(int x) {
// See Hackers Delight Fig. 3-3
x = x - 1;
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
return (x < 256) ? 256 : (x >= 1 << 30) ? 1 << 30 : x + 1;
}
public int getOrAssignOrdinal(ByteDataArray serializedRepresentation) {
return getOrAssignOrdinal(serializedRepresentation, -1);
}
/**
* Adds a sequence of bytes to this map. If the sequence of bytes has previously been added
* to this map then its assigned ordinal is returned.
* If the sequence of bytes has not been added to this map then a new ordinal is assigned
* and returned.
* <p>
* This operation is thread-safe.
*
* @param serializedRepresentation the sequence of bytes
* @param preferredOrdinal the preferred ordinal to assign, if not already assigned to
* another sequence of bytes and the given sequence of bytes has not previously been added
* @return the assigned ordinal
*/
public int getOrAssignOrdinal(ByteDataArray serializedRepresentation, int preferredOrdinal) {
int hash = HashCodes.hashCode(serializedRepresentation);
int ordinal = get(serializedRepresentation, hash);
return ordinal != -1 ? ordinal : assignOrdinal(serializedRepresentation, hash, preferredOrdinal);
}
/// acquire the lock before writing.
private synchronized int assignOrdinal(ByteDataArray serializedRepresentation, int hash, int preferredOrdinal) {
if (preferredOrdinal < -1 || preferredOrdinal > ORDINAL_MASK) {
throw new IllegalArgumentException(String.format(
"The given preferred ordinal %s is out of bounds and not within the closed interval [-1, %s]",
preferredOrdinal, ORDINAL_MASK));
}
if (size > sizeBeforeGrow) {
growKeyArray();
}
/// check to make sure that after acquiring the lock, the element still does not exist.
/// this operation is akin to double-checked locking which is 'fixed' with the JSR 133 memory model in JVM >= 1.5.
/// Note that this also requires pointersAndOrdinals be volatile so resizes are also visible
AtomicLongArray pao = pointersAndOrdinals;
int modBitmask = pao.length() - 1;
int bucket = hash & modBitmask;
long key = pao.get(bucket);
while (key != EMPTY_BUCKET_VALUE) {
if (compare(serializedRepresentation, key)) {
return (int) (key >>> BITS_PER_POINTER);
}
bucket = (bucket + 1) & modBitmask;
key = pao.get(bucket);
}
/// the ordinal for this object still does not exist in the list, even after the lock has been acquired.
/// it is up to this thread to add it at the current bucket position.
int ordinal = findFreeOrdinal(preferredOrdinal);
if (ordinal > ORDINAL_MASK) {
throw new IllegalStateException(String.format(
"Ordinal cannot be assigned. The to be assigned ordinal, %s, is greater than the maximum supported ordinal value of %s",
ordinal, ORDINAL_MASK));
}
long pointer = byteData.length();
VarInt.writeVInt(byteData, (int) serializedRepresentation.length());
/// Copying might cause a resize to the segmented array held by byteData
/// A reading thread may observe a null value for a segment during the creation
/// of a new segments array (see SegmentedByteArray.ensureCapacity).
serializedRepresentation.copyTo(byteData);
if (byteData.length() > MAX_BYTE_DATA_LENGTH) {
throw new IllegalStateException(String.format(
"The number of bytes for the serialized representations, %s, is too large and is greater than the maximum of %s bytes",
byteData.length(), MAX_BYTE_DATA_LENGTH));
}
key = ((long) ordinal << BITS_PER_POINTER) | pointer;
size++;
/// this set on the AtomicLongArray has volatile semantics (i.e. behaves like a monitor release).
/// Any other thread reading this element in the AtomicLongArray will have visibility to all memory writes this thread has made up to this point.
/// This means the entire byte sequence is guaranteed to be visible to any thread which reads the pointer to that data.
pao.set(bucket, key);
return ordinal;
}
/**
* If the preferredOrdinal has not already been used, mark it and use it. Otherwise,
* delegate to the FreeOrdinalTracker.
*/
private int findFreeOrdinal(int preferredOrdinal) {
if (preferredOrdinal != -1 && unusedPreviousOrdinals.get(preferredOrdinal)) {
unusedPreviousOrdinals.clear(preferredOrdinal);
return preferredOrdinal;
}
return freeOrdinalTracker.getFreeOrdinal();
}
/**
* Assign a predefined ordinal to a serialized representation.<p>
* <p>
* WARNING: THIS OPERATION IS NOT THREAD-SAFE.<p>
* WARNING: THIS OPERATION WILL NOT UPDATE THE FreeOrdinalTracker.
*
* @param serializedRepresentation the serialized representation
* @param ordinal the ordinal
*/
public void put(ByteDataArray serializedRepresentation, int ordinal) {
if (ordinal < 0 || ordinal > ORDINAL_MASK) {
throw new IllegalArgumentException(String.format(
"The given ordinal %s is out of bounds and not within the closed interval [0, %s]",
ordinal, ORDINAL_MASK));
}
if (size > sizeBeforeGrow) {
growKeyArray();
}
int hash = HashCodes.hashCode(serializedRepresentation);
AtomicLongArray pao = pointersAndOrdinals;
int modBitmask = pao.length() - 1;
int bucket = hash & modBitmask;
long key = pao.get(bucket);
while (key != EMPTY_BUCKET_VALUE) {
bucket = (bucket + 1) & modBitmask;
key = pao.get(bucket);
}
long pointer = byteData.length();
VarInt.writeVInt(byteData, (int) serializedRepresentation.length());
serializedRepresentation.copyTo(byteData);
if (byteData.length() > MAX_BYTE_DATA_LENGTH) {
throw new IllegalStateException(String.format(
"The number of bytes for the serialized representations, %s, is too large and is greater than the maximum of %s bytes",
byteData.length(), MAX_BYTE_DATA_LENGTH));
}
key = ((long) ordinal << BITS_PER_POINTER) | pointer;
size++;
pao.set(bucket, key);
}
public void recalculateFreeOrdinals() {
BitSet populatedOrdinals = new BitSet();
AtomicLongArray pao = pointersAndOrdinals;
for (int i = 0; i < pao.length(); i++) {
long key = pao.get(i);
if (key != EMPTY_BUCKET_VALUE) {
int ordinal = (int) (key >>> BITS_PER_POINTER);
populatedOrdinals.set(ordinal);
}
}
recalculateFreeOrdinals(populatedOrdinals);
}
public void reservePreviouslyPopulatedOrdinals(BitSet populatedOrdinals) {
unusedPreviousOrdinals = BitSet.valueOf(populatedOrdinals.toLongArray());
recalculateFreeOrdinals(populatedOrdinals);
}
private void recalculateFreeOrdinals(BitSet populatedOrdinals) {
freeOrdinalTracker.reset();
int length = populatedOrdinals.length();
int ordinal = populatedOrdinals.nextClearBit(0);
while (ordinal < length) {
freeOrdinalTracker.returnOrdinalToPool(ordinal);
ordinal = populatedOrdinals.nextClearBit(ordinal + 1);
}
freeOrdinalTracker.setNextEmptyOrdinal(length);
}
public BitSet getUnusedPreviousOrdinals() {
return unusedPreviousOrdinals;
}
/**
* Returns the ordinal for a previously added byte sequence. If this byte sequence has not been added to the map, then -1 is returned.<p>
* <p>
* This is intended for use in the client-side heap-safe double snapshot load.
*
* @param serializedRepresentation the serialized representation
* @return The ordinal for this serialized representation, or -1.
*/
public int get(ByteDataArray serializedRepresentation) {
return get(serializedRepresentation, HashCodes.hashCode(serializedRepresentation));
}
private int get(ByteDataArray serializedRepresentation, int hash) {
AtomicLongArray pao = pointersAndOrdinals;
int modBitmask = pao.length() - 1;
int bucket = hash & modBitmask;
long key = pao.get(bucket);
// Linear probing to resolve collisions
// Given the load factor it is guaranteed that the loop will terminate
// as there will be at least one empty bucket
// To ensure this is the case it is important that pointersAndOrdinals
// is read into a local variable and thereafter used, otherwise a concurrent
// size increase may break this invariant
while (key != EMPTY_BUCKET_VALUE) {
if (compare(serializedRepresentation, key)) {
return (int) (key >>> BITS_PER_POINTER);
}
bucket = (bucket + 1) & modBitmask;
key = pao.get(bucket);
}
return -1;
}
/**
* Create an array mapping the ordinals to pointers, so that they can be easily looked up
* when writing to blob streams.
*/
public void prepareForWrite() {
int maxOrdinal = 0;
AtomicLongArray pao = pointersAndOrdinals;
for (int i = 0; i < pao.length(); i++) {
long key = pao.get(i);
if (key != EMPTY_BUCKET_VALUE) {
int ordinal = (int) (key >>> BITS_PER_POINTER);
if (ordinal > maxOrdinal) {
maxOrdinal = ordinal;
}
}
}
long[] pbo = new long[maxOrdinal + 1];
Arrays.fill(pbo, -1);
for (int i = 0; i < pao.length(); i++) {
long key = pao.get(i);
if (key != EMPTY_BUCKET_VALUE) {
int ordinal = (int) (key >>> BITS_PER_POINTER);
pbo[ordinal] = key & POINTER_MASK;
}
}
pointersByOrdinal = pbo;
}
/**
* Reclaim space in the byte array used in the previous cycle, but not referenced in this cycle.<p>
* <p>
* This is achieved by shifting all used byte sequences down in the byte array, then updating
* the key array to reflect the new pointers and exclude the removed entries. This is also where ordinals
* which are unused are returned to the pool.<p>
*
* @param usedOrdinals a bit set representing the ordinals which are currently referenced by any image.
*/
public void compact(ThreadSafeBitSet usedOrdinals, int numShards, boolean focusHoleFillInFewestShards) {
long[] populatedReverseKeys = new long[size];
int counter = 0;
AtomicLongArray pao = pointersAndOrdinals;
for (int i = 0; i < pao.length(); i++) {
long key = pao.get(i);
if (key != EMPTY_BUCKET_VALUE) {
populatedReverseKeys[counter++] = key << BITS_PER_ORDINAL | key >>> BITS_PER_POINTER;
}
}
Arrays.sort(populatedReverseKeys);
SegmentedByteArray arr = byteData.getUnderlyingArray();
long currentCopyPointer = 0;
for (int i = 0; i < populatedReverseKeys.length; i++) {
int ordinal = (int) (populatedReverseKeys[i] & ORDINAL_MASK);
if (usedOrdinals.get(ordinal)) {
long pointer = populatedReverseKeys[i] >>> BITS_PER_ORDINAL;
int length = VarInt.readVInt(arr, pointer);
length += VarInt.sizeOfVInt(length);
if (currentCopyPointer != pointer) {
arr.copy(arr, pointer, currentCopyPointer, length);
}
populatedReverseKeys[i] = populatedReverseKeys[i] << BITS_PER_POINTER | currentCopyPointer;
currentCopyPointer += length;
} else {
freeOrdinalTracker.returnOrdinalToPool(ordinal);
populatedReverseKeys[i] = EMPTY_BUCKET_VALUE;
}
}
byteData.setPosition(currentCopyPointer);
if(focusHoleFillInFewestShards && numShards > 1)
freeOrdinalTracker.sort(numShards);
else
freeOrdinalTracker.sort();
// Reset the array then fill with compacted values
// Volatile store not required, could use plain store
// See VarHandles for JDK >= 9
for (int i = 0; i < pao.length(); i++) {
pao.lazySet(i, EMPTY_BUCKET_VALUE);
}
populateNewHashArray(pao, populatedReverseKeys);
size = usedOrdinals.cardinality();
pointersByOrdinal = null;
unusedPreviousOrdinals = null;
}
public long getPointerForData(int ordinal) {
long pointer = pointersByOrdinal[ordinal] & POINTER_MASK;
return pointer + VarInt.nextVLongSize(byteData.getUnderlyingArray(), pointer);
}
public boolean isReadyForWriting() {
return pointersByOrdinal != null;
}
public boolean isReadyForAddingObjects() {
return pointersByOrdinal == null;
}
public long getDataSize() {
return byteData.length();
}
public int maxOrdinal() {
int maxOrdinal = -1;
AtomicLongArray pao = pointersAndOrdinals;
for (int i = 0; i < pao.length(); i++) {
long key = pao.get(i);
if (key != EMPTY_BUCKET_VALUE) {
int ordinal = (int) (key >>> BITS_PER_POINTER);
if (ordinal > maxOrdinal) {
maxOrdinal = ordinal;
}
}
}
return maxOrdinal;
}
/**
* Compare the byte sequence contained in the supplied ByteDataBuffer with the
* sequence contained in the map pointed to by the specified key, byte by byte.
*/
private boolean compare(ByteDataArray serializedRepresentation, long key) {
long position = key & POINTER_MASK;
int sizeOfData = VarInt.readVInt(byteData.getUnderlyingArray(), position);
if (sizeOfData != serializedRepresentation.length()) {
return false;
}
position += VarInt.sizeOfVInt(sizeOfData);
for (int i = 0; i < sizeOfData; i++) {
if (serializedRepresentation.get(i) != byteData.get(position++)) {
return false;
}
}
return true;
}
/**
* Resize the ordinal map by increasing its capacity.
* <p>
* No action is take if the current capacity is sufficient for the given size.
* <p>
* WARNING: THIS OPERATION IS NOT THREAD-SAFE.
*
* @param size the size to increase to, rounded up to the nearest power of two.
*/
public void resize(int size) {
size = bucketSize(size);
if (pointersAndOrdinals.length() < size) {
growKeyArray(size);
}
}
/**
* Grow the key array. All of the values in the current array must be re-hashed and added to the new array.
*/
private void growKeyArray() {
int newSize = pointersAndOrdinals.length() << 1;
if (newSize < 0) {
throw new IllegalStateException("New size computed to grow the underlying array for the map is negative. " +
"This is most likely due to the total number of keys added to map has exceeded the max capacity of the keys map can hold. "
+
"Current array size :" + pointersAndOrdinals.length() + " and size to grow :" + newSize);
}
growKeyArray(newSize);
}
private void growKeyArray(int newSize) {
AtomicLongArray pao = pointersAndOrdinals;
assert (newSize & (newSize - 1)) == 0; // power of 2
assert pao.length() < newSize;
AtomicLongArray newKeys = emptyKeyArray(newSize);
long[] valuesToAdd = new long[size];
int counter = 0;
/// do not iterate over these values in the same order in which they appear in the hashed array.
/// if we do so, we cause large clusters of collisions to appear (because we resolve collisions with linear probing).
for (int i = 0; i < pao.length(); i++) {
long key = pao.get(i);
if (key != EMPTY_BUCKET_VALUE) {
valuesToAdd[counter++] = key;
}
}
Arrays.sort(valuesToAdd);
populateNewHashArray(newKeys, valuesToAdd, counter);
/// 70% load factor
sizeBeforeGrow = (int) (((float) newSize) * 0.7);
pointersAndOrdinals = newKeys;
}
/**
* Hash all of the existing values specified by the keys in the supplied long array
* into the supplied AtomicLongArray.
*/
private void populateNewHashArray(AtomicLongArray newKeys, long[] valuesToAdd) {
populateNewHashArray(newKeys, valuesToAdd, valuesToAdd.length);
}
private void populateNewHashArray(AtomicLongArray newKeys, long[] valuesToAdd, int length) {
assert length <= valuesToAdd.length;
int modBitmask = newKeys.length() - 1;
for (int i = 0; i < length; i++) {
long value = valuesToAdd[i];
if (value != EMPTY_BUCKET_VALUE) {
int hash = rehashPreviouslyAddedData(value);
int bucket = hash & modBitmask;
while (newKeys.get(bucket) != EMPTY_BUCKET_VALUE) {
bucket = (bucket + 1) & modBitmask;
}
// Volatile store not required, could use plain store
// See VarHandles for JDK >= 9
newKeys.lazySet(bucket, value);
}
}
}
/**
* Get the hash code for the byte array pointed to by the specified key.
*/
private int rehashPreviouslyAddedData(long key) {
long position = key & POINTER_MASK;
int sizeOfData = VarInt.readVInt(byteData.getUnderlyingArray(), position);
position += VarInt.sizeOfVInt(sizeOfData);
return HashCodes.hashCode(byteData.getUnderlyingArray(), position, sizeOfData);
}
/**
* Create an AtomicLongArray of the specified size, each value in the array will be EMPTY_BUCKET_VALUE
*/
private AtomicLongArray emptyKeyArray(int size) {
AtomicLongArray arr = new AtomicLongArray(size);
// Volatile store not required, could use plain store
// See VarHandles for JDK >= 9
for (int i = 0; i < arr.length(); i++) {
arr.lazySet(i, EMPTY_BUCKET_VALUE);
}
return arr;
}
public ByteDataArray getByteData() {
return byteData;
}
public AtomicLongArray getPointersAndOrdinals() {
return pointersAndOrdinals;
}
public static boolean isPointerAndOrdinalEmpty(long pointerAndOrdinal) {
return pointerAndOrdinal == EMPTY_BUCKET_VALUE;
}
public static long getPointer(long pointerAndOrdinal) {
return pointerAndOrdinal & POINTER_MASK;
}
public static int getOrdinal(long pointerAndOrdinal) {
return (int) (pointerAndOrdinal >>> BITS_PER_POINTER);
}
} | 9,017 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/HollowUnsafeHandle.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
import java.lang.reflect.Field;
import java.util.logging.Level;
import java.util.logging.Logger;
import sun.misc.Unsafe;
@SuppressWarnings("restriction")
public class HollowUnsafeHandle {
private static final Logger log = Logger.getLogger(HollowUnsafeHandle.class.getName());
private static final Unsafe unsafe;
static {
Field theUnsafe;
Unsafe u = null;
try {
theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
theUnsafe.setAccessible(true);
u = (Unsafe) theUnsafe.get(null);
} catch (Exception e) {
log.log(Level.SEVERE, "Unsafe access failed", e);
}
unsafe = u;
}
public static Unsafe getUnsafe() {
return unsafe;
}
}
| 9,018 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/ArrayByteData.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
/**
* A ByteData backed by a simple array of bytes.
*
* @author dkoszewnik
*
*/
public class ArrayByteData implements ByteData {
private final byte[] data;
public ArrayByteData(byte[] data) {
this.data = data;
}
@Override
public byte get(long position) {
return data[(int)position];
}
@Override
public long length() {
return data.length;
}
}
| 9,019 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/ByteDataArray.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.memory.pool.WastefulRecycler;
/**
* Writes data to a {@link SegmentedByteArray}, tracking the index to which it writes.
*
* @author dkoszewnik
*
*/
public class ByteDataArray {
private final SegmentedByteArray buf;
private long position;
public ByteDataArray() {
this(WastefulRecycler.DEFAULT_INSTANCE);
}
public ByteDataArray(ArraySegmentRecycler memoryRecycler) {
buf = new SegmentedByteArray(memoryRecycler);
}
public void write(byte b) {
buf.set(position++, b);
}
public void reset() {
position = 0;
}
public void setPosition(long position) {
this.position = position;
}
public long length() {
return position;
}
public void copyTo(ByteDataArray other) {
other.buf.copy(buf, 0, other.position, position);
other.position += position;
}
public void copyFrom(ByteData data, long startPosition, int length) {
buf.copy(data, startPosition, position, length);
position += length;
}
public void copyFrom(SegmentedByteArray data, long startPosition, int length) {
buf.copy(data, startPosition, position, length);
position += length;
}
public byte get(long index) {
return buf.get(index);
}
public SegmentedByteArray getUnderlyingArray() {
return buf;
}
}
| 9,020 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/SegmentedLongArray.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.DataOutputStream;
import java.io.IOException;
import sun.misc.Unsafe;
/**
* A segmented long array can grow without allocating successively larger blocks and copying memory.<p>
*
* Segment length is always a power of two so that the location of a given index can be found with mask and shift operations.<p>
*
* Conceptually this can be thought of as a single long array of undefined length. The currently allocated buffer will always be
* a multiple of the size of the segments. The buffer will grow automatically when a byte is written to an index greater than the
* currently allocated buffer.
*
* @author dkoszewnik
*
*/
@SuppressWarnings("restriction")
public class SegmentedLongArray {
private static final Unsafe unsafe = HollowUnsafeHandle.getUnsafe();
protected final long[][] segments;
protected final int log2OfSegmentSize;
protected final int bitmask;
public SegmentedLongArray(ArraySegmentRecycler memoryRecycler, long numLongs) {
this.log2OfSegmentSize = memoryRecycler.getLog2OfLongSegmentSize();
int numSegments = (int)((numLongs - 1) >>> log2OfSegmentSize) + 1;
long[][] segments = new long[numSegments][];
this.bitmask = (1 << log2OfSegmentSize) - 1;
for(int i=0;i<segments.length;i++) {
segments[i] = memoryRecycler.getLongArray();
}
/// The following assignment is purposefully placed *after* the population of all segments.
/// The final assignment after the initialization of the array guarantees that no thread
/// will see any of the array elements before assignment.
/// We can't risk the segment values being visible as null to any thread, because
/// FixedLengthElementArray uses Unsafe to access these values, which would cause the
/// JVM to crash with a segmentation fault.
this.segments = segments;
}
/**
* Set the long at the given index to the specified value
*
* @param index the index (eg. the long at index 0 occupies bytes 0-7, long at index 1 occupies bytes 8-15, etc.)
* @param value the long value
*/
public void set(long index, long value) {
int segmentIndex = (int)(index >> log2OfSegmentSize);
int longInSegment = (int)(index & bitmask);
unsafe.putLong(segments[segmentIndex], (long) Unsafe.ARRAY_LONG_BASE_OFFSET + (8 * longInSegment), value);
/// duplicate the longs here so that we can read faster.
if(longInSegment == 0 && segmentIndex != 0) {
unsafe.putLong(segments[segmentIndex - 1], (long) Unsafe.ARRAY_LONG_BASE_OFFSET + (8 * (1 << log2OfSegmentSize)), value);
}
}
/**
* Get the value of the long at the specified index.
*
* @param index the index (eg. the long at index 0 occupies bytes 0-7, long at index 1 occupies bytes 8-15, etc.)
* @return the long value
*/
public long get(long index) {
int segmentIndex = (int)(index >>> log2OfSegmentSize);
long ret = segments[segmentIndex][(int)(index & bitmask)];
return ret;
}
public void fill(long value) {
for(int i=0;i<segments.length;i++) {
long offset = Unsafe.ARRAY_LONG_BASE_OFFSET;
for(int j=0;j<segments[i].length;j++) {
unsafe.putLong(segments[i], offset, value);
offset += 8;
}
}
}
public void writeTo(DataOutputStream dos, long numLongs) throws IOException {
VarInt.writeVLong(dos, numLongs);
for(long i=0;i<numLongs;i++) {
dos.writeLong(get(i));
}
}
public void destroy(ArraySegmentRecycler memoryRecycler) {
for(int i=0;i<segments.length;i++) {
if(segments[i] != null)
memoryRecycler.recycleLongArray(segments[i]);
}
}
protected void readFrom(HollowBlobInput in, ArraySegmentRecycler memoryRecycler, long numLongs) throws
IOException {
int segmentSize = 1 << memoryRecycler.getLog2OfLongSegmentSize();
int segment = 0;
if(numLongs == 0)
return;
long fencepostLong = in.readLong();
while(numLongs > 0) {
long longsToCopy = Math.min(segmentSize, numLongs);
unsafe.putLong(segments[segment], (long) Unsafe.ARRAY_LONG_BASE_OFFSET, fencepostLong);
int longsCopied = 1;
while(longsCopied < longsToCopy) {
long l = in.readLong();
unsafe.putLong(segments[segment], (long) Unsafe.ARRAY_LONG_BASE_OFFSET + (8 * longsCopied++), l);
}
if(numLongs > longsCopied) {
unsafe.putLong(segments[segment], (long) Unsafe.ARRAY_LONG_BASE_OFFSET + (8 * longsCopied), in.readLong());
fencepostLong = segments[segment][longsCopied];
}
segment++;
numLongs -= longsCopied;
}
}
}
| 9,021 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/EncodedByteBuffer.java | /*
* Copyright 2016-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory;
import com.netflix.hollow.core.memory.encoding.BlobByteBuffer;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
/**
* {@code BlobByteBuffer} based implementation of variable length byte data that only supports read.
*/
public class EncodedByteBuffer implements VariableLengthData {
private BlobByteBuffer bufferView;
private long size;
public EncodedByteBuffer() {
this.size = 0;
}
@Override
public byte get(long index) {
if (index >= this.size) {
throw new IllegalStateException();
}
byte retVal = this.bufferView.getByte(this.bufferView.position() + index);
return retVal;
}
/**
* {@inheritDoc}
* This is achieved by initializing a {@code BlobByteBuffer} that is a view on the underlying {@code BlobByteBuffer}
* and advancing the position of the underlying buffer by <i>length</i> bytes.
*/
@Override
public void loadFrom(HollowBlobInput in, long length) throws IOException {
BlobByteBuffer buffer = in.getBuffer();
this.size = length;
buffer.position(in.getFilePointer());
this.bufferView = buffer.duplicate();
buffer.position(buffer.position() + length);
in.seek(in.getFilePointer() + length);
}
@Override
public void copy(ByteData src, long srcPos, long destPos, long length) {
throw new UnsupportedOperationException("Operation not supported in shared-memory mode");
}
@Override
public void orderedCopy(VariableLengthData src, long srcPos, long destPos, long length) {
throw new UnsupportedOperationException("Operation not supported in shared-memory mode");
}
@Override
public long size() {
return size;
}
}
| 9,022 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/encoding/FixedLengthMultipleOccurrenceElementArray.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.encoding;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
import java.util.stream.LongStream;
/**
* This class stores multiple instances of a fixed bit width element in a list of nodes. For
* example, it can store multiple 6 bit elements at different indices of the list of nodes.
*
* Under the hood, it uses a {@link FixedLengthElementArray} in order to implement compact storage
* that allows inserting multiple instances of the fixed length elements. It maintains at least
* enough space to hold up to the max number of occurrences of each element. This means that if
* there are 8 elements at one node index, it will contain enough space to store at least 8 elements
* at each index.
*
* Running out of space at any index triggers a relatively expensive resize operation, where we
* create storage of a multiple (currently 1.5x) of the previous storage and copy items over, but
* this can be alleviated by passing in a better guess for max elements per node.
* Note that this class is currently designed to be used for a relatively small number for
* bitsPerElement - it will not work for bitsPerElement greater than 60.
*/
public class FixedLengthMultipleOccurrenceElementArray {
private static final Logger LOG = Logger.getLogger(FixedLengthMultipleOccurrenceElementArray.class.getName());
private static final double RESIZE_MULTIPLE = 1.5;
private static final long NO_ELEMENT = 0L;
private final ArraySegmentRecycler memoryRecycler;
private final FixedLengthElementArray nodesWithOrdinalZero;
private final int bitsPerElement;
private final long elementMask;
private final long numNodes;
private volatile FixedLengthElementArray storage;
private volatile int maxElementsPerNode;
public FixedLengthMultipleOccurrenceElementArray(ArraySegmentRecycler memoryRecycler,
long numNodes, int bitsPerElement, int maxElementsPerNodeEstimate) {
this.nodesWithOrdinalZero = new FixedLengthElementArray(memoryRecycler, numNodes);
this.storage = new FixedLengthElementArray(
memoryRecycler, numNodes * bitsPerElement * maxElementsPerNodeEstimate);
this.memoryRecycler = memoryRecycler;
this.bitsPerElement = bitsPerElement;
this.elementMask = (1L << bitsPerElement) - 1;
this.numNodes = numNodes;
this.maxElementsPerNode = maxElementsPerNodeEstimate;
}
/**
* This method adds an element at nodeIndex. Note that this does not check for duplicates; if
* the element already exists, another instance of it will be added.
* This method is not thread-safe - you cannot call this method concurrently with itself or with
* {@link #getElements}.
*
* @param nodeIndex the node index
* @param element the element to add
*/
public void addElement(long nodeIndex, long element) {
if (element > elementMask) {
throw new IllegalArgumentException("Element " + element + " does not fit in "
+ bitsPerElement + " bits");
}
if (nodeIndex >= numNodes) {
throw new IllegalArgumentException("Provided nodeIndex " + nodeIndex
+ " greater then numNodes " + numNodes);
}
if (element == NO_ELEMENT) {
// we use 0 to indicate an "empty" element, so we have to store ordinal zero here
nodesWithOrdinalZero.setElementValue(nodeIndex, 1, 1);
return;
}
long bucketStart = nodeIndex * maxElementsPerNode * bitsPerElement;
long currentIndex;
int offset = 0;
do {
currentIndex = bucketStart + offset * bitsPerElement;
offset++;
} while (storage.getElementValue(currentIndex, bitsPerElement, elementMask) != NO_ELEMENT
&& offset < maxElementsPerNode);
if (storage.getElementValue(currentIndex, bitsPerElement, elementMask) != NO_ELEMENT) {
LOG.fine("Invoking resizeElementsPerNode when adding element=" + element);
// we're full at this index - resize, then figure out the new current index
resizeElementsPerNode();
currentIndex = nodeIndex * maxElementsPerNode * bitsPerElement + offset * bitsPerElement;
}
/* we're adding to the first empty spot from the beginning of the bucket - this is
* preferable to adding at the end because we want our getElements method to be fast, and
* it's okay for addElement to be comparatively slow */
storage.setElementValue(currentIndex, bitsPerElement, element);
}
/**
* Return a list of elements at the specified node index. The returned list may contain
* duplicates.
* This method not thread-safe - the caller must ensure that no one calls {@link #addElement}
* concurrently with this method, but calling this method concurrently with itself is safe.
*
* @param nodeIndex the node index
* @return a list of element at the node index, or null if nodeIndex is negative
*/
public List<Long> getElements(long nodeIndex) {
if (nodeIndex < 0) {
return null;
}
long bucketStart = nodeIndex * maxElementsPerNode * bitsPerElement;
List<Long> ret = new ArrayList<>();
if (nodesWithOrdinalZero.getElementValue(nodeIndex, 1, 1) != NO_ELEMENT) {
// 0 indicates an "empty" element, so we fetch ordinal zeros from nodesWithOrdinalZero
ret.add(NO_ELEMENT);
}
for (int offset = 0; offset < maxElementsPerNode; offset++) {
long element = storage.getElementValue(bucketStart + offset * bitsPerElement,
bitsPerElement, elementMask);
if (element == NO_ELEMENT) {
break; // we have exhausted the elements at this index
}
ret.add(element);
}
return ret;
}
/**
* A destructor function - call to free up the underlying memory.
*/
public void destroy() {
storage.destroy(memoryRecycler);
}
/**
* Resize the underlying storage to a multiple of what it currently is. This method is not thread-safe.
*/
private void resizeElementsPerNode() {
LOG.warning("Dynamically resizing no. of elements per node is an expensive operation, it can be avoided by specifying a better estimate upfront");
int currentElementsPerNode = maxElementsPerNode;
int newElementsPerNode = (int) Math.ceil((double) currentElementsPerNode * RESIZE_MULTIPLE);
if (newElementsPerNode <= currentElementsPerNode) {
throw new IllegalStateException("cannot resize fixed length array from "
+ currentElementsPerNode + " to " + newElementsPerNode);
}
long numBits = numNodes * bitsPerElement * newElementsPerNode;
LOG.fine(String.format("Resizing storage: oldStorage=%sbytes, newStorage=%sbits/%sbytes (numNodes=%s bitsPerElement=%s newElementsPerNode=%s)",
storage.approxHeapFootprintInBytes(), numBits, numBits/8, numNodes, bitsPerElement, newElementsPerNode));
FixedLengthElementArray newStorage = new FixedLengthElementArray(memoryRecycler,
numNodes * bitsPerElement * newElementsPerNode);
LongStream.range(0, numNodes).forEach(nodeIndex -> {
long currentBucketStart = nodeIndex * currentElementsPerNode * bitsPerElement;
long newBucketStart = nodeIndex * newElementsPerNode * bitsPerElement;
for (int offset = 0; offset < currentElementsPerNode; offset++) {
long element = storage.getElementValue(currentBucketStart + offset * bitsPerElement,
bitsPerElement, elementMask);
if (element == NO_ELEMENT) {
break; // we have exhausted the elements at this index
}
newStorage.setElementValue(
newBucketStart + offset * bitsPerElement, bitsPerElement, element);
}
});
storage.destroy(memoryRecycler);
storage = newStorage;
maxElementsPerNode = newElementsPerNode;
}
public int getMaxElementsPerNode() {
return maxElementsPerNode;
}
public long approxHeapFootprintInBytes() {
return storage.approxHeapFootprintInBytes()
+ nodesWithOrdinalZero.approxHeapFootprintInBytes();
}
}
| 9,023 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/encoding/ZigZag.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.encoding;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
/**
* Zig-zag encoding. Used to encode {@link FieldType#INT} and {@link FieldType#LONG} because smaller absolute
* values can be encoded using fewer bits.
*/
public class ZigZag {
public static long encodeLong(long l) {
return (l << 1) ^ (l >> 63);
}
public static long decodeLong(long l) {
return (l >>> 1) ^ ((l << 63) >> 63);
}
public static int encodeInt(int i) {
return (i << 1) ^ (i >> 31);
}
public static int decodeInt(int i) {
return (i >>> 1) ^ ((i << 31) >> 31);
}
}
| 9,024 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/encoding/EncodedLongBuffer.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.encoding;
import static java.lang.Math.ceil;
import com.netflix.hollow.core.memory.FixedLengthData;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
/**
* This class allows for storage and retrieval of fixed-length data in ByteBuffers. As a result there two ways to obtain
* an element value from the bit string at a given bit index.
* <br><br>
* {@link #getElementValue(long, int)} or {@link #getElementValue(long, int, long)}: at byte index offsets within
* the buffers.
* <br><br>
* {@link #getLargeElementValue(long, int)} or {@link #getLargeElementValue(long, int, long)}: by reading two long
* values and then composing an element value from bits that cover the two.
* <br><br>
* In the counterpart {@link FixedLengthElementArray} implementation a long read into the last 8 bytes of data was safe
* because of a padding of 1 long at the end. Instead, this implementation returns a zero byte if the 8 byte range past
* the buffer capacity is queried.
* <br><br>
* {@link #getElementValue} can only support element values of 58 bits or less. This is because reading values that are
* unaligned with byte boundaries requires shifting by the number of bits the address is offset by within a byte. For
* 58 bit values, the offset from a byte boundary can be as high as 6 bits. 58 bits can be shifted 6 bits and still fit
* within the 64 bit space. For 59 bit values the offset from a byte boundary can be as high as 7 bits. Shifting a
* 59 bit value by 6 or 7 bits will both overflow the 64 bit space, resulting in an invalid value when reading.
*/
@SuppressWarnings("restriction")
public class EncodedLongBuffer implements FixedLengthData {
private BlobByteBuffer bufferView;
private long maxByteIndex = -1;
public EncodedLongBuffer() {}
/**
* Returns a new EncodedLongBuffer from deserializing the given input. The value of the first variable length integer
* in the input indicates how many long values are to then be read from the input.
*
* @param in Hollow Blob Input to read data (a var int and then that many longs) from
* @return new EncodedLongBuffer containing data read from input
*/
public static EncodedLongBuffer newFrom(HollowBlobInput in) throws IOException {
long numLongs = VarInt.readVLong(in);
return newFrom(in, numLongs);
}
/**
* Returns a new EncodedLongBuffer from deserializing numLongs longs from given input.
*
* @param in Hollow Blob Input to read numLongs longs from
* @return new EncodedLongBuffer containing data read from input
*/
public static EncodedLongBuffer newFrom(HollowBlobInput in, long numLongs) throws IOException {
EncodedLongBuffer buf = new EncodedLongBuffer();
buf.loadFrom(in, numLongs);
return buf;
}
private void loadFrom(HollowBlobInput in, long numLongs) throws IOException {
BlobByteBuffer buffer = in.getBuffer();
if(numLongs == 0)
return;
this.maxByteIndex = (numLongs * Long.BYTES) - 1;
buffer.position(in.getFilePointer());
this.bufferView = buffer.duplicate();
buffer.position(buffer.position() + (numLongs * Long.BYTES));
in.seek(in.getFilePointer() + (numLongs * Long.BYTES));
}
@Override
public long getElementValue(long index, int bitsPerElement) {
return getElementValue(index, bitsPerElement, ((1L << bitsPerElement) - 1));
}
@Override
public long getElementValue(long index, int bitsPerElement, long mask) {
long whichByte = index >>> 3;
int whichBit = (int) (index & 0x07);
if (whichByte + ceil((float) bitsPerElement/8) > this.maxByteIndex + 1) {
throw new IllegalStateException();
}
long longVal = this.bufferView.getLong(this.bufferView.position() + whichByte);
long l = longVal >>> whichBit;
return l & mask;
}
@Override
public long getLargeElementValue(long index, int bitsPerElement) {
long mask = bitsPerElement == 64 ? -1 : ((1L << bitsPerElement) - 1);
return getLargeElementValue(index, bitsPerElement, mask);
}
@Override
public long getLargeElementValue(long index, int bitsPerElement, long mask) {
long whichLong = index >>> 6;
int whichBit = (int) (index & 0x3F);
long l = this.bufferView.getLong(bufferView.position() + whichLong * Long.BYTES) >>> whichBit;
int bitsRemaining = 64 - whichBit;
if (bitsRemaining < bitsPerElement) {
whichLong++;
l |= this.bufferView.getLong(bufferView.position() + whichLong * Long.BYTES) << bitsRemaining;
}
return l & mask;
}
@Override
public void setElementValue(long index, int bitsPerElement, long value) {
throw new UnsupportedOperationException("Not supported in shared-memory mode");
}
@Override
public void copyBits(FixedLengthData copyFrom, long sourceStartBit, long destStartBit, long numBits){
throw new UnsupportedOperationException("Not supported in shared-memory mode");
}
@Override
public void incrementMany(long startBit, long increment, long bitsBetweenIncrements, int numIncrements){
throw new UnsupportedOperationException("Not supported in shared-memory mode");
}
@Override
public void clearElementValue(long index, int bitsPerElement) {
throw new UnsupportedOperationException("Not supported in shared-memory mode");
}
}
| 9,025 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/encoding/BlobByteBuffer.java | package com.netflix.hollow.core.memory.encoding;
import static java.nio.channels.FileChannel.MapMode.READ_ONLY;
import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
/**
* <p>A stitching of {@link MappedByteBuffer}s to operate on large memory mapped blobs. {@code MappedByteBuffer} is
* limited to mapping memory of integral size. Note that that JDK 14 will introduce improved API for accessing foreign
* memory and replace {@code MappedByteBuffer}.
*
* This class is not thread safe, but it *is* safe to share the underlying Byte Buffers for parallel reads</p>
*
* <p>The largest blob size supported is ~2 exobytes. Presumably other limits in Hollow or practical limits
* are reached before encountering this limit.</p>
*
* @author Sunjeet Singh
* Tim Taylor
*/
public final class BlobByteBuffer {
public static final int MAX_SINGLE_BUFFER_CAPACITY = 1 << 30; // largest, positive power-of-two int
private final ByteBuffer[] spine; // array of MappedByteBuffers
private final long capacity; // in bytes
private final int shift;
private final int mask;
private long position; // within index 0 to capacity-1 in the underlying ByteBuffer
private BlobByteBuffer(long capacity, int shift, int mask, ByteBuffer[] spine) {
this(capacity, shift, mask, spine, 0);
}
private BlobByteBuffer(long capacity, int shift, int mask, ByteBuffer[] spine, long position) {
if (!spine[0].order().equals(ByteOrder.BIG_ENDIAN)) {
throw new UnsupportedOperationException("Little endian memory layout is not supported");
}
this.capacity = capacity;
this.shift = shift;
this.mask = mask;
this.position = position;
// The following assignment is purposefully placed *after* the population of all segments (this method is called
// after mmap). The final assignment after the initialization of the array of MappedByteBuffers guarantees that
// no thread will see any of the array elements before assignment.
this.spine = spine;
}
/**
* Returns a view on the current {@code BlobByteBuffer} as a new {@code BlobByteBuffer}.
* The returned buffer's capacity, shift, mark, spine, and position will be identical to those of this buffer.
* @return a new {@code BlobByteBuffer} which is view on the current {@code BlobByteBuffer}
*/
public BlobByteBuffer duplicate() {
return new BlobByteBuffer(this.capacity, this.shift, this.mask, this.spine, this.position);
}
/**
* mmap the entire contents of FileChannel into an array of {@code MappedByteBuffer}s, each of size singleBufferCapacity.
* @param channel FileChannel for file to be mmap-ed
* @param singleBufferCapacity Size of individual MappedByteBuffers in array of {@code MappedByteBuffer}s required
* to map the entire file channel. It must be a power of 2, and due to {@code MappedByteBuffer}
* constraints it is limited to the max integer that is a power of 2.
* @return BlobByteBuffer containing an array of {@code MappedByteBuffer}s that mmap-ed the entire file channel
* @throws IOException
*/
public static BlobByteBuffer mmapBlob(FileChannel channel, int singleBufferCapacity) throws IOException {
long size = channel.size();
if (size == 0) {
throw new IllegalStateException("File to be mmap-ed has no data");
}
if ((singleBufferCapacity & (singleBufferCapacity - 1)) != 0) { // should be a power of 2
throw new IllegalArgumentException("singleBufferCapacity must be a power of 2");
}
// divide into N buffers with an int capacity that is a power of 2
final int bufferCapacity = size > (long) singleBufferCapacity
? singleBufferCapacity
: Integer.highestOneBit((int) size);
long bufferCount = size % bufferCapacity == 0
? size / (long)bufferCapacity
: (size / (long)bufferCapacity) + 1;
if (bufferCount > Integer.MAX_VALUE)
throw new IllegalArgumentException("file too large; size=" + size);
int shift = 31 - Integer.numberOfLeadingZeros(bufferCapacity); // log2
int mask = (1 << shift) - 1;
ByteBuffer[] spine = new MappedByteBuffer[(int)bufferCount];
for (int i = 0; i < bufferCount; i++) {
long pos = (long)i * bufferCapacity;
int cap = i == (bufferCount - 1)
? (int)(size - pos)
: bufferCapacity;
ByteBuffer buffer = channel.map(READ_ONLY, pos, cap);
/*
* if (!((MappedByteBuffer) buffer).isLoaded()) // TODO(timt): make pre-fetching configurable
* ((MappedByteBuffer) buffer).load();
*/
spine[i] = buffer;
}
return new BlobByteBuffer(size, shift, mask, spine);
}
/**
* Return position in bytes.
* @return position in bytes
*/
public long position() {
return this.position;
}
/**
* Set position, in bytes.
* @param position the byte index to set position to
* @return new position in bytes
*/
public BlobByteBuffer position(long position) {
if (position > capacity || position < 0)
throw new IllegalArgumentException("invalid position; position=" + position + " capacity=" + capacity);
this.position = position;
return this;
}
/**
* Reads the byte at the given index.
* @param index byte index (from offset 0 in the backing BlobByteBuffer) at which to read byte value
* @return byte at the given index
* @throws IndexOutOfBoundsException if index out of bounds of the backing buffer
*/
public byte getByte(long index) throws BufferUnderflowException {
if (index < capacity) {
int spineIndex = (int)(index >>> (shift));
int bufferIndex = (int)(index & mask);
return spine[spineIndex].get(bufferIndex);
}
else {
assert(index < capacity + Long.BYTES);
// this situation occurs when read for bits near the end of the buffer requires reading a long value that
// extends past the buffer capacity by upto Long.BYTES bytes. To handle this case,
// return 0 for (index >= capacity - Long.BYTES && index < capacity )
// these zero bytes will be discarded anyway when the returned long value is shifted to get the queried bits
return (byte) 0;
}
}
/**
* Return the long value starting from given byte index. This method is thread safe.
* @param startByteIndex byte index (from offset 0 in the backing BlobByteBuffer) at which to start reading long value
* @return long value
*/
public long getLong(long startByteIndex) throws BufferUnderflowException {
int alignmentOffset = (int)(startByteIndex - this.position()) % Long.BYTES;
long nextAlignedPos = startByteIndex - alignmentOffset + Long.BYTES;
byte[] bytes = new byte[Long.BYTES];
for (int i = 0; i < Long.BYTES; i ++ ) {
bytes[i] = getByte(bigEndian(startByteIndex + i, nextAlignedPos));
}
return ((((long) (bytes[7] )) << 56) |
(((long) (bytes[6] & 0xff)) << 48) |
(((long) (bytes[5] & 0xff)) << 40) |
(((long) (bytes[4] & 0xff)) << 32) |
(((long) (bytes[3] & 0xff)) << 24) |
(((long) (bytes[2] & 0xff)) << 16) |
(((long) (bytes[1] & 0xff)) << 8) |
(((long) (bytes[0] & 0xff)) ));
}
/**
* Given big-endian byte order, returns the position into the buffer for a given byte index. Java nio DirectByteBuffers
* are by default big-endian. Big-endianness is validated in the constructor.
* @param index byte index
* @param boundary index of the next 8-byte aligned byte
* @return position in buffer
*/
private long bigEndian(long index, long boundary) {
long result;
if (index < boundary) {
result = (boundary - Long.BYTES) + (boundary - index) - 1;
} else {
result = boundary + (boundary + Long.BYTES - index) - 1;
}
return result;
}
}
| 9,026 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/encoding/GapEncodedVariableLengthIntegerReader.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.encoding;
import com.netflix.hollow.core.memory.ByteDataArray;
import com.netflix.hollow.core.memory.SegmentedByteArray;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.memory.pool.WastefulRecycler;
import com.netflix.hollow.core.read.HollowBlobInput;
import com.netflix.hollow.core.util.IOUtils;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
public class GapEncodedVariableLengthIntegerReader {
public static GapEncodedVariableLengthIntegerReader EMPTY_READER = new GapEncodedVariableLengthIntegerReader(null, 0) {
@Override
public int nextElement() {
return Integer.MAX_VALUE;
}
};
private final SegmentedByteArray data;
private final int numBytes;
private int currentPosition;
private int nextElement;
private int elementIndex;
public GapEncodedVariableLengthIntegerReader(SegmentedByteArray data, int numBytes) {
this.data = data;
this.numBytes = numBytes;
reset();
}
public boolean isEmpty() {
return numBytes == 0;
}
public void advance() {
if(currentPosition == numBytes) {
nextElement = Integer.MAX_VALUE;
} else {
int nextElementDelta = VarInt.readVInt(data, currentPosition);
currentPosition += VarInt.sizeOfVInt(nextElementDelta);
nextElement += nextElementDelta;
elementIndex++;
}
}
public int nextElement() {
return nextElement;
}
public int elementIndex() {
return elementIndex;
}
public void reset() {
currentPosition = 0;
elementIndex = -1;
nextElement = 0;
advance();
}
public int remainingElements() {
int remainingElementCount = 0;
while(nextElement != Integer.MAX_VALUE) {
remainingElementCount++;
advance();
}
return remainingElementCount;
}
public void destroy() {
if(data != null)
data.destroy();
}
public void writeTo(OutputStream os) throws IOException {
VarInt.writeVInt(os, numBytes);
data.writeTo(os, 0, numBytes);
}
public static GapEncodedVariableLengthIntegerReader readEncodedDeltaOrdinals(HollowBlobInput in, ArraySegmentRecycler memoryRecycler) throws IOException {
SegmentedByteArray arr = new SegmentedByteArray(memoryRecycler);
long numBytesEncodedOrdinals = VarInt.readVLong(in);
arr.loadFrom(in, numBytesEncodedOrdinals);
return new GapEncodedVariableLengthIntegerReader(arr, (int)numBytesEncodedOrdinals);
}
public static void copyEncodedDeltaOrdinals(HollowBlobInput in, DataOutputStream... os) throws IOException {
long numBytesEncodedOrdinals = IOUtils.copyVLong(in, os);
IOUtils.copyBytes(in, os, numBytesEncodedOrdinals);
}
public static void discardEncodedDeltaOrdinals(HollowBlobInput in) throws IOException {
long numBytesToSkip = VarInt.readVLong(in);
while(numBytesToSkip > 0) {
numBytesToSkip -= in.skipBytes(numBytesToSkip);
}
}
public static GapEncodedVariableLengthIntegerReader combine(GapEncodedVariableLengthIntegerReader reader1, GapEncodedVariableLengthIntegerReader reader2, ArraySegmentRecycler memoryRecycler) {
reader1.reset();
reader2.reset();
ByteDataArray arr = new ByteDataArray(memoryRecycler);
int cur = 0;
while(reader1.nextElement() != Integer.MAX_VALUE || reader2.nextElement() != Integer.MAX_VALUE) {
if(reader1.nextElement() < reader2.nextElement()) {
VarInt.writeVInt(arr, reader1.nextElement() - cur);
cur = reader1.nextElement();
reader1.advance();
} else if(reader2.nextElement() < reader1.nextElement()) {
VarInt.writeVInt(arr, reader2.nextElement() - cur);
cur = reader2.nextElement();
reader2.advance();
} else {
VarInt.writeVInt(arr, reader1.nextElement() - cur);
cur = reader1.nextElement();
reader1.advance();
reader2.advance();
}
}
return new GapEncodedVariableLengthIntegerReader(arr.getUnderlyingArray(), (int)arr.length());
}
/**
* Splits this {@code GapEncodedVariableLengthIntegerReader} into {@code numSplits} new instances.
* The original data is not cleaned up.
*
* @param numSplits the number of instances to split into, should be a power of 2.
* @return an array of {@code GapEncodedVariableLengthIntegerReader} instances populated with the results of the split.
*/
public GapEncodedVariableLengthIntegerReader[] split(int numSplits) {
if (numSplits<=0 || !((numSplits&(numSplits-1))==0)) {
throw new IllegalStateException("Split should only be called with powers of 2, it was called with " + numSplits);
}
final int toMask = numSplits - 1;
final int toOrdinalShift = 31 - Integer.numberOfLeadingZeros(numSplits);
GapEncodedVariableLengthIntegerReader[] to = new GapEncodedVariableLengthIntegerReader[numSplits];
List<Integer> ordinals = new ArrayList<>();
reset();
while(nextElement() != Integer.MAX_VALUE) {
ordinals.add(nextElement());
advance();
}
ByteDataArray[] splitOrdinals = new ByteDataArray[numSplits];
int previousSplitOrdinal[] = new int[numSplits];
for (int ordinal : ordinals) {
int toIndex = ordinal & toMask;
int toOrdinal = ordinal >> toOrdinalShift;
if (splitOrdinals[toIndex] == null) {
splitOrdinals[toIndex] = new ByteDataArray(WastefulRecycler.DEFAULT_INSTANCE);
}
VarInt.writeVInt(splitOrdinals[toIndex], toOrdinal - previousSplitOrdinal[toIndex]);
previousSplitOrdinal[toIndex] = toOrdinal;
}
for(int i=0;i<numSplits;i++) {
if (splitOrdinals[i] == null) {
to[i] = EMPTY_READER;
} else {
to[i] = new GapEncodedVariableLengthIntegerReader(splitOrdinals[i].getUnderlyingArray(), (int) splitOrdinals[i].length());
}
}
return to;
}
/**
* Join an array of {@code GapEncodedVariableLengthIntegerReader} instances into one.
* The original data is not cleaned up.
*
* @param from the array of {@code GapEncodedVariableLengthIntegerReader} to join, should have a power of 2 number of elements.
* @return an instance of {@code GapEncodedVariableLengthIntegerReader} with the joined result.
*/
public static GapEncodedVariableLengthIntegerReader join(GapEncodedVariableLengthIntegerReader[] from) {
if (from==null) {
throw new IllegalStateException("Join invoked on a null input array");
}
if (from.length<=0 || !((from.length&(from.length-1))==0)) {
throw new IllegalStateException("Join should only be called with powers of 2, it was called with " + from.length);
}
int numSplits = from.length;
final int fromMask = numSplits - 1;
final int fromOrdinalShift = 31 - Integer.numberOfLeadingZeros(numSplits);
int joinedMaxOrdinal = -1;
HashSet<Integer>[] fromOrdinals = new HashSet[from.length];
for (int i=0;i<from.length;i++) {
fromOrdinals[i] = new HashSet<>();
if (from[i] == null) {
continue;
}
from[i].reset();
while(from[i].nextElement() != Integer.MAX_VALUE) {
int splitOrdinal = from[i].nextElement();
fromOrdinals[i].add(splitOrdinal);
joinedMaxOrdinal = Math.max(joinedMaxOrdinal, splitOrdinal*numSplits + i);
from[i].advance();
}
}
ByteDataArray toRemovals = null;
int previousOrdinal = 0;
for (int ordinal=0;ordinal<=joinedMaxOrdinal;ordinal++) {
int fromIndex = ordinal & fromMask;
int fromOrdinal = ordinal >> fromOrdinalShift;
if (fromOrdinals[fromIndex].contains(fromOrdinal)) {
if (toRemovals == null) {
toRemovals = new ByteDataArray(WastefulRecycler.DEFAULT_INSTANCE);
}
VarInt.writeVInt(toRemovals, ordinal - previousOrdinal);
previousOrdinal = ordinal;
}
}
if (toRemovals == null) {
return EMPTY_READER;
} else {
return new GapEncodedVariableLengthIntegerReader(toRemovals.getUnderlyingArray(), (int) toRemovals.length());
}
}
}
| 9,027 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/encoding/FixedLengthElementArray.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.encoding;
import com.netflix.hollow.core.memory.FixedLengthData;
import com.netflix.hollow.core.memory.HollowUnsafeHandle;
import com.netflix.hollow.core.memory.SegmentedLongArray;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
import sun.misc.Unsafe;
/**
* Note that for performance reasons, this class makes use of {@code sun.misc.Unsafe} to perform
* unaligned memory reads. This is designed exclusively for little-endian architectures, and has only been
* fully battle-tested on x86-64. As a result there two ways to obtain an element value from the bit string at a given
* bit index.
* <br><br>
* {@link #getElementValue(long, int)} or {@link #getElementValue(long, int, long)}: These methods leverage unsafe
* unaligned (or misaligned) memory reads of {@code long} values from {@code long[]} array segments at byte index
* offsets within the arrays.
* <br><br>
* {@link #getLargeElementValue(long, int)} or {@link #getLargeElementValue(long, int, long)}: These methods leverage
* safe access to {@code long[]} array segments but require more work to compose an element value from bits that cover
* two underlying elements in {@code long[]} array segments.
* <br><br>
* The methods reading unaligned values need to ensure a segmentation fault (SEGV) does not occur when when reading a
* {@code long} value at the last byte of the last index in a {@code long[]} array segment. A {@code long[]} array
* segment is allocated with a length that is one plus the desired length to ensure such access is safe (see the
* implementations of {@link ArraySegmentRecycler#getLongArray()}.
* <br><br>
* In addition, the value of the last underlying element is the same as the value of the first underlying element in the
* subsequent array segment (see {@link SegmentedLongArray#set}). This ensures that an element (n-bit) value can be
* correctly returned when performing an unaligned read that would otherwise cross an array segment boundary.
* <br><br>
* Furthermore, there is an additional constraint that the element values cannot exceed 58 bits. This is because reading
* values that are unaligned with byte boundaries requires shifting by the number of bits the address is offset by
* within a byte. For 58 bit values, the offset from a byte boundary can be as high as 6 bits. 58 bits can be shifted 6
* bits and still fit within the 64 bit space. For 59 bit values the offset from a byte boundary can be as high as
* 7 bits. Shifting a 59 bit value by 6 or 7 bits will both overflow the 64 bit space, resulting in an invalid value
* when reading.
*
*/
@SuppressWarnings("restriction")
public class FixedLengthElementArray extends SegmentedLongArray implements FixedLengthData {
private static final Unsafe unsafe = HollowUnsafeHandle.getUnsafe();
private final int log2OfSegmentSizeInBytes;
private final int byteBitmask;
private final long sizeBits;
public FixedLengthElementArray(ArraySegmentRecycler memoryRecycler, long numBits) {
super(memoryRecycler, ((numBits - 1) >>> 6) + 1);
this.log2OfSegmentSizeInBytes = log2OfSegmentSize + 3;
this.byteBitmask = (1 << log2OfSegmentSizeInBytes) - 1;
this.sizeBits = numBits;
}
public long approxHeapFootprintInBytes() {
return sizeBits / 8;
}
@Override
public void clearElementValue(long index, int bitsPerElement) {
long whichLong = index >>> 6;
int whichBit = (int) (index & 0x3F);
long mask = ((1L << bitsPerElement) - 1);
set(whichLong, get(whichLong) & ~(mask << whichBit));
int bitsRemaining = 64 - whichBit;
if (bitsRemaining < bitsPerElement)
set(whichLong + 1, get(whichLong + 1) & ~(mask >>> bitsRemaining));
}
@Override
public void setElementValue(long index, int bitsPerElement, long value) {
long whichLong = index >>> 6;
int whichBit = (int) (index & 0x3F);
set(whichLong, get(whichLong) | (value << whichBit));
int bitsRemaining = 64 - whichBit;
if (bitsRemaining < bitsPerElement)
set(whichLong + 1, get(whichLong + 1) | (value >>> bitsRemaining));
}
@Override
public long getElementValue(long index, int bitsPerElement) {
return getElementValue(index, bitsPerElement, ((1L << bitsPerElement) - 1));
}
@Override
public long getElementValue(long index, int bitsPerElement, long mask) {
long whichByte = index >>> 3;
int whichBit = (int) (index & 0x07);
int whichSegment = (int) (whichByte >>> log2OfSegmentSizeInBytes);
long[] segment = segments[whichSegment];
long elementByteOffset = (long) Unsafe.ARRAY_LONG_BASE_OFFSET + (whichByte & byteBitmask);
long longVal = unsafe.getLong(segment, elementByteOffset);
long l = longVal >>> whichBit;
return l & mask;
}
@Override
public long getLargeElementValue(long index, int bitsPerElement) {
long mask = bitsPerElement == 64 ? -1 : ((1L << bitsPerElement) - 1);
return getLargeElementValue(index, bitsPerElement, mask);
}
@Override
public long getLargeElementValue(long index, int bitsPerElement, long mask) {
long whichLong = index >>> 6;
int whichBit = (int) (index & 0x3F);
long l = get(whichLong) >>> whichBit;
int bitsRemaining = 64 - whichBit;
if (bitsRemaining < bitsPerElement) {
whichLong++;
l |= get(whichLong) << bitsRemaining;
}
return l & mask;
}
@Override
public void copyBits(FixedLengthData copyFrom, long sourceStartBit, long destStartBit, long numBits) {
if(numBits == 0)
return;
if ((destStartBit & 63) != 0) {
int fillBits = (int) Math.min(64 - (destStartBit & 63), numBits);
long fillValue = copyFrom.getLargeElementValue(sourceStartBit, fillBits);
setElementValue(destStartBit, fillBits, fillValue);
destStartBit += fillBits;
sourceStartBit += fillBits;
numBits -= fillBits;
}
long currentWriteLong = destStartBit >>> 6;
while (numBits >= 64) {
long l = copyFrom.getLargeElementValue(sourceStartBit, 64, -1);
set(currentWriteLong, l);
numBits -= 64;
sourceStartBit += 64;
currentWriteLong++;
}
if (numBits != 0) {
destStartBit = currentWriteLong << 6;
long fillValue = copyFrom.getLargeElementValue(sourceStartBit, (int) numBits);
setElementValue(destStartBit, (int) numBits, fillValue);
}
}
@Override
public void incrementMany(long startBit, long increment, long bitsBetweenIncrements, int numIncrements) {
long endBit = startBit + (bitsBetweenIncrements * numIncrements);
for(; startBit<endBit; startBit += bitsBetweenIncrements) {
increment(startBit, increment);
}
}
public void increment(long index, long increment) {
long whichByte = index >>> 3;
int whichBit = (int) (index & 0x07);
int whichSegment = (int) (whichByte >>> log2OfSegmentSizeInBytes);
long[] segment = segments[whichSegment];
long elementByteOffset = (long) Unsafe.ARRAY_LONG_BASE_OFFSET + (whichByte & byteBitmask);
long l = unsafe.getLong(segment, elementByteOffset);
unsafe.putLong(segment, elementByteOffset, l + (increment << whichBit));
/// update the fencepost longs
if((whichByte & byteBitmask) > bitmask * 8 && (whichSegment + 1) < segments.length) {
unsafe.putLong(segments[whichSegment + 1], (long) Unsafe.ARRAY_LONG_BASE_OFFSET, segments[whichSegment][bitmask + 1]);
}
if((whichByte & byteBitmask) < 8 && whichSegment > 0) {
unsafe.putLong(segments[whichSegment - 1], (long) Unsafe.ARRAY_LONG_BASE_OFFSET + (8 * (bitmask + 1)), segments[whichSegment][0]);
}
}
public static FixedLengthElementArray newFrom(HollowBlobInput in, ArraySegmentRecycler memoryRecycler)
throws IOException {
long numLongs = VarInt.readVLong(in);
return newFrom(in, memoryRecycler, numLongs);
}
public static FixedLengthElementArray newFrom(HollowBlobInput in, ArraySegmentRecycler memoryRecycler, long numLongs)
throws IOException {
FixedLengthElementArray arr = new FixedLengthElementArray(memoryRecycler, numLongs * 64);
arr.readFrom(in, memoryRecycler, numLongs);
return arr;
}
}
| 9,028 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/encoding/HashCodes.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.encoding;
import static com.netflix.hollow.core.HollowConstants.HASH_TABLE_MAX_SIZE;
import com.netflix.hollow.core.memory.ArrayByteData;
import com.netflix.hollow.core.memory.ByteData;
import com.netflix.hollow.core.memory.ByteDataArray;
public class HashCodes {
private static final int MURMURHASH_SEED = 0xeab524b9;
public static int hashCode(ByteDataArray data) {
return hashCode(data.getUnderlyingArray(), 0, (int) data.length());
}
public static int hashCode(final String data) {
if(data == null)
return -1;
int arrayLen = calculateByteArrayLength(data);
if(arrayLen == data.length()) {
return hashCode(new ByteData() {
@Override
public byte get(long position) {
return (byte)(data.charAt((int)position) & 0x7F);
}
}, 0, data.length());
} else {
byte[] array = createByteArrayFromString(data, arrayLen);
return hashCode(array);
}
}
public static int hashCode(byte[] data) {
return hashCode(new ArrayByteData(data), 0, data.length);
}
private static int calculateByteArrayLength(String data) {
int length = data.length();
for(int i=0;i<data.length();i++) {
if(data.charAt(i) > 0x7F)
length += VarInt.sizeOfVInt(data.charAt(i)) - 1;
}
return length;
}
private static byte[] createByteArrayFromString(String data, int arrayLen) {
byte array[] = new byte[arrayLen];
int pos = 0;
for(int i=0;i<data.length();i++) {
pos = VarInt.writeVInt(array, pos, data.charAt(i));
}
return array;
}
/**
* MurmurHash3. Adapted from:<p>
*
* https://github.com/yonik/java_util/blob/master/src/util/hash/MurmurHash3.java<p>
*
* On 11/19/2013 the license for this file read:<p>
*
* The MurmurHash3 algorithm was created by Austin Appleby. This java port was authored by
* Yonik Seeley and is placed into the public domain. The author hereby disclaims copyright
* to this source code.
* <p>
* This produces exactly the same hash values as the final C++
* version of MurmurHash3 and is thus suitable for producing the same hash values across
* platforms.
* <p>
* The 32 bit x86 version of this hash should be the fastest variant for relatively short keys like ids.
* <p>
* Note - The x86 and x64 versions do _not_ produce the same results, as the
* algorithms are optimized for their respective platforms.
* <p>
* See http://github.com/yonik/java_util for future updates to this file.
*
* @param data the data to hash
* @param offset the offset
* @param len the length
* @return the hash code
*/
public static int hashCode(ByteData data, long offset, int len) {
final int c1 = 0xcc9e2d51;
final int c2 = 0x1b873593;
int h1 = MURMURHASH_SEED;
long roundedEnd = offset + (len & 0xfffffffffffffffcL); // round down to
// 4 byte block
for (long i = offset; i < roundedEnd; i += 4) {
// little endian load order
int k1 = (data.get(i) & 0xff) | ((data.get(i + 1) & 0xff) << 8) | ((data.get(i + 2) & 0xff) << 16) | (data.get(i + 3) << 24);
k1 *= c1;
k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
k1 *= c2;
h1 ^= k1;
h1 = (h1 << 13) | (h1 >>> 19); // ROTL32(h1,13);
h1 = h1 * 5 + 0xe6546b64;
}
// tail
int k1 = 0;
switch (len & 0x03) {
case 3:
k1 = (data.get(roundedEnd + 2) & 0xff) << 16;
// fallthrough
case 2:
k1 |= (data.get(roundedEnd + 1) & 0xff) << 8;
// fallthrough
case 1:
k1 |= (data.get(roundedEnd) & 0xff);
k1 *= c1;
k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
k1 *= c2;
h1 ^= k1;
}
// finalization
h1 ^= len;
// fmix(h1);
h1 ^= h1 >>> 16;
h1 *= 0x85ebca6b;
h1 ^= h1 >>> 13;
h1 *= 0xc2b2ae35;
h1 ^= h1 >>> 16;
return h1;
}
public static int hashLong(long key) {
key = (~key) + (key << 18);
key ^= (key >>> 31);
key *= 21;
key ^= (key >>> 11);
key += (key << 6);
key ^= (key >>> 22);
return (int) key;
}
public static int hashInt(int key) {
key = ~key + (key << 15);
key = key ^ (key >>> 12);
key = key + (key << 2);
key = key ^ (key >>> 4);
key = key * 2057;
key = key ^ (key >>> 16);
return key;
}
/**
* Determine size of hash table capable of storing the specified number of elements with a load
* factor applied.
*
* @param numElements number of elements to be stored in the table
* @return size of hash table, always a power of 2
* @throws IllegalArgumentException when numElements is negative or exceeds
* {@link com.netflix.hollow.core.HollowConstants#HASH_TABLE_MAX_SIZE}
*/
public static int hashTableSize(int numElements) throws IllegalArgumentException {
if (numElements < 0) {
throw new IllegalArgumentException("cannot be negative; numElements="+numElements);
} else if (numElements > HASH_TABLE_MAX_SIZE) {
throw new IllegalArgumentException("exceeds maximum number of buckets; numElements="+numElements);
}
if (numElements == 0)
return 1;
if (numElements < 3)
return numElements * 2;
// Apply load factor to number of elements and determine next
// largest power of 2 that fits in an int
int sizeAfterLoadFactor = (int)((long)numElements * 10 / 7);
int bits = 32 - Integer.numberOfLeadingZeros(sizeAfterLoadFactor - 1);
return 1 << bits;
}
}
| 9,029 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/encoding/VarInt.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.encoding;
import com.netflix.hollow.core.memory.ByteData;
import com.netflix.hollow.core.memory.ByteDataArray;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* Variable-byte integer encoding and decoding logic
*
* @author dkoszewnik
*/
public class VarInt {
/**
* Write a 'null' variable length integer into the supplied {@link ByteDataArray}
*
* @param buf the buffer to write to
*/
public static void writeVNull(ByteDataArray buf) {
buf.write((byte)0x80);
return;
}
/**
* Write a 'null' variable length integer into the supplied byte array.
*
* @param data the byte array to write to
* @param pos the position in the byte array
* @return the next position in the byte array after the null has been written
*/
public static int writeVNull(byte data[], int pos) {
data[pos++] = ((byte)0x80);
return pos;
}
/**
* Encode the specified long as a variable length integer into the supplied {@link ByteDataArray}
*
* @param buf the buffer to write to
* @param value the long value
*/
public static void writeVLong(ByteDataArray buf, long value) {
if(value < 0) buf.write((byte)0x81);
if(value > 0xFFFFFFFFFFFFFFL || value < 0) buf.write((byte)(0x80 | ((value >>> 56) & 0x7FL)));
if(value > 0x1FFFFFFFFFFFFL || value < 0) buf.write((byte)(0x80 | ((value >>> 49) & 0x7FL)));
if(value > 0x3FFFFFFFFFFL || value < 0) buf.write((byte)(0x80 | ((value >>> 42) & 0x7FL)));
if(value > 0x7FFFFFFFFL || value < 0) buf.write((byte)(0x80 | ((value >>> 35) & 0x7FL)));
if(value > 0xFFFFFFFL || value < 0) buf.write((byte)(0x80 | ((value >>> 28) & 0x7FL)));
if(value > 0x1FFFFFL || value < 0) buf.write((byte)(0x80 | ((value >>> 21) & 0x7FL)));
if(value > 0x3FFFL || value < 0) buf.write((byte)(0x80 | ((value >>> 14) & 0x7FL)));
if(value > 0x7FL || value < 0) buf.write((byte)(0x80 | ((value >>> 7) & 0x7FL)));
buf.write((byte)(value & 0x7FL));
}
/**
* Encode the specified long as a variable length integer into the supplied OuputStream
*
* @param out the output stream to write to
* @param value the long value
* @throws IOException if the value cannot be written to the output stream
*/
public static void writeVLong(OutputStream out, long value) throws IOException {
if(value < 0) out.write((byte)0x81);
if(value > 0xFFFFFFFFFFFFFFL || value < 0) out.write((byte)(0x80 | ((value >>> 56) & 0x7FL)));
if(value > 0x1FFFFFFFFFFFFL || value < 0) out.write((byte)(0x80 | ((value >>> 49) & 0x7FL)));
if(value > 0x3FFFFFFFFFFL || value < 0) out.write((byte)(0x80 | ((value >>> 42) & 0x7FL)));
if(value > 0x7FFFFFFFFL || value < 0) out.write((byte)(0x80 | ((value >>> 35) & 0x7FL)));
if(value > 0xFFFFFFFL || value < 0) out.write((byte)(0x80 | ((value >>> 28) & 0x7FL)));
if(value > 0x1FFFFFL || value < 0) out.write((byte)(0x80 | ((value >>> 21) & 0x7FL)));
if(value > 0x3FFFL || value < 0) out.write((byte)(0x80 | ((value >>> 14) & 0x7FL)));
if(value > 0x7FL || value < 0) out.write((byte)(0x80 | ((value >>> 7) & 0x7FL)));
out.write((byte)(value & 0x7FL));
}
/**
* Encode the specified long as a variable length long into the supplied byte array.
*
* @param data the byte array to write to
* @param pos the position in the byte array
* @param value the long value
* @return the next position after the VarLong has been written.
*/
public static int writeVLong(byte data[], int pos, long value) {
if(value < 0) data[pos++] = ((byte)0x81);
if(value > 0xFFFFFFFFFFFFFFL || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 56) & 0x7FL)));
if(value > 0x1FFFFFFFFFFFFL || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 49) & 0x7FL)));
if(value > 0x3FFFFFFFFFFL || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 42) & 0x7FL)));
if(value > 0x7FFFFFFFFL || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 35) & 0x7FL)));
if(value > 0xFFFFFFFL || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 28) & 0x7FL)));
if(value > 0x1FFFFFL || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 21) & 0x7FL)));
if(value > 0x3FFFL || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 14) & 0x7FL)));
if(value > 0x7FL || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 7) & 0x7FL)));
data[pos++] = ((byte)(value & 0x7FL));
return pos;
}
/**
* Encode the specified int as a variable length integer into the supplied {@link ByteDataArray}
*
* @param buf the buffer to write to
* @param value the int value
*/
public static void writeVInt(ByteDataArray buf, int value) {
if(value > 0x0FFFFFFF || value < 0) buf.write((byte)(0x80 | ((value >>> 28))));
if(value > 0x1FFFFF || value < 0) buf.write((byte)(0x80 | ((value >>> 21) & 0x7F)));
if(value > 0x3FFF || value < 0) buf.write((byte)(0x80 | ((value >>> 14) & 0x7F)));
if(value > 0x7F || value < 0) buf.write((byte)(0x80 | ((value >>> 7) & 0x7F)));
buf.write((byte)(value & 0x7F));
}
/**
* Encode the specified int as a variable length integer into the supplied OutputStream
*
* @param out the output stream to write to
* @param value the int value
* @throws IOException if the value cannot be written to the output stream
*/
public static void writeVInt(OutputStream out, int value) throws IOException {
if(value > 0x0FFFFFFF || value < 0) out.write((byte)(0x80 | ((value >>> 28))));
if(value > 0x1FFFFF || value < 0) out.write((byte)(0x80 | ((value >>> 21) & 0x7F)));
if(value > 0x3FFF || value < 0) out.write((byte)(0x80 | ((value >>> 14) & 0x7F)));
if(value > 0x7F || value < 0) out.write((byte)(0x80 | ((value >>> 7) & 0x7F)));
out.write((byte)(value & 0x7F));
}
/**
* Write the value as a VarInt into the array, starting at the specified position.
*
* @param data the byte array to write to
* @param pos the position in the byte array
* @param value the int value
* @return the next position after the VarInt has been written.
*/
public static int writeVInt(byte data[], int pos, int value) {
if(value > 0x0FFFFFFF || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 28))));
if(value > 0x1FFFFF || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 21) & 0x7F)));
if(value > 0x3FFF || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 14) & 0x7F)));
if(value > 0x7F || value < 0) data[pos++] = ((byte)(0x80 | ((value >>> 7) & 0x7F)));
data[pos++] = (byte)(value & 0x7F);
return pos;
}
/**
* Determine whether or not the value at the specified position in the supplied {@link ByteData} is
* a 'null' variable length integer.
*
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @return true if the value is null
*/
public static boolean readVNull(ByteData arr, long position) {
return arr.get(position) == (byte)0x80;
}
/**
* Determine whether the value at the specified position in the supplied byte array is a 'null' variable
* length integer.
*
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @return true if the value is null
*/
public static boolean readVNull(byte[] arr, int position) {
return arr[position] == (byte)0x80;
}
/**
* Read a variable length integer from the supplied {@link ByteData} starting at the specified position.
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @return the int value
*/
public static int readVInt(ByteData arr, long position) {
byte b = arr.get(position++);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as int");
int value = b & 0x7F;
while ((b & 0x80) != 0) {
b = arr.get(position++);
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
/**
* Read a variable length integer from the supplied byte array starting at the specified position.
*
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @return the int value
*/
public static int readVInt(byte[] arr, int position) {
byte b = arr[position++];
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as int");
int value = b & 0x7F;
while ((b & 0x80) != 0) {
b = arr[position++];
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
/**
* Read a series of variable length integers from the supplied {@link ByteData} starting at the specified position.
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @param length the number of bytes that should be read
* @param output an array where outputs will be placed, which should be at least {@code length} long and zero.
* @return the number of values written
*/
public static int readVIntsInto(ByteData arr, long position, int length, int[] output) {
// two loops, first for single-byte encodes, falling back to second full-featured loop
int i = 0;
for(; i < length; i++) {
int b = arr.get(position + i);
if ((b & 0x80) != 0)
break;
output[i] = b;
}
int count = i;
for(; i < length; i++) {
int b = arr.get(position + i);
output[count] = (output[count] << 7) | (b & 0x7f);
count += (~b >> 7) & 0x1;
}
return count;
}
/**
* Read a series of variable length integers (as chars) from the supplied {@link ByteData} starting at the specified position.
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @param length the number of bytes that should be read
* @param output an array where outputs will be placed, which should be at least {@code length} long and zero.
* @return the number of values written
*/
public static int readVIntsInto(ByteData arr, long position, int length, char[] output) {
// two loops, first for single-byte encodes, falling back to second full-featured loop
int i = 0;
for(; i < length; i++) {
int b = arr.get(position + i);
if ((b & 0x80) != 0)
break;
output[i] = (char) b;
}
int count = i;
for(; i < length; i++) {
int b = arr.get(position + i);
output[count] = (char) ((output[count] << 7) | (b & 0x7f));
count += (~b >> 7) & 0x1;
}
return count;
}
/**
* Read a variable length integer from the supplied InputStream
* @param in the Hollow blob input to read from
* @return the int value
* @throws IOException if the value cannot be read from the input
*/
public static int readVInt(InputStream in) throws IOException {
byte b = readByteSafely(in);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as int");
int value = b & 0x7F;
while ((b & 0x80) != 0) {
b = readByteSafely(in);
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
/**
* Read a variable length integer from the supplied HollowBlobInput
* @param in the Hollow blob input to read from
* @return the int value
* @throws IOException if the value cannot be read from the input
*/
public static int readVInt(HollowBlobInput in) throws IOException {
byte b = readByteSafely(in);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as int");
int value = b & 0x7F;
while ((b & 0x80) != 0) {
b = readByteSafely(in);
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
/**
* Read a variable length long from the supplied {@link ByteData} starting at the specified position.
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @return the long value
*/
public static long readVLong(ByteData arr, long position) {
byte b = arr.get(position++);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as long");
long value = b & 0x7F;
while ((b & 0x80) != 0) {
b = arr.get(position++);
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
/**
* Read a variable length long from the supplied byte array starting at the specified position.
*
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @return the long value
*/
public static long readVLong(byte[] arr, int position) {
byte b = arr[position++];
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as long");
long value = b & 0x7F;
while ((b & 0x80) != 0) {
b = arr[position++];
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
/**
* Determine the size (in bytes) of the variable length long in the supplied {@link ByteData}, starting at the specified position.
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @return the long value
*/
public static int nextVLongSize(ByteData arr, long position) {
byte b = arr.get(position++);
if(b == (byte) 0x80)
return 1;
int length = 1;
while((b & 0x80) != 0) {
b = arr.get(position++);
length++;
}
return length;
}
/**
* Determine the size (in bytes) of the variable length long in the supplied byte array, starting at the specified position.
*
* @param arr the byte data to read from
* @param position the position in the byte data to read from
* @return the size of long value
*/
public static int nextVLongSize(byte[] arr, int position) {
byte b = arr[position++];
if(b == (byte) 0x80)
return 1;
int length = 1;
while((b & 0x80) != 0) {
b = arr[position++];
length++;
}
return length;
}
/**
* Read a variable length long from the supplied InputStream.
* @param in the input stream to read from
* @return the long value
* @throws IOException if the value cannot be read from the input stream
*/
public static long readVLong(InputStream in) throws IOException {
byte b = readByteSafely(in);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as long");
long value = b & 0x7F;
while ((b & 0x80) != 0) {
b = readByteSafely(in);
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
/**
* Read a variable length long from the supplied HollowBlobInput.
* @param in the Hollow blob input to read from
* @return the long value
* @throws IOException if the value cannot be read from the input
*/
public static long readVLong(HollowBlobInput in) throws IOException {
byte b = readByteSafely(in);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as long");
long value = b & 0x7F;
while ((b & 0x80) != 0) {
b = readByteSafely(in);
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
/**
* Determine the size (in bytes) of the specified value when encoded as a variable length integer.
* @param value the int value
* @return the size (int bytes) of the value when encoded
*/
public static int sizeOfVInt(int value) {
if(value < 0)
return 5;
if(value < 0x80)
return 1;
if(value < 0x4000)
return 2;
if(value < 0x200000)
return 3;
if(value < 0x10000000)
return 4;
return 5;
}
/**
* Determine the size (in bytes) of the specified value when encoded as a variable length integer.
* @param value the long value
* @return the size (int bytes) of the value when encoded
*/
public static int sizeOfVLong(long value) {
if(value < 0L)
return 10;
if(value < 0x80L)
return 1;
if(value < 0x4000L)
return 2;
if(value < 0x200000L)
return 3;
if(value < 0x10000000L)
return 4;
if(value < 0x800000000L)
return 5;
if(value < 0x40000000000L)
return 6;
if(value < 0x2000000000000L)
return 7;
if(value < 0x100000000000000L)
return 8;
return 9;
}
/**
* Count the number of variable length integers encoded in the supplied {@link ByteData} in the specified range.
* @param byteData the byte data
* @param fieldPosition the field position
* @param length the length
* @return number of variable length integers encoded over a range in the byte data
*/
public static int countVarIntsInRange(ByteData byteData, long fieldPosition, int length) {
int numInts = 0;
boolean insideInt = false;
for(int i=0;i<length;i++) {
byte b = byteData.get(fieldPosition + i);
if((b & 0x80) == 0) {
numInts++;
insideInt = false;
} else if(!insideInt && b == (byte)0x80) {
numInts++;
} else {
insideInt = true;
}
}
return numInts;
}
public static byte readByteSafely(InputStream is) throws IOException {
int i = is.read();
if(i == -1) {
throw new EOFException("Unexpected end of VarInt record");
}
return (byte)i;
}
public static byte readByteSafely(HollowBlobInput in) throws IOException {
int i = in.read();
if(i == -1) {
throw new EOFException("Unexpected end of VarInt record");
}
return (byte)i;
}
}
| 9,030 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/pool/RecyclingRecycler.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.pool;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
/**
* A RecyclingRecycler is an {@link ArraySegmentRecycler} which actually pools arrays, in contrast
* with a {@link WastefulRecycler}.
*/
public class RecyclingRecycler implements ArraySegmentRecycler {
private final int log2OfByteSegmentSize;
private final int log2OfLongSegmentSize;
private final Recycler<long[]> longSegmentRecycler;
private final Recycler<byte[]> byteSegmentRecycler;
public RecyclingRecycler() {
this(11, 8);
}
public RecyclingRecycler(final int log2ByteArraySize, final int log2LongArraySize) {
this.log2OfByteSegmentSize = log2ByteArraySize;
this.log2OfLongSegmentSize = log2LongArraySize;
byteSegmentRecycler = new Recycler<>(() -> new byte[1 << log2ByteArraySize]);
// Allocated size is increased by 1, see JavaDoc of FixedLengthElementArray for details
longSegmentRecycler = new Recycler<>(() -> new long[(1 << log2LongArraySize) + 1]);
}
public int getLog2OfByteSegmentSize() {
return log2OfByteSegmentSize;
}
public int getLog2OfLongSegmentSize() {
return log2OfLongSegmentSize;
}
public long[] getLongArray() {
long[] arr = longSegmentRecycler.get();
Arrays.fill(arr, 0);
return arr;
}
public void recycleLongArray(long[] arr) {
longSegmentRecycler.recycle(arr);
}
public byte[] getByteArray() {
// @@@ should the array be filled?
return byteSegmentRecycler.get();
}
public void recycleByteArray(byte[] arr) {
byteSegmentRecycler.recycle(arr);
}
public void swap() {
longSegmentRecycler.swap();
byteSegmentRecycler.swap();
}
private static class Recycler<T> {
private final Creator<T> creator;
private Deque<T> currentSegments;
private Deque<T> nextSegments;
Recycler(Creator<T> creator) {
// Use an ArrayDeque instead of a LinkedList
// This will avoid memory churn allocating and collecting internal nodes
this.currentSegments = new ArrayDeque<>();
this.nextSegments = new ArrayDeque<>();
this.creator = creator;
}
T get() {
if (!currentSegments.isEmpty()) {
return currentSegments.removeFirst();
}
return creator.create();
}
void recycle(T reuse) {
nextSegments.addLast(reuse);
}
void swap() {
// Swap the deque references to reduce addition and clearing cost
if (nextSegments.size() > currentSegments.size()) {
Deque<T> tmp = nextSegments;
nextSegments = currentSegments;
currentSegments = tmp;
}
currentSegments.addAll(nextSegments);
nextSegments.clear();
}
}
private interface Creator<T> {
T create();
}
}
| 9,031 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/pool/WastefulRecycler.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.pool;
/**
* A WastefulRecycler is an {@link ArraySegmentRecycler} which doesn't <i>really</i> pool arrays, it instead
* just creates them on the demand.
*/
public class WastefulRecycler implements ArraySegmentRecycler {
public static WastefulRecycler DEFAULT_INSTANCE = new WastefulRecycler(11, 8);
public static WastefulRecycler SMALL_ARRAY_RECYCLER = new WastefulRecycler(5, 2);
private final int log2OfByteSegmentSize;
private final int log2OfLongSegmentSize;
public WastefulRecycler(int log2OfByteSegmentSize, int log2OfLongSegmentSize) {
this.log2OfByteSegmentSize = log2OfByteSegmentSize;
this.log2OfLongSegmentSize = log2OfLongSegmentSize;
}
@Override
public int getLog2OfByteSegmentSize() {
return log2OfByteSegmentSize;
}
@Override
public int getLog2OfLongSegmentSize() {
return log2OfLongSegmentSize;
}
@Override
public long[] getLongArray() {
return new long[(1 << log2OfLongSegmentSize) + 1];
}
@Override
public byte[] getByteArray() {
return new byte[(1 << log2OfByteSegmentSize)];
}
@Override
public void recycleLongArray(long[] arr) {
/// do nothing
}
@Override
public void recycleByteArray(byte[] arr) {
/// do nothing
}
@Override
public void swap() {
// do nothing
}
}
| 9,032 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/memory/pool/ArraySegmentRecycler.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.memory.pool;
import com.netflix.hollow.core.memory.SegmentedByteArray;
import com.netflix.hollow.core.memory.SegmentedLongArray;
/**
* An ArraySegmentRecycler is a memory pool.
* <p>
* Hollow pools and reuses memory to minimize GC effects while updating data.
* This pool of memory is kept arrays on the heap. Each array in the pool has a fixed length.
* When a long array or a byte array is required in Hollow, it will stitch together pooled array
* segments as a {@link SegmentedByteArray} or {@link SegmentedLongArray}.
* These classes encapsulate the details of treating segmented arrays as contiguous ranges of values.
*/
public interface ArraySegmentRecycler {
public int getLog2OfByteSegmentSize();
public int getLog2OfLongSegmentSize();
public long[] getLongArray();
public void recycleLongArray(long[] arr);
public byte[] getByteArray();
public void recycleByteArray(byte[] arr);
public void swap();
}
| 9,033 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/IOUtils.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.DataOutputStream;
import java.io.IOException;
public class IOUtils {
public static void copyBytes(HollowBlobInput in, DataOutputStream[] os, long numBytes) throws IOException {
byte buf[] = new byte[4096];
while(numBytes > 0) {
int numBytesToRead = 4096;
if(numBytes < 4096)
numBytesToRead = (int)numBytes;
int bytesRead = in.read(buf, 0, numBytesToRead);
for(int i=0;i<os.length;i++) {
os[i].write(buf, 0, bytesRead);
}
numBytes -= bytesRead;
}
}
public static void copySegmentedLongArray(HollowBlobInput in, DataOutputStream[] os) throws IOException {
long numLongsToWrite = VarInt.readVLong(in);
for(int i=0;i<os.length;i++)
VarInt.writeVLong(os[i], numLongsToWrite);
copyBytes(in, os, numLongsToWrite * 8);
}
public static int copyVInt(HollowBlobInput in, DataOutputStream[] os) throws IOException {
int value = VarInt.readVInt(in);
for(int i=0;i<os.length;i++)
VarInt.writeVInt(os[i], value);
return value;
}
public static long copyVLong(HollowBlobInput in, DataOutputStream[] os) throws IOException {
long value = VarInt.readVLong(in);
for(int i=0;i<os.length;i++)
VarInt.writeVLong(os[i], value);
return value;
}
}
| 9,034 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/BitSetIterator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.hollow.core.util;
import java.util.BitSet;
import java.util.Iterator;
/**
* A utility to iterate over range of ordinals within a BitSet
*/
public class BitSetIterator implements Iterator<Integer> {
private final BitSet bitset;
private final Integer limit;
private int next = -1;
private int count = 0;
public BitSetIterator(BitSet bitSet) {
this(bitSet, null, null);
}
public BitSetIterator(BitSet bitSet, Integer start, Integer limit) {
this.bitset = bitSet;
this.limit = limit == null ? Integer.MAX_VALUE : limit.intValue();
// advance next to start
if (start == null || start.intValue() <= 1) {
next = bitset.nextSetBit(0);
} else {
for (int i = 0; i < start; i++) {
next = bitset.nextSetBit(next + 1);
if (next == -1)
break;
}
}
}
@Override
public boolean hasNext() {
return next != -1;
}
@Override
public Integer next() {
if (!hasNext())
return null;
int returnValue = next;
next = bitset.nextSetBit(next + 1);
if (++count >= limit)
next = -1;
return returnValue;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
| 9,035 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/HollowObjectHashCodeFinder.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import com.netflix.hollow.api.objects.HollowRecord;
import java.util.Set;
/**
* Use of this interface is not recommended. Instead, use the <i>hash key</i> functionality available sets and maps.
* <p>
* With this interface, in conjunction with a cooperating data ingestion mechanism, it is possible to use custom hash codes
* in Hollow sets and maps.
* @deprecated Use hash key the <i>hash key</i> functionality available sets and maps
* @see com.netflix.hollow.core.write.objectmapper.HollowHashKey
*/
@Deprecated
public interface HollowObjectHashCodeFinder {
String DEFINED_HASH_CODES_HEADER_NAME = "DEFINED_HASH_CODES";
/**
* For look-up at runtime.
* <p>
* If using simple ordinal-based hashing, then objectToHash must be a {@link HollowRecord}, and the return value will be
* objectToHash.getOrdinal();
* <p>
* Otherwise, the hash code is determined with exactly the same logic as was used during serialization.
*
* @param objectToHash the object to hash
* @return the hash code
*/
int hashCode(Object objectToHash);
/**
* For serialization.
* <p>
* At serialization time, we know the ordinal of a newly added object, but may not know how to hash the object
* which is being serialized, which is necessary for Sets and Maps.
* <p>
* If using simple ordinal-based hashing, the ordinal will be returned. Otherwise, the return value will be calculated based on the objectToHash.
*
* @param typeName the type name
* @param ordinal the ordinal
* @param objectToHash the object to hash
* @return the hash code
*/
int hashCode(String typeName, int ordinal, Object objectToHash);
/**
* @return the set of types which have hash codes defined (i.e. hash codes which are not simply each record's ordinal)
*/
Set<String> getTypesWithDefinedHashCodes();
@Deprecated
int hashCode(int ordinal, Object objectToHash);
}
| 9,036 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/AllHollowRecordCollection.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.engine.PopulatedOrdinalListener;
public abstract class AllHollowRecordCollection<T> extends HollowRecordCollection<T> {
public AllHollowRecordCollection(HollowTypeReadState typeState) {
super(typeState.getListener(PopulatedOrdinalListener.class).getPopulatedOrdinals());
}
}
| 9,037 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/SimultaneousExecutor.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import static com.netflix.hollow.core.util.Threads.daemonThread;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RunnableFuture;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
*
* A convenience wrapper around ThreadPoolExecutor. Provides sane defaults to
* constructor arguments and allows for awaitUninterruptibly().
*
*/
public class SimultaneousExecutor extends ThreadPoolExecutor {
private static final String DEFAULT_THREAD_NAME = "simultaneous-executor";
private final List<Future<?>> futures = new ArrayList<Future<?>>();
/**
* Creates an executor with a thread per processor.
* <p>
* Equivalent to constructing a {@code SimultaneousExecutor} with {@code 1.0d}
* threads per CPU.
*/
public SimultaneousExecutor(Class<?> context, String description) {
this(1.0d, context, description);
}
/**
* Creates an executor with a thread per processor.
* <p>
* Equivalent to calling {@code SimultaneousExecutor(1.0d)}
*
* @deprecated use {@link #SimultaneousExecutor(Class, String)}
*/
@Deprecated
public SimultaneousExecutor() {
this(1.0d, SimultaneousExecutor.class, DEFAULT_THREAD_NAME);
}
/**
* Creates an executor with number of threads calculated from the
* specified factor.
*
* @param threadsPerCpu calculated as {@code processors * threadsPerCpu} then used as {@code corePoolSize} and {@code maximumPoolSize}
* @param context used to name created threads
*/
public SimultaneousExecutor(double threadsPerCpu, Class<?> context) {
this(threadsPerCpu, context, DEFAULT_THREAD_NAME);
}
/**
* Creates an executor with number of threads calculated from the
* specified factor.
*
* @param threadsPerCpu calculated as {@code processors * threadsPerCpu} then used as {@code corePoolSize} and {@code maximumPoolSize}
*
* @deprecated use {@link #SimultaneousExecutor(double, Class)}
*/
@Deprecated
public SimultaneousExecutor(double threadsPerCpu) {
this(threadsPerCpu, SimultaneousExecutor.class, DEFAULT_THREAD_NAME);
}
/**
* Creates an executor with number of threads calculated from the
* specified factor and threads named according to {@code context} and {@code description}.
*
* @param threadsPerCpu calculated as {@code processors * threadsPerCpu} then used as {@code corePoolSize} and {@code maximumPoolSize}
* @param context combined with {@code description} to name created threads
* @param description brief description used to name created threads; combined with {@code context}
*/
public SimultaneousExecutor(double threadsPerCpu, Class<?> context, String description) {
this((int) ((double) Runtime.getRuntime().availableProcessors() * threadsPerCpu), context, description);
}
/**
* Creates an executor with number of threads calculated from the
* specified factor and threads named according to {@code description}.
*
* @param threadsPerCpu calculated as {@code processors * threadsPerCpu} then used as {@code corePoolSize} and {@code maximumPoolSize}
* @param description brief description used to name created threads
*
* @deprecated use {@link #SimultaneousExecutor(double, Class, String)}
*/
@Deprecated
public SimultaneousExecutor(double threadsPerCpu, String description) {
this((int) ((double) Runtime.getRuntime().availableProcessors() * threadsPerCpu), SimultaneousExecutor.class, description);
}
/**
* Creates an executor with the specified number of threads and threads named
* according to {@code context}.
*
* @param numThreads used as {@code corePoolSize} and {@code maximumPoolSize}
* @param context used to name created threads
*/
public SimultaneousExecutor(int numThreads, Class<?> context) {
this(numThreads, context, DEFAULT_THREAD_NAME);
}
/**
* Creates an executor with the specified number of threads.
*
* @param numThreads used as {@code corePoolSize} and {@code maximumPoolSize}
*
* @deprecated use {@link #SimultaneousExecutor(int, Class)}
*/
@Deprecated
public SimultaneousExecutor(int numThreads) {
this(numThreads, SimultaneousExecutor.class, DEFAULT_THREAD_NAME);
}
/**
* Creates an executor with the specified number of threads and threads named
* according to {@code context} and {@code description}.
*
* @param numThreads used as {@code corePoolSize} and {@code maximumPoolSize}
* @param context combined with {@code description} to name created threads
* @param description brief description used to name created threads; combined with {@code context}
*/
public SimultaneousExecutor(int numThreads, Class<?> context, String description) {
this(numThreads, context, description, Thread.NORM_PRIORITY);
}
/**
* Creates an executor with number of threads calculated from the
* specified factor and threads named according to {@code context} and {@code description}.
*
* @param threadsPerCpu calculated as {@code processors * threadsPerCpu} then used as {@code corePoolSize} and {@code maximumPoolSize}
* @param context combined with {@code description} to name created threads
* @param description brief description used to name created threads; combined with {@code context}
* @param threadPriority the priority set to each thread
*/
public SimultaneousExecutor(double threadsPerCpu, Class<?> context, String description, int threadPriority) {
this((int) ((double) Runtime.getRuntime().availableProcessors() * threadsPerCpu), context, description, threadPriority);
}
/**
* Creates an executor with the specified number of threads and threads named
* according to {@code context} and {@code description}.
*
* @param numThreads used as {@code corePoolSize} and {@code maximumPoolSize}
* @param context combined with {@code description} to name created threads
* @param description brief description used to name created threads; combined with {@code context}
* @param threadPriority the priority set to each thread
*/
public SimultaneousExecutor(int numThreads, Class<?> context, String description, int threadPriority) {
this(numThreads, r -> daemonThread(r, context, description, threadPriority));
}
protected SimultaneousExecutor(int numThreads, ThreadFactory threadFactory) {
super(numThreads, numThreads, 100, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), threadFactory);
}
/**
* Creates an executor with the specified number of threads and threads named
* according to {@code description}.
*
* @param numThreads used as {@code corePoolSize} and {@code maximumPoolSize}
* @param description brief description used to name created threads
*
* @deprecated use {@link #SimultaneousExecutor(int, Class, String)}
*/
@Deprecated
public SimultaneousExecutor(int numThreads, final String description) {
this(numThreads, SimultaneousExecutor.class, description);
}
@Override
public void execute(Runnable command) {
if(command instanceof RunnableFuture) {
super.execute(command);
} else {
super.execute(newTaskFor(command, Boolean.TRUE));
}
}
/**
* Awaits completion of all submitted tasks.
*
* After this call completes, the thread pool will be shut down.
*/
public void awaitUninterruptibly() {
shutdown();
while (!isTerminated()) {
try {
awaitTermination(1, TimeUnit.DAYS);
} catch (final InterruptedException e) { }
}
}
@Override
protected final <T> RunnableFuture<T> newTaskFor(final Runnable runnable, final T value) {
final RunnableFuture<T> task = super.newTaskFor(runnable, value);
futures.add(task);
return task;
}
@Override
protected final <T> RunnableFuture<T> newTaskFor(final Callable<T> callable) {
final RunnableFuture<T> task = super.newTaskFor(callable);
futures.add(task);
return task;
}
/**
* Await successful completion of all submitted tasks. Throw exception of the first failed task
* if 1 or more tasks failed.
*
* After this call completes, the thread pool will be shut down.
*
* @throws ExecutionException if a computation threw an
* exception
* @throws InterruptedException if the current thread was interrupted
* while waiting
*/
public void awaitSuccessfulCompletion() throws InterruptedException, ExecutionException {
awaitUninterruptibly();
for (final Future<?> f : futures) {
f.get();
}
}
/**
* Await successful completion of all previously submitted tasks. Throw exception of the first failed task
* if 1 or more tasks failed.
*
* After this call completes, the thread pool will <i>not</i> be shut down and can be reused.
*
* @throws ExecutionException if a computation threw an
* exception
* @throws InterruptedException if the current thread was interrupted
* while waiting
*/
public void awaitSuccessfulCompletionOfCurrentTasks() throws InterruptedException, ExecutionException {
for(Future<?> f : futures) {
f.get();
}
futures.clear();
}
}
| 9,038 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/Threads.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import static java.util.Objects.requireNonNull;
/**
* Internal API.
*/
public final class Threads {
private Threads() {}
public static Thread daemonThread(Runnable r, Class<?> context, String description) {
return daemonThread(r, "hollow", context, description);
}
public static Thread daemonThread(Runnable r, Class<?> context, String description, int priority) {
Thread thread = daemonThread(r, "hollow", context, description);
thread.setPriority(priority);
return thread;
}
public static Thread daemonThread(Runnable r, String platform, Class<?> context, String description) {
requireNonNull(platform, "platform required");
requireNonNull(context, "context required");
requireNonNull(description, "description required");
return daemonThread(r, name(platform, context, description));
}
public static Thread daemonThread(Runnable r, String name) {
requireNonNull(r, "runnable required");
requireNonNull(name, "name required");
Thread t = new Thread(r, name);
t.setDaemon(true);
return t;
}
private static String name(String platform, Class<?> context, String description) {
StringBuilder sb = new StringBuilder();
sb.append(platform);
sb.append(" | ");
sb.append(context.getSimpleName());
sb.append(" | ");
sb.append(description);
return sb.toString();
}
}
| 9,039 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/LongList.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import java.util.Arrays;
/**
* A list of primitive longs
*/
public class LongList {
private long values[];
private int size;
public LongList() {
this(12);
}
public LongList(int initialSize) {
this.values = new long[initialSize];
}
public long get(int index) {
return values[index];
}
public void add(long value) {
if(values.length == size)
values = Arrays.copyOf(values, (values.length * 3) / 2);
values[size++] = value;
}
public int size() {
return size;
}
public void clear() {
size = 0;
}
public void sort() {
Arrays.sort(values, 0, size);
}
@Override
public boolean equals(Object other) {
if(other instanceof LongList) {
LongList that = (LongList)other;
if(this.size() == that.size()) {
for(int i=0;i<size;i++) {
if(this.get(i) != that.get(i))
return false;
}
return true;
}
}
return false;
}
@Override
public int hashCode() {
int result = size;
result = 31 * result + Arrays.hashCode(values);
return result;
}
}
| 9,040 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/Versions.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import static com.netflix.hollow.core.HollowConstants.VERSION_LATEST;
import static com.netflix.hollow.core.HollowConstants.VERSION_NONE;
public final class Versions {
// visible for testing
static final String PRETTY_VERSION_NONE = "none";
static final String PRETTY_VERSION_LATEST = "latest";
private Versions() {}
public static String prettyVersion(long version) {
if (version == VERSION_NONE) {
return PRETTY_VERSION_NONE;
} else if (version == VERSION_LATEST) {
return PRETTY_VERSION_LATEST;
} else {
return String.valueOf(version);
}
}
}
| 9,041 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/IntMap.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import java.util.Arrays;
/**
* A map of positive primitive ints to positive primitive ints.
*/
public class IntMap {
private final int keys[];
private final int values[];
private int size;
public IntMap(int numEntries) {
int arraySize = 1 << 32 - Integer.numberOfLeadingZeros((((numEntries + 1) * 4) / 3) - 1);
keys = new int[arraySize];
values = new int[arraySize];
Arrays.fill(keys, -1);
}
public int size() {
return size;
}
public int get(int key) {
int bucket = hashKey(key) % keys.length;
while (keys[bucket] != -1) {
if (keys[bucket] == key)
return values[bucket];
bucket++;
if (bucket == keys.length)
bucket = 0;
}
return -1;
}
public void put(int key, int value) {
int bucket = hashKey(key) % keys.length;
while (keys[bucket] != -1) {
if (keys[bucket] == key) {
values[bucket] = value;
return;
}
bucket++;
if (bucket == keys.length)
bucket = 0;
}
keys[bucket] = key;
values[bucket] = value;
size++;
}
private int hashKey(int key) {
key = ~key + (key << 15);
key = key ^ (key >>> 12);
key = key + (key << 2);
key = key ^ (key >>> 4);
key = key * 2057;
key = key ^ (key >>> 16);
return key & Integer.MAX_VALUE;
}
public IntMapEntryIterator iterator() {
return new IntMapEntryIterator();
}
public class IntMapEntryIterator {
private int currentEntry = -1;
public boolean next() {
while(++currentEntry < keys.length) {
if(keys[currentEntry] != -1)
return true;
}
return false;
}
public int getKey() {
return keys[currentEntry];
}
public int getValue() {
return values[currentEntry];
}
}
}
| 9,042 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/HollowWriteStateCreator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import com.netflix.hollow.core.read.engine.HollowReadStateEngine;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.engine.PopulatedOrdinalListener;
import com.netflix.hollow.core.schema.HollowListSchema;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.schema.HollowSchemaParser;
import com.netflix.hollow.core.schema.HollowSetSchema;
import com.netflix.hollow.core.write.HollowListTypeWriteState;
import com.netflix.hollow.core.write.HollowMapTypeWriteState;
import com.netflix.hollow.core.write.HollowObjectTypeWriteState;
import com.netflix.hollow.core.write.HollowSetTypeWriteState;
import com.netflix.hollow.core.write.HollowTypeWriteState;
import com.netflix.hollow.core.write.HollowWriteRecord;
import com.netflix.hollow.core.write.HollowWriteStateEngine;
import com.netflix.hollow.core.write.copy.HollowRecordCopier;
import com.netflix.hollow.tools.combine.IdentityOrdinalRemapper;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.BitSet;
import java.util.Collection;
/**
* Use to pre-populate, or create a {@link HollowWriteStateEngine} which is pre-populated, with a particular data model.
*/
public class HollowWriteStateCreator {
/**
* @param schemas The schemas from the data model
* @return a write state engine which is pre-populated with the specified data model.
*/
public static HollowWriteStateEngine createWithSchemas(Collection<HollowSchema> schemas) {
HollowWriteStateEngine stateEngine = new HollowWriteStateEngine();
populateStateEngineWithTypeWriteStates(stateEngine, schemas);
return stateEngine;
}
/**
* Reads a schema file into the provided HollowWriteStateEngine. The schema file must be on the classpath.
*
* @param schemaFilePath the path to the schema
* @param engine the write state engine
* @throws IOException if the schema could not be read
*/
public static void readSchemaFileIntoWriteState(String schemaFilePath, HollowWriteStateEngine engine)
throws IOException {
InputStream input = null;
try {
input = HollowWriteStateCreator.class.getClassLoader().getResourceAsStream(schemaFilePath);
Collection<HollowSchema> schemas =
HollowSchemaParser.parseCollectionOfSchemas(new BufferedReader(new InputStreamReader(input)));
populateStateEngineWithTypeWriteStates(engine, schemas);
} finally {
if (input != null) {
input.close();
}
}
}
/**
* Pre-populate a {@link HollowWriteStateEngine} with a specified data model.
*
* @param stateEngine The state engine to pre-populate
* @param schemas The schemas from the data model.
*/
public static void populateStateEngineWithTypeWriteStates(HollowWriteStateEngine stateEngine, Collection<HollowSchema> schemas) {
for(HollowSchema schema : schemas) {
if(stateEngine.getTypeState(schema.getName()) == null) {
switch(schema.getSchemaType()) {
case OBJECT:
stateEngine.addTypeState(new HollowObjectTypeWriteState((HollowObjectSchema)schema));
break;
case LIST:
stateEngine.addTypeState(new HollowListTypeWriteState((HollowListSchema)schema));
break;
case SET:
stateEngine.addTypeState(new HollowSetTypeWriteState((HollowSetSchema)schema));
break;
case MAP:
stateEngine.addTypeState(new HollowMapTypeWriteState((HollowMapSchema)schema));
break;
}
}
}
}
/**
* Recreate a {@link HollowWriteStateEngine} which can be used to write a snapshot of or continue
* a delta chain from the supplied {@link HollowReadStateEngine}.
* <p>
* The returned state engine will be ready to write a snapshot which will exactly recreate the data in the supplied {@link HollowReadStateEngine}.
* A delta chain may be continued from this state by calling {@link HollowWriteStateEngine#prepareForNextCycle()}.
*
* @param readEngine the read state engine
* @return the write state engine
*/
public static HollowWriteStateEngine recreateAndPopulateUsingReadEngine(final HollowReadStateEngine readEngine) {
final HollowWriteStateEngine writeEngine = new HollowWriteStateEngine();
populateStateEngineWithTypeWriteStates(writeEngine, readEngine.getSchemas());
populateUsingReadEngine(writeEngine, readEngine);
return writeEngine;
}
/**
* Populate the supplied {@link HollowWriteStateEngine} with all of the records from the supplied {@link HollowReadStateEngine}.
* <ul>
* <li>If fields or types have been removed, then those are ignored when copying records.</li>
* <li>If fields have been added to existing types, those fields will be null in the copied records.</li>
* <li>If types have been added, those types will have no records.</li>
* </ul>
* <p>
* The supplied HollowWriteStateEngine must be newly created, initialized with a data model, and empty. After this call,
* the write engine will be ready to write a snapshot which will exactly recreate the data in the supplied {@link HollowReadStateEngine},
* except with the data model which was initialized.
* <p>
* A delta chain may be continued from this state by calling {@link HollowWriteStateEngine#prepareForNextCycle()}.
*
* @param writeEngine the write state engine
* @param readEngine the read state engine
*/
public static void populateUsingReadEngine(HollowWriteStateEngine writeEngine, HollowReadStateEngine readEngine) {
populateUsingReadEngine(writeEngine, readEngine, true);
}
public static void populateUsingReadEngine(HollowWriteStateEngine writeEngine, HollowReadStateEngine readEngine, boolean preserveHashPositions) {
SimultaneousExecutor executor = new SimultaneousExecutor(HollowWriteStateCreator.class, "populate");
for(HollowTypeWriteState writeState : writeEngine.getOrderedTypeStates()) {
if(writeState.getPopulatedBitSet().cardinality() != 0 || writeState.getPreviousCyclePopulatedBitSet().cardinality() != 0)
throw new IllegalStateException("The supplied HollowWriteStateEngine is already populated!");
}
for(final HollowTypeReadState readState : readEngine.getTypeStates()) {
executor.execute(new Runnable() {
public void run() {
HollowTypeWriteState writeState = writeEngine.getTypeState(readState.getSchema().getName());
if(writeState != null) {
writeState.setNumShards(readState.numShards());
HollowRecordCopier copier = HollowRecordCopier.createCopier(readState, writeState.getSchema(), IdentityOrdinalRemapper.INSTANCE, preserveHashPositions);
BitSet populatedOrdinals = readState.getListener(PopulatedOrdinalListener.class).getPopulatedOrdinals();
writeState.resizeOrdinalMap(populatedOrdinals.cardinality());
int ordinal = populatedOrdinals.nextSetBit(0);
while(ordinal != -1) {
HollowWriteRecord rec = copier.copy(ordinal);
writeState.mapOrdinal(rec, ordinal, false, true);
ordinal = populatedOrdinals.nextSetBit(ordinal + 1);
}
writeState.recalculateFreeOrdinals();
}
}
});
}
try {
executor.awaitSuccessfulCompletion();
} catch(Exception e) {
throw new RuntimeException(e);
}
writeEngine.overridePreviousHeaderTags(readEngine.getHeaderTags());
writeEngine.addHeaderTags(readEngine.getHeaderTags());
writeEngine.overrideNextStateRandomizedTag(readEngine.getCurrentRandomizedTag());
writeEngine.prepareForWrite();
}
}
| 9,043 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/IntList.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import java.util.Arrays;
/**
* A list of primitive ints
*/
public class IntList {
private int values[];
private int size;
public IntList() {
this(12);
}
public IntList(int initialSize) {
this.values = new int[initialSize];
}
public int get(int index) {
return values[index];
}
public void add(int value) {
if(values.length == size)
values = Arrays.copyOf(values, (values.length * 3) / 2);
values[size++] = value;
}
public void addAll(IntList list) {
for(int i=0;i<list.size;i++)
add(list.get(i));
}
public void set(int index, int value) {
values[index] = value;
}
public int size() {
return size;
}
public void clear() {
size = 0;
}
public void sort() {
Arrays.sort(values, 0, size);
}
public int binarySearch(int value) {
return Arrays.binarySearch(values, 0, size, value);
}
public void expandTo(int size) {
if(values.length < size)
values = Arrays.copyOf(values, size);
this.size = size;
}
public void trim() {
values = Arrays.copyOf(values, Math.max(size, 12));
}
public int[] arrayCopyOfRange(int beginIdx, int endIdx) {
int arr[] = new int[endIdx - beginIdx];
System.arraycopy(values, beginIdx, arr, 0, endIdx - beginIdx);
return arr;
}
@Override
public boolean equals(Object other) {
if(other instanceof IntList) {
IntList that = (IntList)other;
if(this.size() == that.size()) {
for(int i=0;i<size;i++) {
if(this.get(i) != that.get(i))
return false;
}
return true;
}
}
return false;
}
@Override
public int hashCode() {
int result = size;
result = 31 * result + Arrays.hashCode(values);
return result;
}
}
| 9,044 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/StateEngineRoundTripper.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import com.netflix.hollow.core.read.HollowBlobInput;
import com.netflix.hollow.core.read.engine.HollowBlobReader;
import com.netflix.hollow.core.read.engine.HollowReadStateEngine;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.write.HollowBlobWriter;
import com.netflix.hollow.core.write.HollowWriteStateEngine;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
/**
* A utility to create and keep up-to-date a {@link HollowReadStateEngine} from a {@link HollowWriteStateEngine}
*/
public class StateEngineRoundTripper {
/**
* @param writeEngine the write state engine
* @return a brand-new {@link HollowReadStateEngine} with the dataset populated in the provided {@link HollowWriteStateEngine}
* @throws IOException if the round trip from write to read state failed
*/
public static HollowReadStateEngine roundTripSnapshot(HollowWriteStateEngine writeEngine) throws IOException {
HollowReadStateEngine readEngine = new HollowReadStateEngine();
roundTripSnapshot(writeEngine, readEngine);
return readEngine;
}
/**
* Populate the provided {@link HollowReadStateEngine} with the dataset currently in the provided {@link HollowWriteStateEngine}
*
* @param writeEngine the write state engine
* @param readEngine the read state engine
* @throws IOException if the round trip from write to read state failed
*/
public static void roundTripSnapshot(HollowWriteStateEngine writeEngine, HollowReadStateEngine readEngine) throws IOException {
roundTripSnapshot(writeEngine, readEngine, null);
}
/**
* Populate the provided {@link HollowReadStateEngine} with the dataset currently in the provided {@link HollowWriteStateEngine}.
* <p>
* Apply the provided {@link HollowFilterConfig}.
*
* @param writeEngine the write state engine
* @param readEngine the read state engine
* @param filter the filter configuration
* @throws IOException if the round trip from write to read state failed
*/
public static void roundTripSnapshot(HollowWriteStateEngine writeEngine, HollowReadStateEngine readEngine, HollowFilterConfig filter) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
HollowBlobWriter writer = new HollowBlobWriter(writeEngine);
writer.writeSnapshot(baos);
writeEngine.prepareForNextCycle();
HollowBlobReader reader = new HollowBlobReader(readEngine);
InputStream is = new ByteArrayInputStream(baos.toByteArray());
try (HollowBlobInput in = HollowBlobInput.serial(is)) {
if (filter == null)
reader.readSnapshot(in);
else
reader.readSnapshot(in, filter);
}
}
/**
* Update the provided {@link HollowReadStateEngine} with the new state currently available in the {@link HollowWriteStateEngine}.
* <p>
* It is assumed that the readEngine is currently populated with the prior state from the writeEngine.
*
* @param writeEngine the write state engine
* @param readEngine the read state engine
* @throws IOException if the round trip from write to read state failed
*/
public static void roundTripDelta(HollowWriteStateEngine writeEngine, HollowReadStateEngine readEngine) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
HollowBlobWriter writer = new HollowBlobWriter(writeEngine);
writer.writeDelta(baos);
HollowBlobReader reader = new HollowBlobReader(readEngine);
try (HollowBlobInput hbi = HollowBlobInput.serial(baos.toByteArray())) {
reader.applyDelta(hbi);
}
writeEngine.prepareForNextCycle();
}
}
| 9,045 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/RemovedOrdinalIterator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import static com.netflix.hollow.core.HollowConstants.ORDINAL_NONE;
import com.netflix.hollow.core.read.engine.HollowReadStateEngine;
import com.netflix.hollow.core.read.engine.PopulatedOrdinalListener;
import java.util.BitSet;
/**
* A utility to iterate over the ordinals which were removed during the last delta transition applied to a {@link HollowReadStateEngine}.
*/
public class RemovedOrdinalIterator {
private final BitSet previousOrdinals;
private final BitSet populatedOrdinals;
private final int previousOrdinalsLength;
private int ordinal = ORDINAL_NONE;
public RemovedOrdinalIterator(PopulatedOrdinalListener listener) {
this(listener.getPreviousOrdinals(), listener.getPopulatedOrdinals());
}
public RemovedOrdinalIterator(BitSet previousOrdinals, BitSet populatedOrdinals) {
this(previousOrdinals, populatedOrdinals, false);
}
public RemovedOrdinalIterator(PopulatedOrdinalListener listener, boolean flip) {
this(listener.getPreviousOrdinals(), listener.getPopulatedOrdinals(), flip);
}
public RemovedOrdinalIterator(BitSet previousOrdinals, BitSet populatedOrdinals, boolean flip) {
if (!flip) {
this.previousOrdinals = previousOrdinals;
this.populatedOrdinals = populatedOrdinals;
this.previousOrdinalsLength = previousOrdinals.length();
} else {
this.previousOrdinals = populatedOrdinals;
this.populatedOrdinals = previousOrdinals;
this.previousOrdinalsLength = populatedOrdinals.length();
}
}
public int next() {
while(ordinal < previousOrdinalsLength) {
ordinal = populatedOrdinals.nextClearBit(ordinal + 1);
if(previousOrdinals.get(ordinal))
return ordinal;
}
return ORDINAL_NONE;
}
public void reset() {
ordinal = ORDINAL_NONE;
}
public int countTotal() {
int bookmark = ordinal;
reset();
int count = 0;
while(next() != ORDINAL_NONE)
count++;
ordinal = bookmark;
return count;
}
}
| 9,046 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/DefaultHashCodeFinder.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import com.netflix.hollow.api.objects.HollowRecord;
import java.util.HashSet;
import java.util.Set;
@Deprecated
public class DefaultHashCodeFinder implements HollowObjectHashCodeFinder {
public static final DefaultHashCodeFinder INSTANCE = new DefaultHashCodeFinder();
private final Set<String> typesWithDefinedHashCodes;
public DefaultHashCodeFinder(String... typesWithDefinedHashCodes) {
this.typesWithDefinedHashCodes = new HashSet<String>(typesWithDefinedHashCodes.length);
for(String type : typesWithDefinedHashCodes) {
this.typesWithDefinedHashCodes.add(type);
}
}
@Deprecated
@Override
public int hashCode(int ordinal, Object objectToHash) {
return hashCode(null, ordinal, objectToHash);
}
@Deprecated
@Override
public int hashCode(Object objectToHash) {
return hashCode(null, objectToHash);
}
public int hashCode(String typeName, int ordinal, Object objectToHash) {
if(!typesWithDefinedHashCodes.contains(typeName))
return ordinal;
return objectToHash.hashCode();
}
public int hashCode(String typeName, Object objectToHash) {
if(objectToHash instanceof HollowRecord)
return ((HollowRecord)objectToHash).getOrdinal();
return objectToHash.hashCode();
}
@Override
public Set<String> getTypesWithDefinedHashCodes() {
return typesWithDefinedHashCodes;
}
}
| 9,047 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/util/HollowRecordCollection.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.util;
import java.util.AbstractCollection;
import java.util.BitSet;
import java.util.Iterator;
public abstract class HollowRecordCollection<T> extends AbstractCollection<T> {
private final BitSet populatedOrdinals;
public HollowRecordCollection(BitSet populatedOrdinals) {
this.populatedOrdinals = populatedOrdinals;
}
@Override
public Iterator<T> iterator() {
return new Iterator<T>() {
private int ordinal = populatedOrdinals.nextSetBit(0);
public boolean hasNext() {
return ordinal != -1;
}
@Override
public T next() {
T t = getForOrdinal(ordinal);
ordinal = populatedOrdinals.nextSetBit(ordinal + 1);
return t;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
@Override
public int size() {
return populatedOrdinals.cardinality();
}
protected abstract T getForOrdinal(int ordinal);
}
| 9,048 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/HollowReadFieldUtils.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowObjectTypeDataAccess;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import java.util.Arrays;
/**
* Useful utility methods for interacting with a {@link HollowDataAccess}.
*/
public class HollowReadFieldUtils {
/**
* Hash a field in an OBJECT record.
*
* @param typeAccess the data access
* @param ordinal the ordinal
* @param fieldPosition the field position
* @return the hash code
*/
public static int fieldHashCode(HollowObjectTypeDataAccess typeAccess, int ordinal, int fieldPosition) {
HollowObjectSchema schema = typeAccess.getSchema();
switch(schema.getFieldType(fieldPosition)) {
case BOOLEAN:
Boolean bool = typeAccess.readBoolean(ordinal, fieldPosition);
return booleanHashCode(bool);
case BYTES:
case STRING:
return typeAccess.findVarLengthFieldHashCode(ordinal, fieldPosition);
case DOUBLE:
double d = typeAccess.readDouble(ordinal, fieldPosition);
return doubleHashCode(d);
case FLOAT:
float f = typeAccess.readFloat(ordinal, fieldPosition);
return floatHashCode(f);
case INT:
return intHashCode(typeAccess.readInt(ordinal, fieldPosition));
case LONG:
long l = typeAccess.readLong(ordinal, fieldPosition);
return longHashCode(l);
case REFERENCE:
return typeAccess.readOrdinal(ordinal, fieldPosition);
}
throw new IllegalStateException("I don't know how to hash a " + schema.getFieldType(fieldPosition));
}
public static int hashObject(Object value) {
if (value == null) {
return 0;
}
if(value instanceof Integer) {
return HollowReadFieldUtils.intHashCode((Integer)value);
} else if(value instanceof String) {
return HollowReadFieldUtils.stringHashCode((String)value);
} else if(value instanceof Float) {
return HollowReadFieldUtils.floatHashCode((Float)value);
} else if(value instanceof Double) {
return HollowReadFieldUtils.doubleHashCode((Double)value);
} else if(value instanceof Boolean) {
return HollowReadFieldUtils.booleanHashCode((Boolean) value);
} else if(value instanceof Long) {
return HollowReadFieldUtils.longHashCode((Long) value);
} else if(value instanceof byte[]) {
return HollowReadFieldUtils.byteArrayHashCode((byte[]) value);
} else {
throw new RuntimeException("Unable to hash field of type " + value.getClass().getName());
}
}
/**
* Determine whether two OBJECT field records are exactly equal.
*
* @param typeAccess1 the first type access
* @param ordinal1 the first ordinal
* @param fieldPosition1 the first field positiom
* @param typeAccess2 the second type access
* @param ordinal2 the second ordinal
* @param fieldPosition2 the second field position
*
* @return if the two OBJECT field records are exactly equal
*/
public static boolean fieldsAreEqual(HollowObjectTypeDataAccess typeAccess1, int ordinal1, int fieldPosition1, HollowObjectTypeDataAccess typeAccess2, int ordinal2, int fieldPosition2) {
HollowObjectSchema schema1 = typeAccess1.getSchema();
switch(schema1.getFieldType(fieldPosition1)) {
case BOOLEAN:
Boolean bool1 = typeAccess1.readBoolean(ordinal1, fieldPosition1);
Boolean bool2 = typeAccess2.readBoolean(ordinal2, fieldPosition2);
return bool1 == bool2;
case BYTES:
byte[] data1 = typeAccess1.readBytes(ordinal1, fieldPosition1);
byte[] data2 = typeAccess2.readBytes(ordinal2, fieldPosition2);
return Arrays.equals(data1, data2);
case DOUBLE:
double d1 = typeAccess1.readDouble(ordinal1, fieldPosition1);
double d2 = typeAccess2.readDouble(ordinal2, fieldPosition2);
return Double.compare(d1, d2) == 0;
case FLOAT:
float f1 = typeAccess1.readFloat(ordinal1, fieldPosition1);
float f2 = typeAccess2.readFloat(ordinal2, fieldPosition2);
return Float.compare(f1, f2) == 0;
case INT:
int i1 = typeAccess1.readInt(ordinal1, fieldPosition1);
int i2 = typeAccess2.readInt(ordinal2, fieldPosition2);
return i1 == i2;
case LONG:
long l1 = typeAccess1.readLong(ordinal1, fieldPosition1);
long l2 = typeAccess2.readLong(ordinal2, fieldPosition2);
return l1 == l2;
case STRING:
String s1 = typeAccess1.readString(ordinal1, fieldPosition1);
return typeAccess2.isStringFieldEqual(ordinal2, fieldPosition2, s1);
case REFERENCE:
if(typeAccess1 == typeAccess2 && fieldPosition1 == fieldPosition2)
return typeAccess1.readOrdinal(ordinal1, fieldPosition1) == typeAccess2.readOrdinal(ordinal2, fieldPosition2);
default:
}
throw new IllegalStateException("I don't know how to test equality for a " + schema1.getFieldType(fieldPosition1));
}
/**
* @param typeAccess the type access
* @param ordinal the ordinal
* @param fieldPosition the field position
* @return a displayable String for a field from an OBJECT record.
*/
public static String displayString(HollowObjectTypeDataAccess typeAccess, int ordinal, int fieldPosition) {
HollowObjectSchema schema = typeAccess.getSchema();
switch(schema.getFieldType(fieldPosition)) {
case BOOLEAN:
Boolean bool = typeAccess.readBoolean(ordinal, fieldPosition);
return String.valueOf(bool);
case BYTES:
case STRING:
return typeAccess.readString(ordinal, fieldPosition);
case DOUBLE:
double d = typeAccess.readDouble(ordinal, fieldPosition);
return String.valueOf(d);
case FLOAT:
float f = typeAccess.readFloat(ordinal, fieldPosition);
return String.valueOf(f);
case INT:
return String.valueOf(typeAccess.readInt(ordinal, fieldPosition));
case LONG:
long l = typeAccess.readLong(ordinal, fieldPosition);
return String.valueOf(l);
default:
}
throw new IllegalStateException("I don't know how to display a " + schema.getFieldType(fieldPosition));
}
/**
* @param typeAccess the type access
* @param ordinal the ordinal
* @param fieldPosition the field position
* @return an appropriate Object representing a Hollow OBJECT record field's value
*/
public static Object fieldValueObject(HollowObjectTypeDataAccess typeAccess, int ordinal, int fieldPosition) {
HollowObjectSchema schema = typeAccess.getSchema();
switch(schema.getFieldType(fieldPosition)) {
case BOOLEAN:
return typeAccess.readBoolean(ordinal, fieldPosition);
case BYTES:
return typeAccess.readBytes(ordinal, fieldPosition);
case STRING:
return typeAccess.readString(ordinal, fieldPosition);
case DOUBLE:
double d = typeAccess.readDouble(ordinal, fieldPosition);
return Double.isNaN(d) ? null : Double.valueOf(d);
case FLOAT:
float f = typeAccess.readFloat(ordinal, fieldPosition);
return Float.isNaN(f) ? null : Float.valueOf(f);
case INT:
int i = typeAccess.readInt(ordinal, fieldPosition);
if(i == Integer.MIN_VALUE)
return null;
return Integer.valueOf(i);
case LONG:
long l = typeAccess.readLong(ordinal, fieldPosition);
if(l == Long.MIN_VALUE)
return null;
return Long.valueOf(l);
case REFERENCE:
int refOrdinal = typeAccess.readOrdinal(ordinal, fieldPosition);
if(refOrdinal < 0)
return null;
return Integer.valueOf(refOrdinal);
default:
}
throw new IllegalStateException("Can't handle fieldType=" + schema.getFieldType(fieldPosition) + " for schema=" + schema.getName() + ", fieldPosition=" + fieldPosition);
}
/**
* @param typeAccess the type access
* @param ordinal the ordinal
* @param fieldPosition the field position
* @param testObject the object to test
* @return whether the provided Object matches a Hollow OBJECT record's value.
*/
public static boolean fieldValueEquals(HollowObjectTypeDataAccess typeAccess, int ordinal, int fieldPosition, Object testObject) {
HollowObjectSchema schema = typeAccess.getSchema();
switch(schema.getFieldType(fieldPosition)) {
case BOOLEAN:
if(testObject instanceof Boolean)
return testObject.equals(typeAccess.readBoolean(ordinal, fieldPosition));
return testObject == null && typeAccess.readBoolean(ordinal, fieldPosition) == null;
case BYTES:
if(testObject instanceof byte[])
return Arrays.equals(typeAccess.readBytes(ordinal, fieldPosition), (byte[])testObject);
return testObject == null && typeAccess.readBytes(ordinal, fieldPosition) == null;
case STRING:
if(testObject instanceof String)
return testObject.equals(typeAccess.readString(ordinal, fieldPosition));
return testObject == null && typeAccess.readString(ordinal, fieldPosition) == null;
case DOUBLE:
if(testObject instanceof Double)
return testObject.equals(typeAccess.readDouble(ordinal, fieldPosition));
return testObject == null && !Double.isNaN(typeAccess.readDouble(ordinal, fieldPosition));
case FLOAT:
if(testObject instanceof Float)
return testObject.equals(typeAccess.readFloat(ordinal, fieldPosition));
return testObject == null && !Float.isNaN(typeAccess.readFloat(ordinal, fieldPosition));
case INT:
if(testObject instanceof Integer)
return testObject.equals(typeAccess.readInt(ordinal, fieldPosition));
return testObject == null && typeAccess.readInt(ordinal, fieldPosition) == Integer.MIN_VALUE;
case LONG:
if(testObject instanceof Long)
return testObject.equals(typeAccess.readLong(ordinal, fieldPosition));
return testObject == null && typeAccess.readLong(ordinal, fieldPosition) == Long.MIN_VALUE;
case REFERENCE:
if(testObject instanceof Integer)
return testObject.equals(typeAccess.readOrdinal(ordinal, fieldPosition));
return testObject == null && typeAccess.readOrdinal(ordinal, fieldPosition) < 0;
default:
}
throw new IllegalStateException("I don't know how to test equality for a " + schema.getFieldType(fieldPosition));
}
/**
* @param data the byte array
* @return The hash code for a byte array which would be returned from {@link #fieldHashCode(HollowObjectTypeDataAccess, int, int)}
*/
public static int byteArrayHashCode(byte[] data) {
return HashCodes.hashCode(data);
}
/**
* @param str the string value
* @return The hash code for a String which would be returned from {@link #fieldHashCode(HollowObjectTypeDataAccess, int, int)}
*/
public static int stringHashCode(String str) {
return HashCodes.hashCode(str);
}
/**
* @param bool the boolean value
* @return The hash code for a boolean which would be returned from {@link #fieldHashCode(HollowObjectTypeDataAccess, int, int)}
*/
public static int booleanHashCode(Boolean bool) {
return bool == null ? -1 : bool ? 1 : 0;
}
/**
* @param l the long value
* @return The hash code for a long which would be returned from {@link #fieldHashCode(HollowObjectTypeDataAccess, int, int)}
*/
public static int longHashCode(long l) {
return (int)l ^ (int)(l >> 32);
}
/**
* @param i the int value
* @return The hash code for an int which would be returned from {@link #fieldHashCode(HollowObjectTypeDataAccess, int, int)}
*/
public static int intHashCode(int i) {
return i;
}
/**
* @param f the float value
* @return The hash code for a float which would be returned from {@link #fieldHashCode(HollowObjectTypeDataAccess, int, int)}
*/
public static int floatHashCode(float f) {
return Float.floatToIntBits(f);
}
/**
* @param d the double value
* @return The hash code for a double which would be returned from {@link #fieldHashCode(HollowObjectTypeDataAccess, int, int)}
*/
public static int doubleHashCode(double d) {
long bits = Double.doubleToLongBits(d);
return longHashCode(bits);
}
}
| 9,049 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/HollowBlobInput.java | package com.netflix.hollow.core.read;
import static com.netflix.hollow.core.memory.MemoryMode.ON_HEAP;
import static com.netflix.hollow.core.memory.MemoryMode.SHARED_MEMORY_LAZY;
import static com.netflix.hollow.core.memory.encoding.BlobByteBuffer.MAX_SINGLE_BUFFER_CAPACITY;
import com.netflix.hollow.api.consumer.HollowConsumer;
import com.netflix.hollow.core.memory.MemoryMode;
import com.netflix.hollow.core.memory.encoding.BlobByteBuffer;
import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
/**
* This class provides an abstraction to help navigate between use of DataInputStream or RandomAccessFile
* as the underlying resource for Hollow Producer/Consumer Blob to support the different memory modes.
*/
public class HollowBlobInput implements Closeable {
private final MemoryMode memoryMode;
private Object input;
private BlobByteBuffer buffer;
private HollowBlobInput(MemoryMode memoryMode) {
this.memoryMode = memoryMode;
}
public MemoryMode getMemoryMode() {
return memoryMode;
}
/**
* Initialize the Hollow Blob Input object from the Hollow Consumer blob's Input Stream or Random Access File,
* depending on the configured memory mode. The returned HollowBlobInput object must be closed to free up resources.
*
* @param mode Configured memory mode
* @param blob Hollow Consumer blob
* @return the initialized Hollow Blob Input
* @throws IOException if the Hollow Blob Input couldn't be initialized
*/
public static HollowBlobInput modeBasedSelector(MemoryMode mode, HollowConsumer.Blob blob) throws IOException {
if (mode.equals(ON_HEAP)) {
return serial(blob.getInputStream());
} else if (mode.equals(SHARED_MEMORY_LAZY)) {
return randomAccess(blob.getFile());
} else {
throw new UnsupportedOperationException();
}
}
/**
* Initialize the Hollow Blob Input object from the Hollow Consumer blob's Input Stream or Random Access File,
* depending on the configured memory mode. The returned HollowBlobInput object must be closed to free up resources.
*
* @param mode Configured memory mode
* @param input Hollow Consumer blob
* @param partName the name of the optional part
* @return the initialized Hollow Blob Input
* @throws IOException if the Hollow Blob Input couldn't be initialized
*/
public static HollowBlobInput modeBasedSelector(MemoryMode mode, OptionalBlobPartInput input, String partName) throws IOException {
if (mode.equals(ON_HEAP)) {
return serial(input.getInputStream(partName));
} else if (mode.equals(SHARED_MEMORY_LAZY)) {
return randomAccess(input.getFile(partName));
} else {
throw new UnsupportedOperationException();
}
}
/**
* Initialize a random access Hollow Blob input object from a file. The returned HollowBlobInput object must be
* closed to free up resources.
*
* @param f file containing the Hollow blob
* @return a random access HollowBlobInput object
* @throws IOException if the mmap operation reported an IOException
*/
public static HollowBlobInput randomAccess(File f) throws IOException {
return randomAccess(f, MAX_SINGLE_BUFFER_CAPACITY);
}
/**
* Useful for testing with custom buffer capacity
*/
public static HollowBlobInput randomAccess(File f,int singleBufferCapacity) throws IOException {
HollowBlobInput hbi = new HollowBlobInput(SHARED_MEMORY_LAZY);
RandomAccessFile raf = new RandomAccessFile(f, "r");
hbi.input = raf;
FileChannel channel = ((RandomAccessFile) hbi.input).getChannel();
hbi.buffer = BlobByteBuffer.mmapBlob(channel, singleBufferCapacity);
return hbi;
}
/**
* Shorthand for calling {@link HollowBlobInput#serial(InputStream)} on a byte[]
*/
public static HollowBlobInput serial(byte[] bytes) {
InputStream is = new ByteArrayInputStream(bytes);
return serial(is);
}
/**
* Initialize a serial access Hollow Blob input object from an input stream. The returned HollowBlobInput object
* must be closed to free up resources.
*
* @param is input stream containing for Hollow blob data
* @return a serial access HollowBlobInput object
*/
public static HollowBlobInput serial(InputStream is) {
HollowBlobInput hbi = new HollowBlobInput(ON_HEAP);
hbi.input = new DataInputStream(is);
return hbi;
}
/**
* Reads the next byte of data from the input stream by relaying the call to the underlying {@code DataInputStream} or
* {@code RandomAccessFile}. The byte is returned as an integer in the range 0 to 255.
*
* @return an integer in the range 0 to 255
* @throws IOException if underlying {@code DataInputStream} or {@code RandomAccessFile}
* @throws UnsupportedOperationException if the input type wasn't one of {@code DataInputStream} or {@code RandomAccessFile}
*/
public int read() throws IOException {
if (input instanceof RandomAccessFile) {
return ((RandomAccessFile) input).read();
} else if (input instanceof DataInputStream) {
return ((DataInputStream) input).read();
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* Reads up to {@code len} bytes of data from the HollowBlobInput by relaying the call to the underlying
* {@code DataInputStream} or {@code RandomAccessFile} into an array of bytes. This method blocks until at
* least one byte of input is available.
*
* @return an integer in the range 0 to 255
* @throws IOException if underlying {@code DataInputStream} or {@code RandomAccessFile}
* @throws UnsupportedOperationException if the input type wasn't one of {@code DataInputStream} or {@code RandomAccessFile}
*/
public int read(byte b[], int off, int len) throws IOException {
if (input instanceof RandomAccessFile) {
return ((RandomAccessFile) input).read(b, off, len);
} else if (input instanceof DataInputStream) {
return ((DataInputStream) input).read(b, off, len);
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* Sets the file-pointer to the desired offset measured from the beginning of the file by relaying the call to the
* underlying {@code RandomAccessFile}. Operation not supported if the Hollow Blob Input is an {@code DataInputStream}.
*
* @param pos the position in bytes from the beginning of the file at which to set the file pointer to.
* @exception IOException if originated in the underlying {@code RandomAccessFile} implementation
* @exception UnsupportedOperationException if called when Hollow Blob Input is not a {@code RandomAccessFile}
*/
public void seek(long pos) throws IOException {
if (input instanceof RandomAccessFile) {
((RandomAccessFile) input).seek(pos);
} else if (input instanceof DataInputStream) {
throw new UnsupportedOperationException("Can not seek on Hollow Blob Input of type DataInputStream");
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* Returns the current offset in this input at which the next read would occur.
*
* @return current offset from the beginning of the file, in bytes
* @exception IOException if an I/O error occurs.
*/
public long getFilePointer() throws IOException {
if (input instanceof RandomAccessFile) {
return ((RandomAccessFile) input).getFilePointer();
} else if (input instanceof DataInputStream) {
throw new UnsupportedOperationException("Can not get file pointer for Hollow Blob Input of type DataInputStream");
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* Reads two bytes from the input (at the current file pointer) into a signed 16-bit short, and advances the offset
* in input.
*
* @return short value read from current offset in input
* @exception IOException if an I/O error occurs.
*/
public final short readShort() throws IOException {
if (input instanceof RandomAccessFile) {
return ((RandomAccessFile) input).readShort();
} else if (input instanceof DataInputStream) {
return ((DataInputStream) input).readShort();
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* Reads 4 bytes from the input (at the current file pointer) into a signed 32-bit int, and advances the offset
* in input.
*
* @return int value read from current offset in input
* @exception IOException if an I/O error occurs.
*/
public final int readInt() throws IOException {
if (input instanceof RandomAccessFile) {
return ((RandomAccessFile) input).readInt();
} else if (input instanceof DataInputStream) {
return ((DataInputStream) input).readInt();
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* Reads 8 bytes from the input (at the current file pointer) into a signed 64-bit long, and advances the offset
* in input.
*
* @return long value read from current offset in input
* @exception IOException if an I/O error occurs.
*/
public final long readLong() throws IOException {
if (input instanceof RandomAccessFile) {
return ((RandomAccessFile) input).readLong();
} else if (input instanceof DataInputStream) {
return ((DataInputStream) input).readLong();
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* Reads in a string from this file, encoded using <a href="DataInput.html#modified-utf-8">modified UTF-8</a>
* format, and advances the offset in input.
* @return UTF-8 string read from current offset in input
* @exception IOException if an I/O error occurs.
*/
public final String readUTF() throws IOException {
if (input instanceof RandomAccessFile) {
return ((RandomAccessFile) input).readUTF();
} else if (input instanceof DataInputStream) {
return ((DataInputStream) input).readUTF();
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* This method attempts to skip a specified number of bytes and returns the actual number of bytes skipped. The
* behavior is differed based on whether the backing resource is a RandomAccessFile or InputStream. For InputStream,
* (as implemented in FileInputStream) this method may skip more bytes than what are remaining in the backing file.
* It will produce no exception and the number of bytes skipped may include some number of bytes that were beyond the
* EOF of the backing file. The next read attempt from the stream after skipping past the end will result in -1
* indicating the end of the file was reached. For RandomAccessFile, this method will return the actual bytes skipped
* and does not go past EOF.
*
* @param n number of bytes to skip
* @return number of bytes skipped
* @throws IOException
*/
public long skipBytes(long n) throws IOException {
if (input instanceof RandomAccessFile) {
long total = 0;
int expected = 0;
int actual = 0;
do {
expected = (n-total) > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) (n-total);
actual = ((RandomAccessFile) input).skipBytes(expected); // RandomAccessFile::skipBytes supports int
total = total + actual;
} while (total < n && actual > 0);
return total;
} else if (input instanceof DataInputStream) {
return ((DataInputStream) input).skip(n); // InputStream::skip supports long
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
/**
* Closes underlying InputStream/RandomAccessFile and releases any system resources associated with the Hollow Blob Input.
* @throws IOException
*/
@Override
public void close() throws IOException {
if (input instanceof RandomAccessFile) {
((RandomAccessFile) input).close();
} else if (input instanceof DataInputStream) {
((DataInputStream) input).close();
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
public Object getInput() {
return input;
}
public BlobByteBuffer getBuffer() {
if (input instanceof RandomAccessFile) {
return buffer;
} else if (input instanceof DataInputStream) {
throw new UnsupportedOperationException("No buffer associated with underlying DataInputStream");
} else {
throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
}
}
| 9,050 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/OptionalBlobPartInput.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read;
import com.netflix.hollow.core.memory.MemoryMode;
import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class OptionalBlobPartInput implements Closeable {
private final Map<String, Object> inputsByPartName;
private final List<InputStream> streamsToClose;
public OptionalBlobPartInput() {
this.inputsByPartName = new HashMap<>();
this.streamsToClose = new ArrayList<>();
}
public void addInput(String partName, File file) {
inputsByPartName.put(partName, file);
}
public void addInput(String partName, InputStream in) {
streamsToClose.add(in);
inputsByPartName.put(partName, in);
}
public File getFile(String partName) {
Object f = inputsByPartName.get(partName);
if(f instanceof File)
return (File)f;
throw new UnsupportedOperationException();
}
public InputStream getInputStream(String partName) throws IOException {
Object o = inputsByPartName.get(partName);
if(o instanceof File) {
InputStream stream = new BufferedInputStream(new FileInputStream((File)o));
streamsToClose.add(stream);
return stream;
}
return (InputStream)o;
}
public Set<String> getPartNames() {
return inputsByPartName.keySet();
}
public Map<String, HollowBlobInput> getInputsByPartName(MemoryMode mode) throws IOException {
Map<String, HollowBlobInput> map = new HashMap<>(inputsByPartName.size());
for(String part : getPartNames()) {
map.put(part, HollowBlobInput.modeBasedSelector(mode, this, part));
}
return map;
}
public Map<String, InputStream> getInputStreamsByPartName() throws IOException {
Map<String, InputStream> map = new HashMap<>(inputsByPartName.size());
for(String part : getPartNames()) {
map.put(part, getInputStream(part));
}
return map;
}
@Override
public void close() throws IOException {
IOException thrownException = null;
for(InputStream is : streamsToClose) {
try {
is.close();
} catch(IOException ex) {
thrownException = ex;
}
}
if(thrownException != null)
throw thrownException;
}
}
| 9,051 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/HollowTypeDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.schema.HollowSchema;
/**
* A handle for all of the records of a specific type in a Hollow dataset. The most common type of {@link HollowTypeDataAccess}
* is a {@link HollowTypeReadState}.
*/
public interface HollowTypeDataAccess {
/**
* @return The {@link HollowDataAccess} for the dataset this type belongs to.
*/
HollowDataAccess getDataAccess();
/**
* @return the {@link HollowSchema} for this type.
*/
HollowSchema getSchema();
void setSamplingDirector(HollowSamplingDirector director);
void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director);
void ignoreUpdateThreadForSampling(Thread t);
HollowSampler getSampler();
/**
* Optional operation
* @return the read state
*/
HollowTypeReadState getTypeState();
}
| 9,052 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/HollowListTypeDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess;
import com.netflix.hollow.core.read.engine.list.HollowListTypeReadState;
import com.netflix.hollow.core.schema.HollowListSchema;
/**
* A handle for all of the records of a specific LIST type in a Hollow dataset. The most common type of {@link HollowListTypeDataAccess}
* is a {@link HollowListTypeReadState}.
*
* @see HollowListSchema
*/
public interface HollowListTypeDataAccess extends HollowCollectionTypeDataAccess {
HollowListSchema getSchema();
/**
* @param ordinal the oridinal
* @param listIndex the list index
* @return the element at the specified listIndex from the list record at the specified ordinal
*/
int getElementOrdinal(int ordinal, int listIndex);
}
| 9,053 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/HollowDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess;
import com.netflix.hollow.core.HollowDataset;
import com.netflix.hollow.core.read.engine.HollowReadStateEngine;
import com.netflix.hollow.core.read.missing.MissingDataHandler;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.util.HollowObjectHashCodeFinder;
import java.util.Collection;
import java.util.List;
/**
* A {@link HollowDataAccess} is a consumer's root handle to a Hollow dataset.
* <p>
* The most common type of {@link HollowDataAccess} is a {@link HollowReadStateEngine}.
*
*/
public interface HollowDataAccess extends HollowDataset {
/**
* @param typeName the type name
* @return The handle to data for a specific type in this dataset.
*/
HollowTypeDataAccess getTypeDataAccess(String typeName);
/**
* @param typeName The type name
* @param ordinal optional parameter. When known, may provide a more optimal data access implementation for traversal of historical data access.
* @return The handle to data for a specific type in this dataset.
*/
HollowTypeDataAccess getTypeDataAccess(String typeName, int ordinal);
/**
* @return The names of all types in this dataset
*/
Collection<String> getAllTypes();
@Override
List<HollowSchema> getSchemas();
@Override
HollowSchema getSchema(String name);
@Deprecated
HollowObjectHashCodeFinder getHashCodeFinder();
MissingDataHandler getMissingDataHandler();
void resetSampling();
boolean hasSampleResults();
}
| 9,054 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/HollowSetTypeDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess;
import com.netflix.hollow.core.read.engine.set.HollowSetTypeReadState;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowSetSchema;
/**
* A handle for all of the records of a specific SET type in a Hollow dataset. The most common type of {@link HollowSetTypeDataAccess}
* is a {@link HollowSetTypeReadState}.
*
* @see HollowSetSchema
*/
public interface HollowSetTypeDataAccess extends HollowCollectionTypeDataAccess {
HollowSetSchema getSchema();
/**
* Note that this method will only reliably work on unhashed sets (sets without a defined hash key or custom defined hash code).
* <p>
* Generally, the method {@link #findElement(int, Object...)} may be more useful.
*
* @param ordinal the ordinal
* @param value the value
* @return whether or not the <b>unhashed</b> set at the specified ordinal contains the specified element ordinal.
*/
boolean contains(int ordinal, int value);
/**
* Generally, the method {@link #findElement(int, Object...)} may be more useful.
*
* @param ordinal the ordinal
* @param value the value
* @param hashCode the hash code
* @return whether or not the set at the specified ordinal contains the specified element ordinal with the specified hashCode.
*/
boolean contains(int ordinal, int value, int hashCode);
/**
* Returns The matching ordinal of the element from the set at the specified ordinal which matches the provided hash key.
*
* @param ordinal the ordinal
* @param hashKey the hash keys
* @return the matching element's ordinal, or {@link com.netflix.hollow.core.HollowConstants#ORDINAL_NONE} if no such element exists.
*/
int findElement(int ordinal, Object... hashKey);
int relativeBucketValue(int ordinal, int bucketIndex);
/**
* @param ordinal the ordinal
* @param hashCode the hash code
* @return a {@link HollowOrdinalIterator} over any elements from the set at the specified which potentially match the specified hashCode.
*/
HollowOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode);
}
| 9,055 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/HollowMapTypeDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess;
import com.netflix.hollow.core.read.engine.map.HollowMapTypeReadState;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
import com.netflix.hollow.core.schema.HollowMapSchema;
/**
* A handle for all of the records of a specific MAP type in a Hollow dataset. The most common type of {@link HollowMapTypeDataAccess}
* is a {@link HollowMapTypeReadState}.
*
* @see HollowMapSchema
*/
public interface HollowMapTypeDataAccess extends HollowTypeDataAccess {
public HollowMapSchema getSchema();
public int size(int ordinal);
public int get(int ordinal, int keyOrdinal);
public int get(int ordinal, int keyOrdinal, int hashCode);
public int findKey(int ordinal, Object... hashKey);
public int findValue(int ordinal, Object... hashKey);
public long findEntry(int ordinal, Object... hashKey);
public HollowMapEntryOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode);
public HollowMapEntryOrdinalIterator ordinalIterator(int ordinal);
public long relativeBucket(int ordinal, int bucketIndex);
}
| 9,056 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/HollowObjectTypeDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess;
import com.netflix.hollow.core.read.engine.object.HollowObjectTypeReadState;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
/**
* A handle for all of the records of a specific OBJECT type in a Hollow dataset. The most common type of {@link HollowObjectTypeDataAccess}
* is a {@link HollowObjectTypeReadState}.
*
* @see HollowObjectSchema
*/
public interface HollowObjectTypeDataAccess extends HollowTypeDataAccess {
HollowObjectSchema getSchema();
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return whether or not the record with the specified ordinal's field at the specified field index is null.
*/
boolean isNull(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return the {@link FieldType#REFERENCE} field's value at the specified fieldIndex for the specified ordinal.
*/
int readOrdinal(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return the {@link FieldType#INT} field's value at the specified fieldIndex for the specified ordinal.
*/
int readInt(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return the {@link FieldType#FLOAT} field's value at the specified fieldIndex for the specified ordinal.
*/
float readFloat(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return the {@link FieldType#DOUBLE} field's value at the specified fieldIndex for the specified ordinal.
*/
double readDouble(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return the {@link FieldType#LONG} field's value at the specified fieldIndex for the specified ordinal.
*/
long readLong(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return the {@link FieldType#BOOLEAN} field's value at the specified fieldIndex for the specified ordinal.
*/
Boolean readBoolean(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return the {@link FieldType#BYTES} field's value at the specified fieldIndex for the specified ordinal.
*/
byte[] readBytes(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return the {@link FieldType#STRING} field's value at the specified fieldIndex for the specified ordinal.
*/
String readString(int ordinal, int fieldIndex);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @param testValue the value to get against
* @return whether or not the {@link FieldType#STRING} field's value at the specified fieldIndex for the specified ordinal is exactly equal to the given value.
*/
boolean isStringFieldEqual(int ordinal, int fieldIndex, String testValue);
/**
* @param ordinal the ordinal
* @param fieldIndex the field index
* @return a hashCode for the {@link FieldType#BYTES} or {@link FieldType#STRING} field's value at the specified fieldIndex for the specified ordinal.
*/
int findVarLengthFieldHashCode(int ordinal, int fieldIndex);
}
| 9,057 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/HollowCollectionTypeDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess;
import com.netflix.hollow.core.read.engine.HollowCollectionTypeReadState;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowCollectionSchema;
import com.netflix.hollow.core.schema.HollowListSchema;
import com.netflix.hollow.core.schema.HollowSetSchema;
/**
* A handle for all of the records of a specific LIST or SET type in a Hollow dataset. The most common type of {@link HollowCollectionTypeDataAccess}
* is a {@link HollowCollectionTypeReadState}.
*
* @see HollowListSchema
* @see HollowSetSchema
*/
public interface HollowCollectionTypeDataAccess extends HollowTypeDataAccess {
/**
* @param ordinal the ordinal
* @return the number of elements contained in the set at the specified ordinal.
*/
int size(int ordinal);
/**
* @param ordinal the ordinal
* @return an iterator over all elements in the collection.
*/
HollowOrdinalIterator ordinalIterator(int ordinal);
HollowCollectionSchema getSchema();
}
| 9,058 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/proxy/HollowListProxyDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.proxy;
import com.netflix.hollow.core.read.dataaccess.HollowListTypeDataAccess;
import com.netflix.hollow.core.read.iterator.HollowListOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowListSchema;
/**
* A {@link HollowTypeProxyDataAccess} for a LIST type.
*
* @see HollowProxyDataAccess
*/
public class HollowListProxyDataAccess extends HollowTypeProxyDataAccess implements HollowListTypeDataAccess {
public HollowListProxyDataAccess(HollowProxyDataAccess dataAccess) {
super(dataAccess);
}
@Override
public int size(int ordinal) {
return currentDataAccess().size(ordinal);
}
@Override
public HollowOrdinalIterator ordinalIterator(int ordinal) {
return new HollowListOrdinalIterator(ordinal, this);
}
@Override
public HollowListSchema getSchema() {
return currentDataAccess().getSchema();
}
@Override
public int getElementOrdinal(int ordinal, int listIndex) {
return currentDataAccess().getElementOrdinal(ordinal, listIndex);
}
private HollowListTypeDataAccess currentDataAccess() {
return (HollowListTypeDataAccess)currentDataAccess;
}
}
| 9,059 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/proxy/HollowObjectProxyDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.proxy;
import com.netflix.hollow.core.read.dataaccess.HollowObjectTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.core.schema.HollowObjectSchema;
/**
* A {@link HollowTypeProxyDataAccess} for an OBJECT type.
*
* @see HollowProxyDataAccess
*/
public class HollowObjectProxyDataAccess extends HollowTypeProxyDataAccess implements HollowObjectTypeDataAccess {
public HollowObjectProxyDataAccess(HollowProxyDataAccess dataAccess) {
super(dataAccess);
}
public void setCurrentDataAccess(HollowTypeDataAccess currentDataAccess) {
this.currentDataAccess = (HollowObjectTypeDataAccess) currentDataAccess;
}
@Override
public HollowObjectSchema getSchema() {
return ((HollowObjectTypeDataAccess) currentDataAccess).getSchema();
}
@Override
public boolean isNull(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).isNull(ordinal, fieldIndex);
}
@Override
public int readOrdinal(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).readOrdinal(ordinal, fieldIndex);
}
@Override
public int readInt(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).readInt(ordinal, fieldIndex);
}
@Override
public float readFloat(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).readFloat(ordinal, fieldIndex);
}
@Override
public double readDouble(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).readDouble(ordinal, fieldIndex);
}
@Override
public long readLong(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).readLong(ordinal, fieldIndex);
}
@Override
public Boolean readBoolean(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).readBoolean(ordinal, fieldIndex);
}
@Override
public byte[] readBytes(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).readBytes(ordinal, fieldIndex);
}
@Override
public String readString(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).readString(ordinal, fieldIndex);
}
@Override
public boolean isStringFieldEqual(int ordinal, int fieldIndex, String testValue) {
return ((HollowObjectTypeDataAccess) currentDataAccess).isStringFieldEqual(ordinal, fieldIndex, testValue);
}
@Override
public int findVarLengthFieldHashCode(int ordinal, int fieldIndex) {
return ((HollowObjectTypeDataAccess) currentDataAccess).findVarLengthFieldHashCode(ordinal, fieldIndex);
}
}
| 9,060 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/proxy/HollowSetProxyDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.proxy;
import com.netflix.hollow.core.read.dataaccess.HollowSetTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.core.read.engine.set.PotentialMatchHollowSetOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowSetOrdinalIterator;
import com.netflix.hollow.core.schema.HollowSetSchema;
/**
* A {@link HollowTypeProxyDataAccess} for a SET type.
*
* @see HollowProxyDataAccess
*/
public class HollowSetProxyDataAccess extends HollowTypeProxyDataAccess implements HollowSetTypeDataAccess{
public HollowSetProxyDataAccess(HollowProxyDataAccess dataAccess) {
super(dataAccess);
}
public void setCurrentDataAccess(HollowTypeDataAccess currentDataAccess) {
this.currentDataAccess = (HollowSetTypeDataAccess) currentDataAccess;
}
@Override
public int size(int ordinal) {
return currentDataAccess().size(ordinal);
}
@Override
public HollowOrdinalIterator ordinalIterator(int ordinal) {
return new HollowSetOrdinalIterator(ordinal, this);
}
@Override
public HollowSetSchema getSchema() {
return currentDataAccess().getSchema();
}
@Override
public boolean contains(int ordinal, int value) {
return currentDataAccess().contains(ordinal, value);
}
@Override
public boolean contains(int ordinal, int value, int hashCode) {
return currentDataAccess().contains(ordinal, value, hashCode);
}
@Override
public int findElement(int ordinal, Object... hashKey) {
return currentDataAccess().findElement(ordinal, hashKey);
}
@Override
public int relativeBucketValue(int ordinal, int bucketIndex) {
return currentDataAccess().relativeBucketValue(ordinal, bucketIndex);
}
@Override
public HollowOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode) {
return new PotentialMatchHollowSetOrdinalIterator(ordinal, this, hashCode);
}
private HollowSetTypeDataAccess currentDataAccess() {
return (HollowSetTypeDataAccess)currentDataAccess;
}
}
| 9,061 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/proxy/HollowProxyDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.proxy;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowListTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowMapTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowObjectTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowSetTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.disabled.HollowDisabledDataAccess;
import com.netflix.hollow.core.read.dataaccess.disabled.HollowListDisabledDataAccess;
import com.netflix.hollow.core.read.dataaccess.disabled.HollowMapDisabledDataAccess;
import com.netflix.hollow.core.read.dataaccess.disabled.HollowObjectDisabledDataAccess;
import com.netflix.hollow.core.read.dataaccess.disabled.HollowSetDisabledDataAccess;
import com.netflix.hollow.core.read.engine.HollowReadStateEngine;
import com.netflix.hollow.core.read.missing.MissingDataHandler;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.util.HollowObjectHashCodeFinder;
import com.netflix.hollow.tools.history.HollowHistoricalStateDataAccess;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* A HollowProxyDataAccess delegates all calls to another {@link HollowDataAccess}.
* <p>
* This is useful when a {@link com.netflix.hollow.api.consumer.HollowConsumer.ObjectLongevityConfig} calls for the object longevity feature to be enabled.
* In this case, when a state transition occurs, all existing objects backed by the latest {@link HollowReadStateEngine}
* will need to be backed by a {@link HollowHistoricalStateDataAccess}.
*
*/
public class HollowProxyDataAccess implements HollowDataAccess {
private HollowDataAccess currentDataAccess;
private final ConcurrentHashMap<String, HollowTypeProxyDataAccess> typeDataAccessMap;
public HollowProxyDataAccess() {
this.typeDataAccessMap = new ConcurrentHashMap<String, HollowTypeProxyDataAccess>();
}
public void setDataAccess(HollowDataAccess currentDataAccess) {
this.currentDataAccess = currentDataAccess;
for(String type : currentDataAccess.getAllTypes()) {
HollowTypeDataAccess typeDataAccess = currentDataAccess.getTypeDataAccess(type);
HollowTypeProxyDataAccess proxyDataAccess = typeDataAccessMap.get(type);
if(proxyDataAccess == null) {
if(typeDataAccess instanceof HollowObjectTypeDataAccess) {
proxyDataAccess = new HollowObjectProxyDataAccess(this);
} else if(typeDataAccess instanceof HollowListTypeDataAccess) {
proxyDataAccess = new HollowListProxyDataAccess(this);
} else if(typeDataAccess instanceof HollowSetTypeDataAccess) {
proxyDataAccess = new HollowSetProxyDataAccess(this);
} else if(typeDataAccess instanceof HollowMapTypeDataAccess) {
proxyDataAccess = new HollowMapProxyDataAccess(this);
}
typeDataAccessMap.put(type, proxyDataAccess);
}
proxyDataAccess.setCurrentDataAccess(typeDataAccess);
}
}
public void disableDataAccess() {
this.currentDataAccess = HollowDisabledDataAccess.INSTANCE;
for(Map.Entry<String, HollowTypeProxyDataAccess> entry : typeDataAccessMap.entrySet()) {
HollowTypeProxyDataAccess proxy = entry.getValue();
if(proxy instanceof HollowObjectProxyDataAccess) {
proxy.setCurrentDataAccess(HollowObjectDisabledDataAccess.INSTANCE);
} else if(proxy instanceof HollowListProxyDataAccess) {
proxy.setCurrentDataAccess(HollowListDisabledDataAccess.INSTANCE);
} else if(proxy instanceof HollowSetProxyDataAccess) {
proxy.setCurrentDataAccess(HollowSetDisabledDataAccess.INSTANCE);
} else if(proxy instanceof HollowMapProxyDataAccess) {
proxy.setCurrentDataAccess(HollowMapDisabledDataAccess.INSTANCE);
}
}
}
@Override
public HollowTypeDataAccess getTypeDataAccess(String typeName) {
return typeDataAccessMap.get(typeName);
}
@Override
public HollowTypeDataAccess getTypeDataAccess(String typeName, int ordinal) {
return typeDataAccessMap.get(typeName);
}
@Override
public HollowObjectHashCodeFinder getHashCodeFinder() {
return currentDataAccess.getHashCodeFinder();
}
@Override
public MissingDataHandler getMissingDataHandler() {
return currentDataAccess.getMissingDataHandler();
}
@Override
public Collection<String> getAllTypes() {
return typeDataAccessMap.keySet();
}
@Override
public List<HollowSchema> getSchemas() {
return currentDataAccess.getSchemas();
}
@Override
public HollowSchema getSchema(String name) {
return currentDataAccess.getSchema(name);
}
@Override
public HollowSchema getNonNullSchema(String name) {
return currentDataAccess.getNonNullSchema(name);
}
@Override
public void resetSampling() {
currentDataAccess.resetSampling();
}
@Override
public boolean hasSampleResults() {
return currentDataAccess.hasSampleResults();
}
public HollowDataAccess getProxiedDataAccess() {
return currentDataAccess;
}
}
| 9,062 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/proxy/HollowTypeProxyDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.proxy;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
/**
* A {@link HollowTypeDataAccess} which delegates all calls to another {@link HollowTypeDataAccess}
*
* @see HollowProxyDataAccess
*/
public abstract class HollowTypeProxyDataAccess implements HollowTypeDataAccess {
protected final HollowProxyDataAccess dataAccess;
protected HollowTypeDataAccess currentDataAccess;
public HollowTypeProxyDataAccess(HollowProxyDataAccess dataAccess) {
this.dataAccess = dataAccess;
}
public void setCurrentDataAccess(HollowTypeDataAccess typeDataAccess) {
this.currentDataAccess = typeDataAccess;
}
public HollowTypeDataAccess getCurrentDataAccess() {
return currentDataAccess;
}
@Override
public HollowDataAccess getDataAccess() {
return dataAccess;
}
@Override
public HollowTypeReadState getTypeState() {
return currentDataAccess.getTypeState();
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
currentDataAccess.setSamplingDirector(director);
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
currentDataAccess.setFieldSpecificSamplingDirector(fieldSpec, director);
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
currentDataAccess.ignoreUpdateThreadForSampling(t);
}
@Override
public HollowSampler getSampler() {
return currentDataAccess.getSampler();
}
}
| 9,063 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/proxy/HollowMapProxyDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.proxy;
import com.netflix.hollow.core.read.dataaccess.HollowMapTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.core.read.engine.map.PotentialMatchHollowMapEntryOrdinalIteratorImpl;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIteratorImpl;
import com.netflix.hollow.core.schema.HollowMapSchema;
/**
* A {@link HollowTypeProxyDataAccess} for a MAP type.
*
* @see HollowProxyDataAccess
*/
public class HollowMapProxyDataAccess extends HollowTypeProxyDataAccess implements HollowMapTypeDataAccess{
public HollowMapProxyDataAccess(HollowProxyDataAccess dataAccess) {
super(dataAccess);
}
public void setCurrentDataAccess(HollowTypeDataAccess currentDataAccess) {
this.currentDataAccess = (HollowMapTypeDataAccess) currentDataAccess;
}
@Override
public HollowMapSchema getSchema() {
return currentDataAccess().getSchema();
}
@Override
public int size(int ordinal) {
return currentDataAccess().size(ordinal);
}
@Override
public int get(int ordinal, int keyOrdinal) {
return currentDataAccess().get(ordinal, keyOrdinal);
}
@Override
public int get(int ordinal, int keyOrdinal, int hashCode) {
return currentDataAccess().get(ordinal, keyOrdinal, hashCode);
}
@Override
public int findKey(int ordinal, Object... hashKey) {
return currentDataAccess().findKey(ordinal, hashKey);
}
@Override
public int findValue(int ordinal, Object... hashKey) {
return currentDataAccess().findValue(ordinal, hashKey);
}
@Override
public long findEntry(int ordinal, Object... hashKey) {
return currentDataAccess().findEntry(ordinal, hashKey);
}
@Override
public HollowMapEntryOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode) {
return new PotentialMatchHollowMapEntryOrdinalIteratorImpl(ordinal, this, hashCode);
}
@Override
public HollowMapEntryOrdinalIterator ordinalIterator(int ordinal) {
return new HollowMapEntryOrdinalIteratorImpl(ordinal, this);
}
@Override
public long relativeBucket(int ordinal, int bucketIndex) {
return currentDataAccess().relativeBucket(ordinal, bucketIndex);
}
private HollowMapTypeDataAccess currentDataAccess() {
return (HollowMapTypeDataAccess)currentDataAccess;
}
}
| 9,064 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/missing/HollowSetMissingDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.missing;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.api.sampling.HollowSetSampler;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowSetTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.read.missing.MissingDataHandler;
import com.netflix.hollow.core.schema.HollowSetSchema;
/**
* Used when an entire SET type, which is expected by a Generated Hollow API, is missing from the actual data.
*/
public class HollowSetMissingDataAccess implements HollowSetTypeDataAccess {
private final HollowDataAccess dataAccess;
private final String typeName;
public HollowSetMissingDataAccess(HollowDataAccess dataAccess, String typeName) {
this.dataAccess = dataAccess;
this.typeName = typeName;
}
@Override
public HollowDataAccess getDataAccess() {
return dataAccess;
}
@Override
public HollowSetSchema getSchema() {
return (HollowSetSchema) missingDataHandler().handleSchema(typeName);
}
@Override
public int size(int ordinal) {
return missingDataHandler().handleSetSize(typeName, ordinal);
}
@Override
public boolean contains(int ordinal, int elementOrdinal) {
return missingDataHandler().handleSetContainsElement(typeName, ordinal, elementOrdinal, elementOrdinal);
}
@Override
public boolean contains(int ordinal, int elementOrdinal, int hashCode) {
return missingDataHandler().handleSetContainsElement(typeName, ordinal, elementOrdinal, hashCode);
}
@Override
public int findElement(int ordinal, Object... hashKey) {
return missingDataHandler().handleSetFindElement(typeName, ordinal, hashKey);
}
@Override
public int relativeBucketValue(int ordinal, int bucketIndex) {
throw new UnsupportedOperationException("Set type " + typeName + " is missing, but an attempt was made to access relative bucket values");
}
@Override
public HollowOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode) {
return missingDataHandler().handleSetPotentialMatchIterator(typeName, ordinal, hashCode);
}
@Override
public HollowOrdinalIterator ordinalIterator(int ordinal) {
return missingDataHandler().handleSetIterator(typeName, ordinal);
}
MissingDataHandler missingDataHandler() {
return dataAccess.getMissingDataHandler();
}
@Override
public HollowTypeReadState getTypeState() {
throw new UnsupportedOperationException("No HollowTypeReadState exists for " + typeName);
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
}
@Override
public HollowSampler getSampler() {
return HollowSetSampler.NULL_SAMPLER;
}
}
| 9,065 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/missing/HollowMapMissingDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.missing;
import com.netflix.hollow.api.sampling.HollowMapSampler;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowMapTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
import com.netflix.hollow.core.read.missing.MissingDataHandler;
import com.netflix.hollow.core.schema.HollowMapSchema;
/**
* Used when an entire MAP type, which is expected by a Generated Hollow API, is missing from the actual data.
*/
public class HollowMapMissingDataAccess implements HollowMapTypeDataAccess {
private final HollowDataAccess dataAccess;
private final String typeName;
public HollowMapMissingDataAccess(HollowDataAccess dataAccess, String typeName) {
this.dataAccess = dataAccess;
this.typeName = typeName;
}
@Override
public HollowDataAccess getDataAccess() {
return dataAccess;
}
@Override
public HollowMapSchema getSchema() {
return (HollowMapSchema) missingDataHandler().handleSchema(typeName);
}
@Override
public int size(int ordinal) {
return missingDataHandler().handleMapSize(typeName, ordinal);
}
@Override
public int get(int ordinal, int keyOrdinal) {
return missingDataHandler().handleMapGet(typeName, ordinal, keyOrdinal, keyOrdinal);
}
@Override
public int get(int ordinal, int keyOrdinal, int hashCode) {
return missingDataHandler().handleMapGet(typeName, ordinal, keyOrdinal, hashCode);
}
@Override
public int findKey(int ordinal, Object... hashKey) {
return missingDataHandler().handleMapFindKey(typeName, ordinal, hashKey);
}
@Override
public int findValue(int ordinal, Object... hashKey) {
return missingDataHandler().handleMapFindValue(typeName, ordinal, hashKey);
}
@Override
public long findEntry(int ordinal, Object... hashKey) {
return missingDataHandler().handleMapFindEntry(typeName, ordinal, hashKey);
}
@Override
public HollowMapEntryOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode) {
return missingDataHandler().handleMapPotentialMatchOrdinalIterator(typeName, ordinal, hashCode);
}
@Override
public HollowMapEntryOrdinalIterator ordinalIterator(int ordinal) {
return missingDataHandler().handleMapOrdinalIterator(typeName, ordinal);
}
@Override
public long relativeBucket(int ordinal, int bucketIndex) {
throw new UnsupportedOperationException();
}
private MissingDataHandler missingDataHandler() {
return dataAccess.getMissingDataHandler();
}
@Override
public HollowTypeReadState getTypeState() {
throw new UnsupportedOperationException("No HollowTypeReadState exists for " + typeName);
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
}
@Override
public HollowSampler getSampler() {
return HollowMapSampler.NULL_SAMPLER;
}
}
| 9,066 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/missing/HollowListMissingDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.missing;
import com.netflix.hollow.api.sampling.HollowListSampler;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowListTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.read.missing.MissingDataHandler;
import com.netflix.hollow.core.schema.HollowListSchema;
/**
* Used when an entire LIST type, which is expected by a Generated Hollow API, is missing from the actual data.
*/
public class HollowListMissingDataAccess implements HollowListTypeDataAccess {
private final HollowDataAccess dataAccess;
private final String typeName;
public HollowListMissingDataAccess(HollowDataAccess dataAccess, String typeName) {
this.dataAccess = dataAccess;
this.typeName = typeName;
}
@Override
public HollowDataAccess getDataAccess() {
return dataAccess;
}
@Override
public HollowListSchema getSchema() {
return (HollowListSchema) missingDataHandler().handleSchema(typeName);
}
@Override
public int getElementOrdinal(int ordinal, int listIndex) {
return missingDataHandler().handleListElementOrdinal(typeName, ordinal, listIndex);
}
@Override
public int size(int ordinal) {
return missingDataHandler().handleListSize(typeName, ordinal);
}
@Override
public HollowOrdinalIterator ordinalIterator(int ordinal) {
return missingDataHandler().handleListIterator(typeName, ordinal);
}
private MissingDataHandler missingDataHandler() {
return dataAccess.getMissingDataHandler();
}
@Override
public HollowTypeReadState getTypeState() {
throw new UnsupportedOperationException("No HollowTypeReadState exists for " + typeName);
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
}
@Override
public HollowSampler getSampler() {
return HollowListSampler.NULL_SAMPLER;
}
}
| 9,067 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/missing/HollowObjectMissingDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.missing;
import com.netflix.hollow.api.sampling.HollowObjectSampler;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowObjectTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.missing.MissingDataHandler;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
/**
* Used when an entire OBJECT type, which is expected by a Generated Hollow API, is missing from the actual data.
*/
public class HollowObjectMissingDataAccess implements HollowObjectTypeDataAccess {
private final HollowDataAccess dataAccess;
private final String typeName;
public HollowObjectMissingDataAccess(HollowDataAccess dataAccess, String typeName) {
this.dataAccess = dataAccess;
this.typeName = typeName;
}
@Override
public HollowDataAccess getDataAccess() {
return dataAccess;
}
@Override
public HollowObjectSchema getSchema() {
return (HollowObjectSchema) missingDataHandler().handleSchema(typeName);
}
@Override
public boolean isNull(int ordinal, int fieldIndex) {
return missingDataHandler().handleIsNull(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public int readOrdinal(int ordinal, int fieldIndex) {
return missingDataHandler().handleReferencedOrdinal(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public int readInt(int ordinal, int fieldIndex) {
return missingDataHandler().handleInt(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public float readFloat(int ordinal, int fieldIndex) {
return missingDataHandler().handleFloat(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public double readDouble(int ordinal, int fieldIndex) {
return missingDataHandler().handleDouble(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public long readLong(int ordinal, int fieldIndex) {
return missingDataHandler().handleLong(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public Boolean readBoolean(int ordinal, int fieldIndex) {
return missingDataHandler().handleBoolean(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public byte[] readBytes(int ordinal, int fieldIndex) {
return missingDataHandler().handleBytes(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public String readString(int ordinal, int fieldIndex) {
return missingDataHandler().handleString(typeName, ordinal, fieldName(fieldIndex));
}
@Override
public boolean isStringFieldEqual(int ordinal, int fieldIndex, String testValue) {
return missingDataHandler().handleStringEquals(typeName, ordinal, fieldName(fieldIndex), testValue);
}
@Override
public int findVarLengthFieldHashCode(int ordinal, int fieldIndex) {
HollowObjectSchema schema = getSchema();
if(schema.getFieldType(fieldIndex) == FieldType.STRING) {
return HashCodes.hashCode(missingDataHandler().handleString(typeName, ordinal, schema.getFieldName(fieldIndex)));
} else {
return HashCodes.hashCode(missingDataHandler().handleBytes(typeName, ordinal, schema.getFieldName(fieldIndex)));
}
}
private String fieldName(int fieldIndex) {
return getSchema().getFieldName(fieldIndex);
}
private MissingDataHandler missingDataHandler() {
return dataAccess.getMissingDataHandler();
}
@Override
public HollowTypeReadState getTypeState() {
throw new UnsupportedOperationException("No HollowTypeReadState exists for " + typeName);
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
}
@Override
public HollowSampler getSampler() {
return HollowObjectSampler.NULL_SAMPLER;
}
}
| 9,068 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/disabled/HollowObjectDisabledDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.disabled;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowObjectTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.schema.HollowObjectSchema;
public class HollowObjectDisabledDataAccess implements HollowObjectTypeDataAccess {
public static final HollowObjectDisabledDataAccess INSTANCE = new HollowObjectDisabledDataAccess();
private HollowObjectDisabledDataAccess() { }
@Override
public HollowDataAccess getDataAccess() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
throw new IllegalStateException("DataAccess is Disabled");
}
@Override
public HollowSampler getSampler() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowTypeReadState getTypeState() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowObjectSchema getSchema() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public boolean isNull(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int readOrdinal(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int readInt(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public float readFloat(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public double readDouble(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public long readLong(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public Boolean readBoolean(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public byte[] readBytes(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public String readString(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public boolean isStringFieldEqual(int ordinal, int fieldIndex, String testValue) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int findVarLengthFieldHashCode(int ordinal, int fieldIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
}
| 9,069 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/disabled/HollowMapDisabledDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.disabled;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowMapTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
import com.netflix.hollow.core.schema.HollowMapSchema;
public class HollowMapDisabledDataAccess implements HollowMapTypeDataAccess {
public static final HollowMapDisabledDataAccess INSTANCE = new HollowMapDisabledDataAccess();
private HollowMapDisabledDataAccess() { }
@Override
public HollowDataAccess getDataAccess() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
throw new IllegalStateException("DataAccess is Disabled");
}
@Override
public HollowSampler getSampler() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowTypeReadState getTypeState() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowMapSchema getSchema() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int size(int ordinal) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int get(int ordinal, int keyOrdinal) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int get(int ordinal, int keyOrdinal, int hashCode) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int findKey(int ordinal, Object... hashKey) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int findValue(int ordinal, Object... hashKey) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public long findEntry(int ordinal, Object... hashKey) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowMapEntryOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowMapEntryOrdinalIterator ordinalIterator(int ordinal) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public long relativeBucket(int ordinal, int bucketIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
}
| 9,070 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/disabled/HollowDisabledDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.disabled;
import com.netflix.hollow.api.client.StaleHollowReferenceDetector;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.core.read.dataaccess.proxy.HollowProxyDataAccess;
import com.netflix.hollow.core.read.missing.MissingDataHandler;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.util.HollowObjectHashCodeFinder;
import java.util.Collection;
import java.util.List;
/**
* A HollowDisabledDataAccess throws an IllegalStateException when access is attempted. This is swapped into
* a {@link HollowProxyDataAccess} if the {@link StaleHollowReferenceDetector} detects that stale references are
* held but unused.
*/
public class HollowDisabledDataAccess implements HollowDataAccess {
public static final HollowDisabledDataAccess INSTANCE = new HollowDisabledDataAccess();
private HollowDisabledDataAccess() { }
@Override
public HollowTypeDataAccess getTypeDataAccess(String typeName) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowTypeDataAccess getTypeDataAccess(String typeName, int ordinal) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public Collection<String> getAllTypes() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowObjectHashCodeFinder getHashCodeFinder() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public MissingDataHandler getMissingDataHandler() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public List<HollowSchema> getSchemas() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowSchema getSchema(String name) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowSchema getNonNullSchema(String name) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void resetSampling() { }
@Override
public boolean hasSampleResults() {
return false;
}
}
| 9,071 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/disabled/HollowSetDisabledDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.disabled;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowSetTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowSetSchema;
public class HollowSetDisabledDataAccess implements HollowSetTypeDataAccess {
public static final HollowSetDisabledDataAccess INSTANCE = new HollowSetDisabledDataAccess();
private HollowSetDisabledDataAccess() { }
@Override
public int size(int ordinal) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowOrdinalIterator ordinalIterator(int ordinal) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowDataAccess getDataAccess() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
throw new IllegalStateException("DataAccess is Disabled");
}
@Override
public HollowSampler getSampler() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowTypeReadState getTypeState() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowSetSchema getSchema() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public boolean contains(int ordinal, int value) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public boolean contains(int ordinal, int value, int hashCode) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int findElement(int ordinal, Object... hashKey) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int relativeBucketValue(int ordinal, int bucketIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode) {
throw new IllegalStateException("Data Access is Disabled");
}
}
| 9,072 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/dataaccess/disabled/HollowListDisabledDataAccess.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.dataaccess.disabled;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowListTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowListSchema;
public class HollowListDisabledDataAccess implements HollowListTypeDataAccess {
public static final HollowListDisabledDataAccess INSTANCE = new HollowListDisabledDataAccess();
private HollowListDisabledDataAccess() { }
@Override
public int size(int ordinal) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowOrdinalIterator ordinalIterator(int ordinal) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowDataAccess getDataAccess() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
throw new IllegalStateException("DataAccess is Disabled");
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
throw new IllegalStateException("DataAccess is Disabled");
}
@Override
public HollowSampler getSampler() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowTypeReadState getTypeState() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public HollowListSchema getSchema() {
throw new IllegalStateException("Data Access is Disabled");
}
@Override
public int getElementOrdinal(int ordinal, int listIndex) {
throw new IllegalStateException("Data Access is Disabled");
}
}
| 9,073 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/missing/MissingDataHandler.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.missing;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowSchema;
/**
* A MissingDataHandler specifies what to do when a generated API contains methods to access fields which do not
* exist in the loaded Hollow dataset.
*/
public interface MissingDataHandler {
///// OBJECT /////
public boolean handleIsNull(String type, int ordinal, String field);
public Boolean handleBoolean(String type, int ordinal, String field);
public int handleReferencedOrdinal(String type, int ordinal, String field);
public int handleInt(String type, int ordinal, String field);
public long handleLong(String type, int ordinal, String field);
public float handleFloat(String type, int ordinal, String field);
public double handleDouble(String type, int ordinal, String field);
public String handleString(String type, int ordinal, String field);
public boolean handleStringEquals(String type, int ordinal, String field, String testValue);
public byte[] handleBytes(String type, int ordinal, String field);
///// LIST /////
public int handleListSize(String type, int ordinal);
public int handleListElementOrdinal(String type, int ordinal, int idx);
public HollowOrdinalIterator handleListIterator(String type, int ordinal);
///// SET /////
public int handleSetSize(String type, int ordinal);
public HollowOrdinalIterator handleSetIterator(String type, int ordinal);
public HollowOrdinalIterator handleSetPotentialMatchIterator(String type, int ordinal, int hashCode);
public boolean handleSetContainsElement(String type, int ordinal, int elementOrdinal, int elementOrdinalHashCode);
public int handleSetFindElement(String type, int ordinal, Object... keys);
///// MAP /////
public int handleMapSize(String type, int ordinal);
public HollowMapEntryOrdinalIterator handleMapOrdinalIterator(String type, int ordinal);
public HollowMapEntryOrdinalIterator handleMapPotentialMatchOrdinalIterator(String type, int ordinal, int keyHashCode);
public int handleMapGet(String type, int ordinal, int keyOrdinal, int keyOrdinalHashCode);
public int handleMapFindKey(String type, int ordinal, Object... keys);
public int handleMapFindValue(String type, int ordinal, Object... keys);
public long handleMapFindEntry(String type, int ordinal, Object... keys);
///// SCHEMA /////
public HollowSchema handleSchema(String type);
}
| 9,074 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/missing/DefaultMissingDataHandler.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.missing;
import com.netflix.hollow.core.read.iterator.EmptyMapOrdinalIterator;
import com.netflix.hollow.core.read.iterator.EmptyOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowSchema;
public class DefaultMissingDataHandler implements MissingDataHandler {
@Override
public boolean handleIsNull(String type, int ordinal, String field) {
return true;
}
@Override
public Boolean handleBoolean(String type, int ordinal, String field) {
return null;
}
@Override
public int handleReferencedOrdinal(String type, int ordinal, String field) {
return -1;
}
@Override
public int handleInt(String type, int ordinal, String field) {
return Integer.MIN_VALUE;
}
@Override
public long handleLong(String type, int ordinal, String field) {
return Long.MIN_VALUE;
}
@Override
public float handleFloat(String type, int ordinal, String field) {
return Float.NaN;
}
@Override
public double handleDouble(String type, int ordinal, String field) {
return Double.NaN;
}
@Override
public String handleString(String type, int ordinal, String field) {
return null;
}
@Override
public boolean handleStringEquals(String type, int ordinal, String field, String testValue) {
return testValue == null;
}
@Override
public byte[] handleBytes(String type, int ordinal, String field) {
return null;
}
@Override
public int handleListSize(String type, int ordinal) {
return 0;
}
@Override
public int handleListElementOrdinal(String type, int ordinal, int idx) {
return -1;
}
@Override
public HollowOrdinalIterator handleListIterator(String type, int ordinal) {
return EmptyOrdinalIterator.INSTANCE;
}
@Override
public int handleSetSize(String type, int ordinal) {
return 0;
}
@Override
public boolean handleSetContainsElement(String type, int ordinal, int elementOrdinal, int elementOrdinalHashCode) {
return false;
}
@Override
public int handleSetFindElement(String type, int ordinal, Object... keys) {
return -1;
}
@Override
public HollowOrdinalIterator handleSetIterator(String type, int ordinal) {
return EmptyOrdinalIterator.INSTANCE;
}
@Override
public HollowOrdinalIterator handleSetPotentialMatchIterator(String type, int ordinal, int hashCode) {
return EmptyOrdinalIterator.INSTANCE;
}
@Override
public int handleMapSize(String type, int ordinal) {
return 0;
}
@Override
public HollowMapEntryOrdinalIterator handleMapOrdinalIterator(String type, int ordinal) {
return EmptyMapOrdinalIterator.INSTANCE;
}
@Override
public HollowMapEntryOrdinalIterator handleMapPotentialMatchOrdinalIterator(String type, int ordinal, int keyHashCode) {
return EmptyMapOrdinalIterator.INSTANCE;
}
@Override
public int handleMapGet(String type, int ordinal, int keyOrdinal, int keyOrdinalHashCode) {
return -1;
}
@Override
public int handleMapFindKey(String type, int ordinal, Object... keys) {
return -1;
}
@Override
public int handleMapFindValue(String type, int ordinal, Object... keys) {
return -1;
}
@Override
public long handleMapFindEntry(String type, int ordinal, Object... keys) {
return -1L;
}
@Override
public HollowSchema handleSchema(String type) {
throw new UnsupportedOperationException("By default, missing types are not handled.");
}
}
| 9,075 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/iterator/HollowSetOrdinalIterator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.iterator;
import static com.netflix.hollow.core.HollowConstants.ORDINAL_NONE;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.read.dataaccess.HollowSetTypeDataAccess;
public class HollowSetOrdinalIterator implements HollowOrdinalIterator {
private final int setOrdinal;
private final HollowSetTypeDataAccess dataAccess;
private final int numBuckets;
private int currentBucket = -1;
public HollowSetOrdinalIterator(int setOrdinal, HollowSetTypeDataAccess dataAccess) {
this.setOrdinal = setOrdinal;
this.dataAccess = dataAccess;
this.numBuckets = HashCodes.hashTableSize(dataAccess.size(setOrdinal));
}
@Override
public int next() {
int bucketValue;
bucketValue = ORDINAL_NONE;
while(bucketValue == ORDINAL_NONE) {
currentBucket++;
if(currentBucket >= numBuckets)
return NO_MORE_ORDINALS;
bucketValue = dataAccess.relativeBucketValue(setOrdinal, currentBucket);
}
return bucketValue;
}
/**
* @return the bucket position the last ordinal was retrieved from via the call to {@link #next()}.
*/
public int getCurrentBucket() {
return currentBucket;
}
}
| 9,076 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/iterator/EmptyOrdinalIterator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.iterator;
public class EmptyOrdinalIterator implements HollowOrdinalIterator {
public static final EmptyOrdinalIterator INSTANCE = new EmptyOrdinalIterator();
private EmptyOrdinalIterator() { }
@Override
public int next() {
return NO_MORE_ORDINALS;
}
}
| 9,077 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/iterator/HollowListOrdinalIterator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.iterator;
import com.netflix.hollow.core.read.dataaccess.HollowListTypeDataAccess;
public class HollowListOrdinalIterator implements HollowOrdinalIterator {
private final int listOrdinal;
private final HollowListTypeDataAccess dataAccess;
private final int size;
private int currentElement;
public HollowListOrdinalIterator(int listOrdinal, HollowListTypeDataAccess dataAccess) {
this.listOrdinal = listOrdinal;
this.dataAccess = dataAccess;
this.size = dataAccess.size(listOrdinal);
}
@Override
public int next() {
if(currentElement == size)
return NO_MORE_ORDINALS;
return dataAccess.getElementOrdinal(listOrdinal, currentElement++);
}
}
| 9,078 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/iterator/HollowMapEntryOrdinalIteratorImpl.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.iterator;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.read.dataaccess.HollowMapTypeDataAccess;
public class HollowMapEntryOrdinalIteratorImpl implements HollowMapEntryOrdinalIterator {
private final int mapOrdinal;
private final HollowMapTypeDataAccess dataAccess;
private final int numBuckets;
private int currentBucket = -1;
private int key;
private int value;
public HollowMapEntryOrdinalIteratorImpl(int mapOrdinal, HollowMapTypeDataAccess dataAccess) {
this.mapOrdinal = mapOrdinal;
this.dataAccess = dataAccess;
this.numBuckets = HashCodes.hashTableSize(dataAccess.size(mapOrdinal));
}
@Override
public int getKey() {
return key;
}
@Override
public int getValue() {
return value;
}
public int getCurrentBucket() {
return currentBucket;
}
@Override
public boolean next() {
key = -1;
while(key == -1) {
currentBucket++;
if(currentBucket >= numBuckets)
return false;
long bucketVal = dataAccess.relativeBucket(mapOrdinal, currentBucket);
this.key = (int)(bucketVal >>> 32);
this.value = (int)bucketVal;
}
return true;
}
}
| 9,079 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/iterator/EmptyMapOrdinalIterator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.iterator;
import java.util.NoSuchElementException;
public class EmptyMapOrdinalIterator implements HollowMapEntryOrdinalIterator {
public static final EmptyMapOrdinalIterator INSTANCE = new EmptyMapOrdinalIterator();
private EmptyMapOrdinalIterator() { }
@Override
public boolean next() {
return false;
}
@Override
public int getKey() {
throw new NoSuchElementException("This MapEntryOrdinalIterator is empty");
}
@Override
public int getValue() {
throw new NoSuchElementException("This MapEntryOrdinalIterator is empty");
}
}
| 9,080 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/iterator/HollowOrdinalIterator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.iterator;
/**
* An iterator over ordinals. The general pattern for use is:
* <pre>
* {@code
* HollowOrdinalIterator iter = ...;
*
* int ordinal = iter.next();
* while(ordinal != HollowOrdinalIterator.NO_MORE_ORDINALS) {
* /// do something with the ordinal
* ordinal = iter.next();
* }
* }
* </pre>
*
*/
public interface HollowOrdinalIterator {
/**
* A value indicating that no more ordinals are available in the iteration.
*/
public static final int NO_MORE_ORDINALS = Integer.MAX_VALUE;
/**
* @return The next ordinal, or {@link HollowOrdinalIterator#NO_MORE_ORDINALS} if no more ordinals are available in this iteration.
*/
public int next();
}
| 9,081 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/iterator/HollowMapEntryOrdinalIterator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.iterator;
/**
* A {@link HollowMapEntryOrdinalIterator} allows for iteration over key/value entries in a Hollow MAP record.
* <p>
* The pattern for usage is:
* <pre>
* {@code
* HollowMapEntryOrdinalIterator iter = /// some iterator
* while(iter.next()) {
* int keyOrdinal = iter.getKey();
* int valueOrdinal = iter.getValue();
* }
* }
* </pre>
*/
public interface HollowMapEntryOrdinalIterator {
public boolean next();
public int getKey();
public int getValue();
}
| 9,082 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/filter/TypeFilter.java | package com.netflix.hollow.core.read.filter;
import static com.netflix.hollow.core.read.filter.TypeActions.newTypeActions;
import static com.netflix.hollow.core.read.filter.TypeFilter.Builder.Action.exclude;
import static com.netflix.hollow.core.read.filter.TypeFilter.Builder.Action.excludeRecursive;
import static com.netflix.hollow.core.read.filter.TypeFilter.Builder.Action.include;
import static com.netflix.hollow.core.read.filter.TypeFilter.Builder.Action.includeRecursive;
import static com.netflix.hollow.core.read.filter.TypeFilter.Builder.Action.next;
import static com.netflix.hollow.core.schema.HollowObjectSchema.FieldType.REFERENCE;
import static java.util.Collections.singletonMap;
import static java.util.Collections.unmodifiableMap;
import static java.util.Objects.requireNonNull;
import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toMap;
import com.netflix.hollow.core.read.filter.TypeFilter.Builder.Action;
import com.netflix.hollow.core.read.filter.TypeFilter.Builder.Rule;
import com.netflix.hollow.core.schema.HollowCollectionSchema;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.schema.HollowSchema.SchemaType.UnrecognizedSchemaTypeException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;
import java.util.stream.IntStream;
import java.util.stream.Stream;
/**
* <p>Filters types from a dataset by including or excluding types; and for object types, including or excluding
* fields. For example:</p>
*
* <pre>{@code
* newTypeFilter()
* .excludeAll()
* .includeRecursive("Alpha")
* .include("Beta")
* }</pre>
*
* <p>See {@link TypeFilter.Builder} for more examples.</p>
*/
@com.netflix.hollow.PublicApi
public interface TypeFilter {
/**
* Creates a type filter that defaults to including all types as if by calling
* {@code newTypeFilter().includeAll()}.
*
* @return the filter
*/
public static TypeFilter.Builder newTypeFilter() {
return new Builder().includeAll();
}
/**
* Returns whether the specified type should be included.
*
* @param type the type to check
* @return true if the type should be included, false otherwise
*/
boolean includes(String type);
/**
* <p>Returns whether the specified field on the indicated type should be included. For non-{@code OBJECT} types
* this method always returns false.</p>
*
* @param type the type to check
* @param field the field to check
* @return true if the field should be included, false otherwise
* @see com.netflix.hollow.core.schema.HollowSchema.SchemaType
*/
boolean includes(String type, String field);
/**
* Resolve this type filter against the provide schema. May return itself if already reasolved, otherwise
* returns a new filter.
*
* @param schemas schemas to resolve against
* @return a resolved type filter
*/
default TypeFilter resolve(List<HollowSchema> schemas) {
return this;
}
/**
* <p>A builder for a {@code TypeFilter}. Inclusion and exclusion rules can be combined (later rules take
* precedence over earlier ones). Examples:</p>
*
* <pre>{@code
* // include everything
* newTypeFilter()
*
* // exclude everything except Alpha
* newTypeFilter()
* .excludeAll()
* .include("Alpha")
*
* // exclude everything except Alpha's 'a1' field
* newTypeFilter()
* .excludeAll()
* .include("Alpha", "a1")
*
* // exclude Alpha
* newTypeFilter()
* .exclude("Alpha")
*
* // exclude Alpha's 'a1' field
* newTypeFilter()
* .exclude("Alpha", "a1")
*
* // exclude Alpha except for its 'a1' field
* newTypeFilter()
* .exclude("Alpha")
* .include("Alpha", "a1")
*
* // exclude Alpha and all types reachable from Alpha
* newTypeFilter().excludeRecursive("Alpha")
*
* // exclude Alpha's 'a1' field and all types reachable from it
* newTypeFilter().excludeRecursive("Alpha", "a1")
*
* // Given: Omega reachable from Alpha
* // Recursively exclude Alpha while retaining Omega and all types reachable from it
* newTypeFilter()
* .excludeRecursive("Alpha")
* .includeRecursive("Omega")
* }</pre>
*
* <p>Not thread safe.</p>
*/
@com.netflix.hollow.PublicApi
final class Builder {
List<Rule> rules;
private Builder() {
rules = new ArrayList<>();
}
public Builder includeAll() {
rules.clear();
rules.add(INCLUDE_ALL);
return this;
}
public Builder excludeAll() {
rules.clear();
rules.add(EXCLUDE_ALL);
return this;
}
/**
* <p>Include the specified type. Non-recursive.</p>
*
* @param type type to include
* @return this builder
* @see #includeRecursive(String)
* @see #include(String, String)
*/
public Builder include(String type) {
requireNonNull(type, "type required");
rules.add((t,f) -> type.equals(t) ? include : next);
return this;
}
/**
* <p>Include the specified type recursively.</p>
*
* <p>If {@code type} is an {@code OBJECT} the types referenced by each of its {@code REFERENCE} fields are included as
* if by calling {@code #includeRecursive(referencedType)}.</p>
*
* @param type type to include
* @return this builder
*/
public Builder includeRecursive(String type) {
requireNonNull(type, "type required");
rules.add((t,f) -> type.equals(t) ? includeRecursive : next);
return this;
}
/**
* <p>Include the field on the specified type. Non-recursive. Has no effect on non-{@code OBJECT} types.</p>
*/
@com.netflix.hollow.Internal
public Builder include(String type, String field) {
requireNonNull(type, "type required");
requireNonNull(field, "field name required");
rules.add((t,f) -> type.equals(t) && field.equals(f) ? include : next);
return this;
}
/**
* <p>Include the field on the specified type recursively. If {@code field} is reference to another type, that
* type will be included as if calling {@code includeRecursive(referencedType)}. Has no effect on non-{@code OBJECT}
* types.</p>
*/
@com.netflix.hollow.Internal
public Builder includeRecursive(String type, String field) {
requireNonNull(type, "type required");
requireNonNull(field, "field name required");
rules.add((t,f) -> type.equals(t) && field.equals(f) ? includeRecursive : next);
return this;
}
/**
* <p>Exclude the specified type. Non-recursive.</p>
*
* @param type type to exclude
* @return this builder
* @see #excludeRecursive(String)
* @see #exclude(String, String)
*/
public Builder exclude(String type) {
requireNonNull(type, "type required");
rules.add((t,f) -> type.equals(t) ? exclude : next);
return this;
}
/**
* <p>Exclude the specified type recursively.</p>
*
* <p>If {@code type} is an {@code OBJECT} the types referenced by each of its {@code REFERENCE} fields are excluded
* as if by calling {@code #excludeRecursive(referencedType)}.</p>
*
* @param type type to exclude
* @return this builder
*/
public Builder excludeRecursive(String type) {
requireNonNull(type, "type required");
rules.add((t,f) -> type.equals(t) ? excludeRecursive : next);
return this;
}
/**
* <p>Exclude the field on the specified type. Non-recursive. Has no effect on non-{@code OBJECT} types.</p>
*/
public Builder exclude(String type, String field) {
requireNonNull(type, "type required");
requireNonNull(field, "field name required");
rules.add((t,f) -> type.equals(t) && field.equals(f) ? exclude : next);
return this;
}
/**
* <p>Exclude the field on the specified type recursively. If {@code field} is reference to another type, that
* type will be excluded as if calling {@code excludeRecursive(referencedType)}. Has no effect on
* non-{@code OBJECT} types.</p>
*/
public Builder excludeRecursive(String type, String field) {
requireNonNull(type, "type required");
requireNonNull(field, "field name required");
rules.add((t,f) -> type.equals(t) && field.equals(f) ? excludeRecursive : next);
return this;
}
public TypeFilter resolve(List<HollowSchema> schemas) {
return new Resolver(rules, schemas).resolve();
}
public TypeFilter build() {
return new UnresolvedTypeFilter(rules);
}
@FunctionalInterface
@com.netflix.hollow.Internal
interface Rule extends BiFunction<String,String, Action> {
@Override
Action apply(String type, String field);
}
private static final Rule INCLUDE_ALL = (type, field) -> include;
private static final Rule EXCLUDE_ALL = (type, field) -> exclude;
/**
* Actions on a target (a type or a type's field).
*/
@com.netflix.hollow.Internal
enum Action {
/** move on to the next action */
next(false, false),
/** include the target (non-recursive) */
include(true, false),
/** include the target and its descendants */
includeRecursive(true, true),
/** exclude the target (non-recursive) */
exclude(false, false),
/** exclude the target and its descendants */
excludeRecursive(false, true);
/** {@code true} if target should be included, {@code false} otherwise */
final boolean included;
/** {@code true} if action applies to target's descendants, {@code false} otherwise. */
final boolean recursive;
Action(boolean included, boolean recursive) {
this.included = included;
this.recursive = recursive;
}
}
}
}
/**
* A filter with its rules resolved against a dataset's schemas.
*/
@com.netflix.hollow.Internal
class ResolvedTypeFilter implements TypeFilter {
private final Map<String, TypeActions> actionsMap;
ResolvedTypeFilter(Map<String, TypeActions> actionsMap) {
this.actionsMap = unmodifiableMap(new LinkedHashMap<>(actionsMap));
}
@Override
public boolean includes(String type) {
requireNonNull(type, "type name required");
TypeActions ta = actionsMap.get(type);
return ta != null && ta.actions().values().stream().anyMatch(action -> action.included);
}
@Override
public boolean includes(String type, String field) {
requireNonNull(type, "type name required");
requireNonNull(field, "field name required");
TypeActions ta = actionsMap.get(type);
return ta != null && (ta.action().included || ta.action(field).included);
}
}
/**
* <p>A filter that needs to be resolved against a dataset's schemas before it can be used for filtering.</p>
*
* <p>Recursive actions require the schema to be resolved. This class retains intent until resolution. For
* comparison, defining recursive actions using {@link HollowFilterConfig} with
* {@link com.netflix.hollow.api.consumer.HollowConsumer.Builder} has a chicken-and-egg problem: the builder requires
* the filter config up front and the filter requires a full schema in order to express recursive actions, but callers
* typically don't have the schema until after a consumer has loaded a snapshot.</p>
*
* <p>All filtering methods in this class throw {@code IllegalStateException}. Call {@link TypeFilter#resolve(List)}
* and use the returned filter instead.</p>
*
*/
@com.netflix.hollow.Internal
class UnresolvedTypeFilter implements TypeFilter {
private final List<Rule> rules;
UnresolvedTypeFilter(List<Rule> rules) {
this.rules = rules;
}
@Override
public boolean includes(String type) {
requireNonNull(type);
throw new IllegalStateException("unresolved type filter");
}
@Override
public boolean includes(String type, String field) {
requireNonNull(type);
throw new IllegalStateException("unresolved type filter");
}
@Override
public TypeFilter resolve(List<HollowSchema> schemas) {
return new Resolver(rules, schemas).resolve();
}
}
@com.netflix.hollow.Internal
final class Resolver {
private final Map<String,HollowSchema> schemas;
private final List<Rule> rules;
Resolver(List<Rule> rules, List<HollowSchema> schemas) {
assert !rules.isEmpty();
this.rules = rules;
this.schemas = schemas.stream().collect(toMap(HollowSchema::getName, identity()));
}
TypeFilter resolve() {
Map<String, TypeActions> resolved = rules
.stream()
.flatMap(rule -> schemas
.values()
.stream()
.flatMap(schema -> descendants(rule, schema)))
.filter(ta -> ta.actions().values().stream().anyMatch(a -> a != next))
.collect(toMap(TypeActions::type, identity(), TypeActions::merge));
return new ResolvedTypeFilter(resolved);
}
private Stream<TypeActions> descendants(Rule rule, HollowSchema schema) {
String type = schema.getName();
Action action = rule.apply(type, null);
TypeActions parent = TypeActions.newTypeActions(type, action);
switch (schema.getSchemaType()) {
case OBJECT:
HollowObjectSchema os = (HollowObjectSchema) schema;
return IntStream
.range(0, os.numFields())
.boxed()
.flatMap(i -> {
String field = os.getFieldName(i);
Action fa = rule.apply(type, field);
if (fa == next) return Stream.empty();
TypeActions child = newTypeActions(type, field, fa);
Action descendantAction = fa.recursive ? fa : action;
if (descendantAction.recursive && os.getFieldType(i) == REFERENCE) {
String refType = os.getReferencedType(i);
HollowSchema refSchema = schemas.get(refType);
assert refSchema != null;
Stream<TypeActions> descendants = descendants((t,f) -> descendantAction, refSchema);
return Stream.concat(Stream.of(parent, child), descendants);
} else {
return Stream.of(parent, child);
}
});
case SET:
case LIST:
if (action == next) {
return Stream.empty();
} else if (action.recursive) {
HollowCollectionSchema cs = (HollowCollectionSchema) schema;
HollowSchema elemSchema = schemas.get(cs.getElementType());
assert elemSchema != null;
Stream<TypeActions> descendants = descendants((t, f) -> action, elemSchema);
return Stream.concat(Stream.of(parent), descendants);
} else {
return Stream.of(parent);
}
case MAP:
if (action == next) {
return Stream.empty();
} else if (action.recursive) {
HollowMapSchema ms = (HollowMapSchema) schema;
HollowSchema kSchema = schemas.get(ms.getKeyType());
HollowSchema vSchema = schemas.get(ms.getValueType());
Stream<TypeActions> descendants = Stream.concat(
descendants((t, f) -> action, kSchema),
descendants((t1, f1) -> action, vSchema));
return Stream.concat(Stream.of(parent), descendants);
} else {
return Stream.of(parent);
}
default:
throw new UnrecognizedSchemaTypeException(type, schema.getSchemaType());
}
}
}
@com.netflix.hollow.Internal
class TypeActions {
private static final String ALL = new String("*"); // avoid interning
static TypeActions newTypeActions(String type, Action action) {
return new TypeActions(type, singletonMap(ALL, action));
}
static TypeActions newTypeActions(String type, String field, Action action) {
return new TypeActions(type, singletonMap(field, action));
}
String type() {
return type;
}
private final String type;
private final Map<String, Action> actions;
private TypeActions(String type, Map<String, Action> actions) {
this.type = type;
this.actions = actions;
}
Map<String, Action> actions() {
return actions;
}
Action action() {
return actions.getOrDefault(ALL, next);
}
Action action(String field) {
return actions.getOrDefault(field, next);
}
TypeActions merge(TypeActions other) {
Map<String, Action> m = new LinkedHashMap<>();
m.putAll(actions);
m.putAll(other.actions);
Action all = m.get(ALL);
m = m.entrySet().stream()
.filter(entry -> entry.getKey() == ALL || entry.getValue() != all)
.collect(toMap(Map.Entry::getKey, Map.Entry::getValue));
return new TypeActions(type, m);
}
@Override
public String toString() {
return type + actions;
}
}
| 9,083 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/filter/HollowFilterConfig.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.filter;
import com.netflix.hollow.api.consumer.HollowConsumer;
import com.netflix.hollow.core.read.engine.HollowBlobReader;
import com.netflix.hollow.core.read.engine.HollowReadStateEngine;
import com.netflix.hollow.core.schema.HollowCollectionSchema;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
import com.netflix.hollow.core.schema.HollowSchema;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* A HollowFilterConfig specifies a subset of the available fields in a data model. It can be specified
* as either an exclude filter, or an include filter.
* <p>
* An exclude filter specifies fields which are excluded from the subset. With an exclude filter,
* all fields are included by default and only the identified field are excluded.
* <p>
* An include filter (the default) specifies fields which are included in the subset. With an include filter,
* all fields are excluded by default and only the identified fields are included.
* <p>
* A HollowFilterConfig can be used to reduce the heap footprint on consumers, either with
* {@link HollowConsumer.Builder#withFilterConfig(HollowFilterConfig)} if using a {@link HollowConsumer} or with
* {@link HollowBlobReader#readSnapshot(java.io.InputStream, HollowFilterConfig)} if using a {@link HollowReadStateEngine}
* directly.
* <p>
* Note that when using this to configure a filter for a consumer, only the snapshot must be filtered. Subsequent
* deltas will automatically use the same filter.
*
* <p>{@link HollowFilterConfig} is deprecated in favor of {@link TypeFilter}.</p>
*
* <p>{@code HollowFilterConfig} has these limitations:</p>
*
* <ul>
* <li>cannot mix inclusions and exclusions in a single filter and cannot compose filters</li>
* <li>recursive actions requires that callers already have the dataset's schema, leading to
* a chicken-and-egg situation</li>
* </ul>
*
* @deprecated use {@link TypeFilter}
*/
@Deprecated
public class HollowFilterConfig implements TypeFilter {
private final ObjectFilterConfig INCLUDE_ALL = new ObjectFilterConfig(Boolean.TRUE);
private final ObjectFilterConfig INCLUDE_NONE = new ObjectFilterConfig(Boolean.FALSE);
private final boolean isExcludeFilter;
private final Set<String> specifiedTypes;
private final Map<String, ObjectFilterConfig> specifiedFieldConfigs;
/**
* Create a new <i>include</i> filter.
*/
public HollowFilterConfig() {
this(false);
}
/**
* Create a new filter
* @param isExcludeFilter true for an <i>exclude</i> filter, false for an <i>include</i> filter.
*/
public HollowFilterConfig(boolean isExcludeFilter) {
this.isExcludeFilter = isExcludeFilter;
this.specifiedTypes = new HashSet<String>();
this.specifiedFieldConfigs = new HashMap<String, ObjectFilterConfig>();
}
/**
* Add a type. All fields in the type will be either excluded or included, depending on
* whether this is an exclude or include filter, respectively.
*
* @param type the type name
*/
public void addType(String type) {
specifiedTypes.add(type);
}
/**
* Add a type, plus recursively add any directly or transitively referenced types.
*
* All fields in these types will be either excluded or included, depending on whether
* this is an exclude or include filter, respectively.
*
* @param type A type from the data model.
* @param schemas All schemas from the data model.
*/
public void addTypeRecursive(String type, Collection<HollowSchema> schemas) {
addTypeRecursive(type, mapSchemas(schemas));
}
/**
* Add a type, plus recursively add any directly or transitively referenced types.
*
* All fields in these types will be either excluded or included, depending on whether
* this is an exclude or include filter, respectively.
*
*
* @param type A type from the data model.
* @param schemas A map of typeName to schema including all schemas for this data model.
*/
public void addTypeRecursive(String type, Map<String, HollowSchema> schemas) {
addType(type);
HollowSchema schema = schemas.get(type);
switch(schema.getSchemaType()) {
case OBJECT:
HollowObjectSchema objSchema = (HollowObjectSchema)schema;
for(int i=0;i<objSchema.numFields();i++) {
if(objSchema.getFieldType(i) == FieldType.REFERENCE)
addTypeRecursive(objSchema.getReferencedType(i), schemas);
}
break;
case MAP:
addTypeRecursive(((HollowMapSchema)schema).getKeyType(), schemas);
addTypeRecursive(((HollowMapSchema)schema).getValueType(), schemas);
break;
case LIST:
case SET:
addTypeRecursive(((HollowCollectionSchema)schema).getElementType(), schemas);
break;
}
}
/**
* Add an individual field from an OBJECT schema. This field will be either
* excluded or included, depending on whether this is an exclude or include filter, respectively.
*
* @param type The OBJECT type from the data model
* @param objectField The field in the specified type to either include or exclude.
*/
public void addField(String type, String objectField) {
ObjectFilterConfig typeConfig = specifiedFieldConfigs.get(type);
if(typeConfig == null) {
typeConfig = new ObjectFilterConfig();
specifiedFieldConfigs.put(type, typeConfig);
}
typeConfig.addField(objectField);
}
/**
* Add an individual field from an OBJECT schema, plus recursively add any directly or transitively referenced types.
* This field will be either excluded or included, depending on whether this is an exclude or include filter, respectively.
*
* @param type The OBJECT type from the data model
* @param objectField The field in the specified type to either include or exclude.
* @param schemas All schemas from the data model.
*/
public void addFieldRecursive(String type, String objectField, Collection<HollowSchema> schemas) {
addFieldRecursive(type, objectField, mapSchemas(schemas));
}
/**
* Add an individual field from an OBJECT schema, plus recursively add any directly or transitively referenced types.
* This field will be either excluded or included, depending on whether this is an exclude or include filter, respectively.
*
* @param type The OBJECT type from the data model
* @param objectField The field in the specified type to either include or exclude.
* @param schemas A map of typeName to schema including all schemas for this data model.
*/
public void addFieldRecursive(String type, String objectField, Map<String, HollowSchema> schemas) {
addField(type, objectField);
HollowObjectSchema schema = (HollowObjectSchema)schemas.get(type);
if(schema.getFieldType(objectField) == FieldType.REFERENCE) {
addTypeRecursive(schema.getReferencedType(objectField), schemas);
}
}
/**
* @param type A type from the data model
* @return whether or not this filter includes the specified type.
*/
public boolean doesIncludeType(String type) {
if(isExcludeFilter)
return !specifiedTypes.contains(type);
return specifiedTypes.contains(type) || specifiedFieldConfigs.containsKey(type);
}
/**
* @return true if this is an <i>exclude</i> filter. False otherwise.
*/
public boolean isExcludeFilter() {
return isExcludeFilter;
}
public int numSpecifiedTypes() {
return specifiedTypes.size();
}
public Set<String> getSpecifiedTypes() {
return specifiedTypes;
}
public ObjectFilterConfig getObjectTypeConfig(String type) {
ObjectFilterConfig typeConfig = specifiedFieldConfigs.get(type);
if(typeConfig != null)
return typeConfig;
if(isExcludeFilter) {
if(specifiedTypes.contains(type))
return INCLUDE_NONE;
return INCLUDE_ALL;
} else {
if(specifiedTypes.contains(type))
return INCLUDE_ALL;
return INCLUDE_NONE;
}
}
@Override
public boolean includes(String type) {
return doesIncludeType(type);
}
@Override
public boolean includes(String type, String field) {
return getObjectTypeConfig(type).includesField(field);
}
public class ObjectFilterConfig {
private final Boolean alwaysAnswer;
private final Set<String> specifiedFields;
public ObjectFilterConfig() {
this(null);
}
public ObjectFilterConfig(Boolean alwaysAnswer) {
this.specifiedFields = new HashSet<String>();
this.alwaysAnswer = alwaysAnswer;
}
private void addField(String fieldName) {
specifiedFields.add(fieldName);
}
public boolean includesField(String field) {
if(alwaysAnswer != null)
return alwaysAnswer.booleanValue();
if(isExcludeFilter)
return !specifiedFields.contains(field);
return specifiedFields.contains(field);
}
public int numIncludedFields() {
return specifiedFields.size();
}
}
/**
* Write this HollowFilterConfig to a human-readable, and parseable String.
*
* This can be used to serialize a configuration. The returned String can be used to
* recreate the {@link HollowFilterConfig} using {@link HollowFilterConfig#fromString(String)}
*/
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(isExcludeFilter ? "EXCLUDE" : "INCLUDE");
for(String type : specifiedTypes) {
builder.append('\n').append(type);
}
for(Map.Entry<String, ObjectFilterConfig> entry : specifiedFieldConfigs.entrySet()) {
String typeName = entry.getKey();
ObjectFilterConfig typeConfig = entry.getValue();
if(typeConfig.specifiedFields.isEmpty()) {
builder.append('\n').append(typeName);
} else {
for(String field : typeConfig.specifiedFields) {
builder.append('\n').append(typeName).append('.').append(field);
}
}
}
return builder.toString();
}
/**
* Parse a HollowFilterConfig from the specified String. The String should contain multiple lines.
* The first line should be either "EXCLUDE" or "INCLUDE". Subsequent lines should be one of the following:
* <ul>
* <li><typeName></li>
* <li><typeName>.<fieldName></li>
* </ul>
*
* @param conf the configuration as a string
* @return the filter configuration
*/
public static HollowFilterConfig fromString(String conf) {
String lines[] = conf.split("\n");
HollowFilterConfig config = new HollowFilterConfig("EXCLUDE".equals(lines[0]));
for(int i=1;i<lines.length;i++) {
int delimiterIdx = lines[i].indexOf('.');
if(delimiterIdx == -1) {
config.addType(lines[i]);
} else {
String type = lines[i].substring(0, delimiterIdx);
String field = lines[i].substring(delimiterIdx+1);
config.addField(type, field);
}
}
return config;
}
private Map<String, HollowSchema> mapSchemas(Collection<HollowSchema> schemas) {
Map<String, HollowSchema> schemaMap = new HashMap<String, HollowSchema>();
for(HollowSchema schema : schemas) {
schemaMap.put(schema.getName(), schema);
}
return schemaMap;
}
}
| 9,084 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/HollowCollectionTypeReadState.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import com.netflix.hollow.core.memory.MemoryMode;
import com.netflix.hollow.core.read.dataaccess.HollowCollectionTypeDataAccess;
import com.netflix.hollow.core.read.iterator.HollowOrdinalIterator;
import com.netflix.hollow.core.schema.HollowCollectionSchema;
import com.netflix.hollow.core.schema.HollowSchema;
/**
* The parent class for {@link HollowTypeReadState}s for SET or LIST records.
*/
public abstract class HollowCollectionTypeReadState extends HollowTypeReadState implements HollowCollectionTypeDataAccess {
public HollowCollectionTypeReadState(HollowReadStateEngine stateEngine, MemoryMode memoryMode, HollowSchema schema) {
super(stateEngine, memoryMode, schema);
}
public abstract int size(int ordinal);
public abstract HollowOrdinalIterator ordinalIterator(int ordinal);
public abstract HollowCollectionSchema getSchema();
}
| 9,085 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/PopulatedOrdinalListener.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import java.util.BitSet;
/**
* A PopulatedOrdinalListener is (unless explicitly specified) automatically registered with each type
* in a {@link HollowReadStateEngine}. This listener tracks the populated and previous ordinals using
* BitSets.
*/
public class PopulatedOrdinalListener implements HollowTypeStateListener {
private final BitSet previousOrdinals;
private final BitSet populatedOrdinals;
public PopulatedOrdinalListener() {
this.populatedOrdinals = new BitSet();
this.previousOrdinals = new BitSet();
}
@Override
public void beginUpdate() {
previousOrdinals.clear();
previousOrdinals.or(populatedOrdinals);
}
@Override
public void addedOrdinal(int ordinal) {
populatedOrdinals.set(ordinal);
}
@Override
public void removedOrdinal(int ordinal) {
populatedOrdinals.clear(ordinal);
}
@Override
public void endUpdate() { }
public boolean updatedLastCycle() {
return !populatedOrdinals.equals(previousOrdinals);
}
public BitSet getPopulatedOrdinals() {
return populatedOrdinals;
}
public BitSet getPreviousOrdinals() {
return previousOrdinals;
}
}
| 9,086 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/HollowReadStateEngine.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import com.netflix.hollow.api.error.SchemaNotFoundException;
import com.netflix.hollow.core.HollowStateEngine;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.memory.pool.RecyclingRecycler;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.core.read.engine.map.HollowMapTypeReadState;
import com.netflix.hollow.core.read.engine.set.HollowSetTypeReadState;
import com.netflix.hollow.core.read.missing.DefaultMissingDataHandler;
import com.netflix.hollow.core.read.missing.MissingDataHandler;
import com.netflix.hollow.core.schema.HollowListSchema;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.schema.HollowSetSchema;
import com.netflix.hollow.core.util.DefaultHashCodeFinder;
import com.netflix.hollow.core.util.HollowObjectHashCodeFinder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* A HollowReadStateEngine is our main handle to the current state of a Hollow dataset as a data consumer.
* <p>
* A dataset changes over time. A core concept in Hollow is that the timeline for a changing dataset can be
* broken down into discrete data states, each of which is a complete snapshot of the data at a particular point in time.
* Data consumers handle data states with a HollowReadStateEngine.
*/
public class HollowReadStateEngine implements HollowStateEngine, HollowDataAccess {
private final Map<String, HollowTypeReadState> typeStates;
private final Map<String, List<HollowTypeStateListener>> listeners;
private final HollowObjectHashCodeFinder hashCodeFinder;
private final boolean listenToAllPopulatedOrdinals;
private boolean skipTypeShardUpdateWithNoAdditions;
private ArraySegmentRecycler memoryRecycler;
private Map<String,String> headerTags;
private Set<String> typesWithDefinedHashCodes = new HashSet<String>();
private long currentRandomizedTag;
private long originRandomizedTag;
private MissingDataHandler missingDataHandler = new DefaultMissingDataHandler();
public HollowReadStateEngine() {
this(DefaultHashCodeFinder.INSTANCE, true, new RecyclingRecycler());
}
public HollowReadStateEngine(boolean listenToAllPopulatedOrdinals) {
this(DefaultHashCodeFinder.INSTANCE, listenToAllPopulatedOrdinals, new RecyclingRecycler());
}
public HollowReadStateEngine(ArraySegmentRecycler recycler) {
this(DefaultHashCodeFinder.INSTANCE, true, recycler);
}
public HollowReadStateEngine(boolean listenToAllPopulatedOrdinals, ArraySegmentRecycler recycler) {
this(DefaultHashCodeFinder.INSTANCE, listenToAllPopulatedOrdinals, recycler);
}
@Deprecated
public HollowReadStateEngine(HollowObjectHashCodeFinder hashCodeFinder) {
this(hashCodeFinder, true, new RecyclingRecycler());
}
@Deprecated
public HollowReadStateEngine(HollowObjectHashCodeFinder hashCodeFinder, boolean listenToAllPopulatedOrdinals, ArraySegmentRecycler recycler) {
this.typeStates = new HashMap<String, HollowTypeReadState>();
this.listeners = new HashMap<String, List<HollowTypeStateListener>>();
this.hashCodeFinder = hashCodeFinder;
this.memoryRecycler = recycler;
this.listenToAllPopulatedOrdinals = listenToAllPopulatedOrdinals;
}
@Override
public HollowObjectHashCodeFinder getHashCodeFinder() {
return hashCodeFinder;
}
protected void addTypeState(HollowTypeReadState typeState) {
typeStates.put(typeState.getSchema().getName(), typeState);
if(listenToAllPopulatedOrdinals) {
typeState.addListener(new PopulatedOrdinalListener());
}
List<HollowTypeStateListener> list = listeners.get(typeState.getSchema().getName());
if(list != null) {
for(HollowTypeStateListener listener : list)
typeState.addListener(listener);
}
}
/**
* Add a {@link HollowTypeStateListener} to a type.
*
* @param typeName the type name
* @param listener the listener to add
*/
public void addTypeListener(String typeName, HollowTypeStateListener listener) {
List<HollowTypeStateListener> list = listeners.get(typeName);
if(list == null) {
list = new ArrayList<HollowTypeStateListener>();
listeners.put(typeName, list);
}
list.add(listener);
HollowTypeReadState typeState = typeStates.get(typeName);
if(typeState != null)
typeState.addListener(listener);
}
void wireTypeStatesToSchemas() {
for(HollowTypeReadState state : typeStates.values()) {
switch(state.getSchema().getSchemaType()) {
case OBJECT:
HollowObjectSchema objSchema = (HollowObjectSchema)state.getSchema();
for(int i=0;i<objSchema.numFields();i++) {
if(objSchema.getReferencedType(i) != null)
objSchema.setReferencedTypeState(i, typeStates.get(objSchema.getReferencedType(i)));
}
break;
case LIST:
HollowListSchema listSchema = (HollowListSchema)state.getSchema();
listSchema.setElementTypeState(typeStates.get(listSchema.getElementType()));
break;
case SET:
HollowSetSchema setSchema = (HollowSetSchema)state.getSchema();
setSchema.setElementTypeState(typeStates.get(setSchema.getElementType()));
((HollowSetTypeReadState)state).buildKeyDeriver();
break;
case MAP:
HollowMapSchema mapSchema = (HollowMapSchema)state.getSchema();
mapSchema.setKeyTypeState(typeStates.get(mapSchema.getKeyType()));
mapSchema.setValueTypeState(typeStates.get(mapSchema.getValueType()));
((HollowMapTypeReadState)state).buildKeyDeriver();
break;
}
}
}
/**
* Calculates the data size of a read state engine which is defined as the approximate heap footprint by iterating
* over the read state shards in each type state
* @return the heap footprint of the read state engine
*/
public long calcApproxDataSize() {
return this.getAllTypes()
.stream()
.map(this::getTypeState)
.mapToLong(HollowTypeReadState::getApproximateHeapFootprintInBytes)
.sum();
}
@Override
public HollowTypeDataAccess getTypeDataAccess(String type) {
return typeStates.get(type);
}
@Override
public HollowTypeDataAccess getTypeDataAccess(String type, int ordinal) {
return typeStates.get(type);
}
@Override
public Collection<String> getAllTypes() {
return typeStates.keySet();
}
public HollowTypeReadState getTypeState(String type) {
return typeStates.get(type);
}
public Collection<HollowTypeReadState> getTypeStates() {
return typeStates.values();
}
public ArraySegmentRecycler getMemoryRecycler() {
return memoryRecycler;
}
public boolean isListenToAllPopulatedOrdinals() {
return listenToAllPopulatedOrdinals;
}
/**
* Experimental: When there are no updates for a type shard in a delta, skip updating that type shard.
*/
public void setSkipTypeShardUpdateWithNoAdditions(boolean skipTypeShardUpdateWithNoAdditions) {
this.skipTypeShardUpdateWithNoAdditions = skipTypeShardUpdateWithNoAdditions;
}
public boolean isSkipTypeShardUpdateWithNoAdditions() {
return skipTypeShardUpdateWithNoAdditions;
}
@Override
public List<HollowSchema> getSchemas() {
List<HollowSchema> schemas = new ArrayList<HollowSchema>();
for(Map.Entry<String, HollowTypeReadState> entry : typeStates.entrySet()) {
schemas.add(entry.getValue().getSchema());
}
return schemas;
}
@Override
public HollowSchema getSchema(String type) {
HollowTypeReadState typeState = getTypeState(type);
return typeState == null ? null : typeState.getSchema();
}
@Override
public HollowSchema getNonNullSchema(String type) {
HollowSchema schema = getSchema(type);
if (schema == null) {
throw new SchemaNotFoundException(type, getAllTypes());
}
return schema;
}
protected void afterInitialization() { }
public void setMissingDataHandler(MissingDataHandler handler) {
this.missingDataHandler = handler;
}
@Override
public MissingDataHandler getMissingDataHandler() {
return missingDataHandler;
}
public void setHeaderTags(Map<String, String> headerTags) {
this.headerTags = headerTags;
populatedDefinedHashCodesTypesIfHeaderTagIsPresent();
}
@Override
public Map<String, String> getHeaderTags() {
return headerTags;
}
@Override
public String getHeaderTag(String name) {
return headerTags.get(name);
}
public void invalidate() {
listeners.clear();
for(Map.Entry<String, HollowTypeReadState> entry : typeStates.entrySet())
entry.getValue().invalidate();
memoryRecycler = null;
}
@Override
public void resetSampling() {
for(Map.Entry<String, HollowTypeReadState> entry : typeStates.entrySet())
entry.getValue().getSampler().reset();
}
@Override
public boolean hasSampleResults() {
for(Map.Entry<String, HollowTypeReadState> entry : typeStates.entrySet())
if(entry.getValue().getSampler().hasSampleResults())
return true;
return false;
}
public boolean updatedLastCycle() {
for(Map.Entry<String, HollowTypeReadState> entry : typeStates.entrySet()) {
if(entry.getValue().getListener(PopulatedOrdinalListener.class).updatedLastCycle())
return true;
}
return false;
}
public Set<String> getTypesWithDefinedHashCodes() {
return typesWithDefinedHashCodes;
}
public long getCurrentRandomizedTag() {
return currentRandomizedTag;
}
public long getOriginRandomizedTag() {
return originRandomizedTag;
}
public void setCurrentRandomizedTag(long currentRandomizedTag) {
this.currentRandomizedTag = currentRandomizedTag;
}
public void setOriginRandomizedTag(long originRandomizedTag) {
this.originRandomizedTag = originRandomizedTag;
}
private void populatedDefinedHashCodesTypesIfHeaderTagIsPresent() {
String definedHashCodesTag = headerTags.get(HollowObjectHashCodeFinder.DEFINED_HASH_CODES_HEADER_NAME);
if(definedHashCodesTag == null || "".equals(definedHashCodesTag)) {
this.typesWithDefinedHashCodes = Collections.<String>emptySet();
} else {
Set<String>definedHashCodeTypes = new HashSet<String>();
for(String type : definedHashCodesTag.split(","))
definedHashCodeTypes.add(type);
this.typesWithDefinedHashCodes = definedHashCodeTypes;
}
}
}
| 9,087 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/HollowTypeReadState.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.core.memory.MemoryMode;
import com.netflix.hollow.core.memory.encoding.GapEncodedVariableLengthIntegerReader;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.read.HollowBlobInput;
import com.netflix.hollow.core.read.dataaccess.HollowDataAccess;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.tools.checksum.HollowChecksum;
import java.io.IOException;
import java.util.Arrays;
import java.util.BitSet;
import java.util.stream.Stream;
/**
* A HollowTypeReadState contains and is the root handle to all of the records of a specific type in
* a {@link HollowReadStateEngine}.
*/
public abstract class HollowTypeReadState implements HollowTypeDataAccess {
protected static final HollowTypeStateListener[] EMPTY_LISTENERS = new HollowTypeStateListener[0];
protected final HollowReadStateEngine stateEngine;
protected final MemoryMode memoryMode;
protected final HollowSchema schema;
protected HollowTypeStateListener[] stateListeners;
public HollowTypeReadState(HollowReadStateEngine stateEngine, MemoryMode memoryMode, HollowSchema schema) {
this.stateEngine = stateEngine;
this.memoryMode = memoryMode;
this.schema = schema;
this.stateListeners = EMPTY_LISTENERS;
}
/**
* Add a {@link HollowTypeStateListener} to this type.
* @param listener the listener to add
*/
public void addListener(HollowTypeStateListener listener) {
HollowTypeStateListener[] newListeners = Arrays.copyOf(stateListeners, stateListeners.length + 1);
newListeners[newListeners.length - 1] = listener;
stateListeners = newListeners;
}
/**
* Remove a specific {@link HollowTypeStateListener} from this type.
* @param listener the listener to remove
*/
public void removeListener(HollowTypeStateListener listener) {
if (stateListeners.length == 0)
return;
stateListeners = Stream.of(stateListeners)
.filter(l -> l != listener)
.toArray(HollowTypeStateListener[]::new);
}
/**
* @return all {@link HollowTypeStateListener}s currently associated with this type.
*/
public HollowTypeStateListener[] getListeners() {
return stateListeners;
}
/**
* @param listenerClazz the listener class
* @return a {@link HollowTypeStateListener} of the specified class currently associated with this type, or
* null if none is currently attached.
* @param <T> the type of the listener
*/
@SuppressWarnings("unchecked")
public <T extends HollowTypeStateListener> T getListener(Class<T> listenerClazz) {
for (HollowTypeStateListener listener : stateListeners) {
if (listenerClazz.isAssignableFrom(listener.getClass())) {
return (T) listener;
}
}
return null;
}
/**
* Returns the BitSet containing the currently populated ordinals in this type state.
* <p>
* WARNING: Do not modify the returned BitSet.
* @return the bit containing the currently populated ordinals
*/
public BitSet getPopulatedOrdinals() {
return getListener(PopulatedOrdinalListener.class).getPopulatedOrdinals();
}
/**
* Returns the BitSet containing the populated ordinals in this type state prior to the previous delta transition.
* <p>
* WARNING: Do not modify the returned BitSet.
* @return the bit containing the previously populated ordinals
*/
public BitSet getPreviousOrdinals() {
return getListener(PopulatedOrdinalListener.class).getPreviousOrdinals();
}
/**
* @return The maximum ordinal currently populated in this type state.
*/
public abstract int maxOrdinal();
public abstract void readSnapshot(HollowBlobInput in, ArraySegmentRecycler recycler) throws IOException;
public abstract void readSnapshot(HollowBlobInput in, ArraySegmentRecycler recycler, int numShards) throws IOException;
public abstract void applyDelta(HollowBlobInput in, HollowSchema deltaSchema, ArraySegmentRecycler memoryRecycler, int deltaNumShards) throws IOException;
protected boolean shouldReshard(int currNumShards, int deltaNumShards) {
return currNumShards!=0 && deltaNumShards!=0 && currNumShards!=deltaNumShards;
}
public HollowSchema getSchema() {
return schema;
}
@Override
public HollowDataAccess getDataAccess() {
return stateEngine;
}
/**
* @return the {@link HollowReadStateEngine} which this type state belongs to.
*/
public HollowReadStateEngine getStateEngine() {
return stateEngine;
}
protected void notifyListenerAboutDeltaChanges(GapEncodedVariableLengthIntegerReader removals, GapEncodedVariableLengthIntegerReader additions, int shardNumber, int numShards) {
for(HollowTypeStateListener stateListener : stateListeners) {
removals.reset();
int removedOrdinal = removals.nextElement();
while(removedOrdinal < Integer.MAX_VALUE) {
stateListener.removedOrdinal((removedOrdinal * numShards) + shardNumber);
removals.advance();
removedOrdinal = removals.nextElement();
}
additions.reset();
int addedOrdinal = additions.nextElement();
while(addedOrdinal < Integer.MAX_VALUE) {
stateListener.addedOrdinal((addedOrdinal * numShards) + shardNumber);
additions.advance();
addedOrdinal = additions.nextElement();
}
}
}
public abstract HollowSampler getSampler();
protected abstract void invalidate();
public HollowChecksum getChecksum(HollowSchema withSchema) {
HollowChecksum cksum = new HollowChecksum();
applyToChecksum(cksum, withSchema);
return cksum;
}
protected abstract void applyToChecksum(HollowChecksum checksum, HollowSchema withSchema);
@Override
public HollowTypeReadState getTypeState() {
return this;
}
/**
* @return an approximate accounting of the current heap footprint occupied by this type state.
*/
public abstract long getApproximateHeapFootprintInBytes();
/**
* @return an approximate accounting of the current cost of the "ordinal holes" in this type state.
*/
public abstract long getApproximateHoleCostInBytes();
/**
* @return The number of shards into which this type is split. Sharding is transparent, so this has no effect on normal usage.
*/
public abstract int numShards();
}
| 9,088 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/HollowBlobHeaderReader.java | /*
* Copyright 2016-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import com.netflix.hollow.core.HollowBlobHeader;
import com.netflix.hollow.core.HollowBlobOptionalPartHeader;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.read.HollowBlobInput;
import com.netflix.hollow.core.schema.HollowSchema;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Contains serialization logic for the hollow blob headers.
*
* @see HollowBlobHeader
*
*/
public class HollowBlobHeaderReader {
public HollowBlobHeader readHeader(InputStream is) throws IOException {
return readHeader(HollowBlobInput.serial(is));
}
public HollowBlobHeader readHeader(HollowBlobInput in) throws IOException {
HollowBlobHeader header = new HollowBlobHeader();
int headerVersion = in.readInt();
if(headerVersion != HollowBlobHeader.HOLLOW_BLOB_VERSION_HEADER) {
throw new IOException("The HollowBlob you are trying to read is incompatible. "
+ "The expected Hollow blob version was " + HollowBlobHeader.HOLLOW_BLOB_VERSION_HEADER
+ " but the actual version was " + headerVersion);
}
header.setBlobFormatVersion(headerVersion);
header.setOriginRandomizedTag(in.readLong());
header.setDestinationRandomizedTag(in.readLong());
int oldBytesToSkip = VarInt.readVInt(in); /// pre v2.2.0 envelope
if(oldBytesToSkip != 0) {
header.setSchemas(readSchemas(in));
/// forwards-compatibility, new data can be added here.
skipForwardCompatibilityBytes(in);
}
Map<String, String> headerTags = readHeaderTags(in);
header.setHeaderTags(headerTags);
return header;
}
public HollowBlobOptionalPartHeader readPartHeader(InputStream is) throws IOException {
return readPartHeader(HollowBlobInput.serial(is));
}
public HollowBlobOptionalPartHeader readPartHeader(HollowBlobInput in) throws IOException {
int headerVersion = in.readInt();
if(headerVersion != HollowBlobOptionalPartHeader.HOLLOW_BLOB_PART_VERSION_HEADER) {
throw new IOException("The HollowBlob optional part you are trying to read is incompatible. "
+ "The expected Hollow blob version was " + HollowBlobHeader.HOLLOW_BLOB_VERSION_HEADER
+ " but the actual version was " + headerVersion);
}
HollowBlobOptionalPartHeader header = new HollowBlobOptionalPartHeader(in.readUTF());
header.setOriginRandomizedTag(in.readLong());
header.setDestinationRandomizedTag(in.readLong());
header.setSchemas(readSchemas(in));
/// forwards-compatibility, new data can be added here.
skipForwardCompatibilityBytes(in);
return header;
}
private List<HollowSchema> readSchemas(HollowBlobInput in) throws IOException {
int numSchemas = VarInt.readVInt(in);
List<HollowSchema> schemas = new ArrayList<HollowSchema>(numSchemas);
for(int i=0;i<numSchemas;i++)
schemas.add(HollowSchema.readFrom(in));
return schemas;
}
private void skipForwardCompatibilityBytes(HollowBlobInput in) throws IOException, EOFException {
int bytesToSkip = VarInt.readVInt(in);
while(bytesToSkip > 0) {
int skippedBytes = (int)in.skipBytes(bytesToSkip);
if(skippedBytes < 0)
throw new EOFException();
bytesToSkip -= skippedBytes;
}
}
/**
* Map of string header tags reading.
*
* @param in the Hollow blob input
* @throws IOException
*/
private Map<String, String> readHeaderTags(HollowBlobInput in) throws IOException {
int numHeaderTags = in.readShort();
Map<String, String> headerTags = new HashMap<String, String>();
for (int i = 0; i < numHeaderTags; i++) {
headerTags.put(in.readUTF(), in.readUTF());
}
return headerTags;
}
}
| 9,089 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/SnapshotPopulatedOrdinalsReader.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
public class SnapshotPopulatedOrdinalsReader {
/**
* Read populated ordinals as a bit set from a {@code HollowBlobInput}, and notify a listener for each populated ordinal.
*
* @param in the Hollow blob input data
* @param listeners the type state listeners
* @throws IOException if the ordinals cannot be read
* @author dkoszewnik
*/
public static void readOrdinals(HollowBlobInput in, HollowTypeStateListener[] listeners) throws IOException {
int numLongs = in.readInt();
int currentOrdinal = 0;
for(int i=0;i<numLongs;i++) {
long l = in.readLong();
notifyPopulatedOrdinals(l, currentOrdinal, listeners);
currentOrdinal += 64;
}
}
private static void notifyPopulatedOrdinals(long l, int ordinal, HollowTypeStateListener[] listeners) {
if(l == 0)
return;
int stopOrdinal = ordinal + 64;
while(ordinal < stopOrdinal) {
long mask = 1L << ordinal;
if((l & mask) != 0) {
for(int i=0; i<listeners.length; i++) {
listeners[i].addedOrdinal(ordinal);
}
}
ordinal++;
}
}
public static void discardOrdinals(HollowBlobInput in) throws IOException {
long numLongs = in.readInt();
long bytesToSkip = numLongs * 8;
while(bytesToSkip > 0)
bytesToSkip -= in.skipBytes(bytesToSkip);
}
}
| 9,090 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/HollowBlobReader.java | /*
* Copyright 2016-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import com.netflix.hollow.core.HollowBlobHeader;
import com.netflix.hollow.core.HollowBlobOptionalPartHeader;
import com.netflix.hollow.core.memory.MemoryMode;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.read.HollowBlobInput;
import com.netflix.hollow.core.read.OptionalBlobPartInput;
import com.netflix.hollow.core.read.engine.list.HollowListTypeReadState;
import com.netflix.hollow.core.read.engine.map.HollowMapTypeReadState;
import com.netflix.hollow.core.read.engine.object.HollowObjectTypeReadState;
import com.netflix.hollow.core.read.engine.set.HollowSetTypeReadState;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.filter.TypeFilter;
import com.netflix.hollow.core.schema.HollowListSchema;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.core.schema.HollowSetSchema;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import java.util.logging.Logger;
/**
* A HollowBlobReader is used to populate and update data in a {@link HollowReadStateEngine}, via the consumption
* of snapshot and delta blobs. Caller can choose between on-heap or shared-memory mode; defaults to (and for
* backwards compatibility) on-heap mode.
*/
public class HollowBlobReader {
private final Logger log = Logger.getLogger(HollowBlobReader.class.getName());
private final HollowReadStateEngine stateEngine;
private final MemoryMode memoryMode;
private final HollowBlobHeaderReader headerReader;
public HollowBlobReader(HollowReadStateEngine stateEngine) {
this(stateEngine, new HollowBlobHeaderReader());
}
public HollowBlobReader(HollowReadStateEngine stateEngine, HollowBlobHeaderReader headerReader) {
this(stateEngine, headerReader, MemoryMode.ON_HEAP);
}
public HollowBlobReader(HollowReadStateEngine stateEngine, MemoryMode memoryMode) {
this(stateEngine, new HollowBlobHeaderReader(), memoryMode);
}
public HollowBlobReader(HollowReadStateEngine stateEngine, HollowBlobHeaderReader headerReader, MemoryMode memoryMode) {
this.stateEngine = stateEngine;
this.headerReader = headerReader;
this.memoryMode = memoryMode;
}
/**
* Initialize the state engine using a snapshot blob from the provided input stream.
*
* @param is the input stream to read the snapshot from
* @throws IOException if the snapshot could not be read
*/
public void readSnapshot(InputStream is) throws IOException {
HollowBlobInput hbi = HollowBlobInput.serial(is);
readSnapshot(hbi);
}
/**
* Initialize the state engine using a snapshot blob from the provided HollowBlobInput.
*
* @param in the Hollow blob input to read the snapshot from
* @throws IOException if the snapshot could not be read
*/
public void readSnapshot(HollowBlobInput in) throws IOException {
readSnapshot(in, new HollowFilterConfig(true));
}
/**
* Initialize the file engine using a snapshot from the provided RandomAccessFile.
* <p>
* Apply the provided {@link HollowFilterConfig} to the state.
*
* @param is the input stream to read the snapshot from
* @param filter the filtering configuration to filter the snapshot
* @throws IOException if the snapshot could not be read
*
* @deprecated use {@link #readSnapshot(InputStream, TypeFilter)}
*/
@Deprecated
public void readSnapshot(InputStream is, HollowFilterConfig filter) throws IOException {
HollowBlobInput hbi = HollowBlobInput.serial(is);
readSnapshot(hbi, (TypeFilter) filter);
}
/**
* Initialize the file engine using a snapshot from the provided input stream.
* <p>
* Apply the provided {@link TypeFilter} to the state.
*
* @param is the input stream to read the snapshot from
* @param filter the filtering configuration to filter the snapshot
* @throws IOException if the snapshot could not be read
*/
public void readSnapshot(InputStream is, TypeFilter filter) throws IOException {
HollowBlobInput hbi = HollowBlobInput.serial(is);
readSnapshot(hbi, filter);
}
/**
* Initialize the file engine using a snapshot from the provided Hollow Blob Input.
* <p>
* Apply the provided {@link TypeFilter} to the state.
*
* @param in the Hollow blob input to read the snapshot from
* @param filter the filtering configuration to filter the snapshot
* @throws IOException if the snapshot could not be read
*/
public void readSnapshot(HollowBlobInput in, TypeFilter filter) throws IOException {
readSnapshot(in, null, filter);
}
public void readSnapshot(HollowBlobInput in, OptionalBlobPartInput optionalParts) throws IOException {
readSnapshot(in, optionalParts, new HollowFilterConfig(true));
}
public void readSnapshot(HollowBlobInput in, OptionalBlobPartInput optionalParts, TypeFilter filter) throws IOException {
validateMemoryMode(in.getMemoryMode());
Map<String, HollowBlobInput> optionalPartInputs = null;
if(optionalParts != null)
optionalPartInputs = optionalParts.getInputsByPartName(in.getMemoryMode());
HollowBlobHeader header = readHeader(in, false);
List<HollowBlobOptionalPartHeader> partHeaders = readPartHeaders(header, optionalPartInputs, in.getMemoryMode());
List<HollowSchema> allSchemas = combineSchemas(header, partHeaders);
filter = filter.resolve(allSchemas);
notifyBeginUpdate();
long startTime = System.currentTimeMillis();
int numStates = VarInt.readVInt(in);
Collection<String> typeNames = new TreeSet<>();
for(int i=0;i<numStates;i++) {
String typeName = readTypeStateSnapshot(in, filter);
typeNames.add(typeName);
}
if(optionalPartInputs != null) {
for(Map.Entry<String, HollowBlobInput> optionalPartEntry : optionalPartInputs.entrySet()) {
numStates = VarInt.readVInt(optionalPartEntry.getValue());
for(int i=0;i<numStates;i++) {
String typeName = readTypeStateSnapshot(optionalPartEntry.getValue(), filter);
typeNames.add(typeName);
}
}
}
stateEngine.wireTypeStatesToSchemas();
long endTime = System.currentTimeMillis();
log.info("SNAPSHOT COMPLETED IN " + (endTime - startTime) + "ms");
log.info("TYPES: " + typeNames);
notifyEndUpdate();
stateEngine.afterInitialization();
}
/**
* Update the state engine using a delta (or reverse delta) blob from the provided input stream.
* <p>
* If a {@link HollowFilterConfig} was applied at the time the {@link HollowReadStateEngine} was initialized
* with a snapshot, it will continue to be in effect after the state is updated.
*
* @param in the input stream to read the delta from
* @throws IOException if the delta could not be applied
*/
public void applyDelta(InputStream in) throws IOException {
HollowBlobInput hbi = HollowBlobInput.serial(in);
applyDelta(hbi);
}
/**
* Update the state engine using a delta (or reverse delta) blob from the provided HollowBlobInput.
* <p>
* If a {@link HollowFilterConfig} was applied at the time the {@link HollowReadStateEngine} was initialized
* with a snapshot, it will continue to be in effect after the state is updated.
*
* @param in the Hollow blob input to read the delta from
* @throws IOException if the delta could not be applied
*/
public void applyDelta(HollowBlobInput in) throws IOException {
applyDelta(in, null);
}
public void applyDelta(HollowBlobInput in, OptionalBlobPartInput optionalParts) throws IOException {
validateMemoryMode(in.getMemoryMode());
Map<String, HollowBlobInput> optionalPartInputs = null;
if(optionalParts != null)
optionalPartInputs = optionalParts.getInputsByPartName(in.getMemoryMode());
HollowBlobHeader header = readHeader(in, true);
List<HollowBlobOptionalPartHeader> partHeaders = readPartHeaders(header, optionalPartInputs, in.getMemoryMode());
notifyBeginUpdate();
long startTime = System.currentTimeMillis();
int numStates = VarInt.readVInt(in);
Collection<String> typeNames = new TreeSet<String>();
for(int i=0;i<numStates;i++) {
String typeName = readTypeStateDelta(in);
typeNames.add(typeName);
stateEngine.getMemoryRecycler().swap();
}
if(optionalPartInputs != null) {
for(Map.Entry<String, HollowBlobInput> optionalPartEntry : optionalPartInputs.entrySet()) {
numStates = VarInt.readVInt(optionalPartEntry.getValue());
for(int i=0;i<numStates;i++) {
String typeName = readTypeStateDelta(optionalPartEntry.getValue());
typeNames.add(typeName);
stateEngine.getMemoryRecycler().swap();
}
}
}
long endTime = System.currentTimeMillis();
log.info("DELTA COMPLETED IN " + (endTime - startTime) + "ms");
log.info("TYPES: " + typeNames);
notifyEndUpdate();
}
private HollowBlobHeader readHeader(HollowBlobInput in, boolean isDelta) throws IOException {
HollowBlobHeader header = headerReader.readHeader(in);
if(isDelta && header.getOriginRandomizedTag() != stateEngine.getCurrentRandomizedTag())
throw new IOException("Attempting to apply a delta to a state from which it was not originated!");
stateEngine.setCurrentRandomizedTag(header.getDestinationRandomizedTag());
stateEngine.setOriginRandomizedTag(header.getOriginRandomizedTag());
stateEngine.setHeaderTags(header.getHeaderTags());
return header;
}
private List<HollowBlobOptionalPartHeader> readPartHeaders(HollowBlobHeader header, Map<String, HollowBlobInput> inputsByPartName, MemoryMode mode) throws IOException {
if(inputsByPartName == null)
return Collections.emptyList();
List<HollowBlobOptionalPartHeader> list = new ArrayList<>(inputsByPartName.size());
for(Map.Entry<String, HollowBlobInput> entry : inputsByPartName.entrySet()) {
HollowBlobOptionalPartHeader partHeader = headerReader.readPartHeader(entry.getValue());
if(!partHeader.getPartName().equals(entry.getKey()))
throw new IllegalArgumentException("Optional blob part expected name " + entry.getKey() + " but was " + partHeader.getPartName());
if(partHeader.getOriginRandomizedTag() != header.getOriginRandomizedTag()
|| partHeader.getDestinationRandomizedTag() != header.getDestinationRandomizedTag())
throw new IllegalArgumentException("Optional blob part " + entry.getKey() + " does not appear to be matched with the main input");
list.add(partHeader);
}
return list;
}
private List<HollowSchema> combineSchemas(HollowBlobHeader header, List<HollowBlobOptionalPartHeader> partHeaders) throws IOException {
if(partHeaders.isEmpty())
return header.getSchemas();
List<HollowSchema> schemas = new ArrayList<>(header.getSchemas());
for(HollowBlobOptionalPartHeader partHeader : partHeaders) {
schemas.addAll(partHeader.getSchemas());
}
return schemas;
}
private void notifyBeginUpdate() {
for(HollowTypeReadState typeFile: stateEngine.getTypeStates()) {
for(HollowTypeStateListener listener : typeFile.getListeners()) {
listener.beginUpdate();
}
}
}
private void notifyEndUpdate() {
for(HollowTypeReadState typeFile : stateEngine.getTypeStates()) {
for(HollowTypeStateListener listener : typeFile.getListeners()) {
listener.endUpdate();
}
}
}
private String readTypeStateSnapshot(HollowBlobInput in, TypeFilter filter) throws IOException {
HollowSchema schema = HollowSchema.readFrom(in);
int numShards = readNumShards(in);
String typeName = schema.getName();
if(schema instanceof HollowObjectSchema) {
if(!filter.includes(typeName)) {
HollowObjectTypeReadState.discardSnapshot(in, (HollowObjectSchema)schema, numShards);
} else {
HollowObjectSchema unfilteredSchema = (HollowObjectSchema)schema;
HollowObjectSchema filteredSchema = unfilteredSchema.filterSchema(filter);
populateTypeStateSnapshotWithNumShards(in, new HollowObjectTypeReadState(stateEngine, memoryMode, filteredSchema, unfilteredSchema), numShards);
}
} else if (schema instanceof HollowListSchema) {
if(!filter.includes(typeName)) {
HollowListTypeReadState.discardSnapshot(in, numShards);
} else {
populateTypeStateSnapshot(in, new HollowListTypeReadState(stateEngine, memoryMode, (HollowListSchema)schema, numShards));
}
} else if(schema instanceof HollowSetSchema) {
if(!filter.includes(typeName)) {
HollowSetTypeReadState.discardSnapshot(in, numShards);
} else {
populateTypeStateSnapshot(in, new HollowSetTypeReadState(stateEngine, memoryMode, (HollowSetSchema)schema, numShards));
}
} else if(schema instanceof HollowMapSchema) {
if(!filter.includes(typeName)) {
HollowMapTypeReadState.discardSnapshot(in, numShards);
} else {
populateTypeStateSnapshot(in, new HollowMapTypeReadState(stateEngine, memoryMode, (HollowMapSchema)schema, numShards));
}
}
return typeName;
}
private void populateTypeStateSnapshot(HollowBlobInput in, HollowTypeReadState typeState) throws IOException {
stateEngine.addTypeState(typeState);
typeState.readSnapshot(in, stateEngine.getMemoryRecycler());
}
private void populateTypeStateSnapshotWithNumShards(HollowBlobInput in, HollowTypeReadState typeState, int numShards) throws IOException {
if (numShards<=0 || ((numShards&(numShards-1))!=0)) {
throw new IllegalArgumentException("Number of shards must be a power of 2!");
}
stateEngine.addTypeState(typeState);
typeState.readSnapshot(in, stateEngine.getMemoryRecycler(), numShards);
}
private String readTypeStateDelta(HollowBlobInput in) throws IOException {
HollowSchema schema = HollowSchema.readFrom(in);
int numShards = readNumShards(in);
HollowTypeReadState typeState = stateEngine.getTypeState(schema.getName());
if(typeState != null) {
typeState.applyDelta(in, schema, stateEngine.getMemoryRecycler(), numShards);
} else {
discardDelta(in, schema, numShards);
}
return schema.getName();
}
private int readNumShards(HollowBlobInput in) throws IOException {
int backwardsCompatibilityBytes = VarInt.readVInt(in);
if(backwardsCompatibilityBytes == 0)
return 1; /// produced by a version of hollow prior to 2.1.0, always only 1 shard.
skipForwardsCompatibilityBytes(in);
return VarInt.readVInt(in);
}
private void skipForwardsCompatibilityBytes(HollowBlobInput in) throws IOException {
int bytesToSkip = VarInt.readVInt(in);
while(bytesToSkip > 0) {
int skippedBytes = (int) in.skipBytes(bytesToSkip);
if(skippedBytes < 0)
throw new EOFException();
bytesToSkip -= skippedBytes;
}
}
private void discardDelta(HollowBlobInput in, HollowSchema schema, int numShards) throws IOException {
if(schema instanceof HollowObjectSchema)
HollowObjectTypeReadState.discardDelta(in, (HollowObjectSchema)schema, numShards);
else if(schema instanceof HollowListSchema)
HollowListTypeReadState.discardDelta(in, numShards);
else if(schema instanceof HollowSetSchema)
HollowSetTypeReadState.discardDelta(in, numShards);
else if(schema instanceof HollowMapSchema)
HollowMapTypeReadState.discardDelta(in, numShards);
}
private void validateMemoryMode(MemoryMode inputMode) {
if (!memoryMode.equals(inputMode)) {
throw new IllegalStateException(String.format("HollowBlobReader is configured for memory mode %s but " +
"HollowBlobInput of mode %s was provided", memoryMode, inputMode));
}
}
}
| 9,091 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/SetMapKeyHasher.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
/**
* Hashes POJO keys according to the Set/Map hash key specification. This is used
* in order to lookup matching records in these hashtable data structures.
*/
public class SetMapKeyHasher {
/**
* Hash a key
*
* @param key the key
* @param fieldType the respective FieldTypes of each key.
* @return the hash code
*/
public static int hash(Object key[], FieldType fieldType[]) {
int hash = 0;
for (int i = 0; i < key.length; i++) {
hash *= 31;
hash ^= hash(key[i], fieldType[i]);
}
return hash;
}
/**
* Hash a single key field
*
* @param key the key
* @param fieldType the FieldType of the key.
* @return the hash code
*/
public static int hash(Object key, FieldType fieldType) {
switch(fieldType) {
case INT:
return HashCodes.hashInt(((Integer)key).intValue());
case LONG:
long longVal = ((Long)key).longValue();
return HashCodes.hashInt((int)(longVal ^ (longVal >>> 32)));
case REFERENCE:
return HashCodes.hashInt(((Integer)key).intValue());
case BYTES:
return HashCodes.hashInt(HashCodes.hashCode((byte[])key));
case STRING:
return HashCodes.hashInt(key.hashCode());
case BOOLEAN:
return HashCodes.hashInt(((Boolean)key).booleanValue() ? 1231 : 1237);
case DOUBLE:
long longBits = Double.doubleToRawLongBits(((Double)key).doubleValue());
return HashCodes.hashInt((int)(longBits ^ (longBits >>> 32)));
case FLOAT:
return HashCodes.hashInt(Float.floatToRawIntBits(((Float)key).floatValue()));
default:
throw new IllegalArgumentException("Unknown field type: " + fieldType);
}
}
}
| 9,092 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/HollowTypeStateListener.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
/**
* A HollowTypeStateListener is registered with a specific {@link HollowTypeReadState}
* to receive callback notifications when deltas are applied to a {@link HollowReadStateEngine}
*/
public interface HollowTypeStateListener {
/**
* Called immediately before a delta update is applied to the state engine
*/
void beginUpdate();
/**
* Called once for each record which is added to the registered type.
* @param ordinal the ordinal of an object that was added
*/
void addedOrdinal(int ordinal);
/**
* Called once for each record which is removed from the registered type.
* @param ordinal the ordinal of an object that was removed
*/
void removedOrdinal(int ordinal);
/**
* Called immediately after a delta update is applied to the state engine.
*/
void endUpdate();
}
| 9,093 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/map/HollowMapTypeReadState.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine.map;
import static com.netflix.hollow.core.HollowConstants.ORDINAL_NONE;
import com.netflix.hollow.api.sampling.DisabledSamplingDirector;
import com.netflix.hollow.api.sampling.HollowMapSampler;
import com.netflix.hollow.api.sampling.HollowSampler;
import com.netflix.hollow.api.sampling.HollowSamplingDirector;
import com.netflix.hollow.core.index.key.HollowPrimaryKeyValueDeriver;
import com.netflix.hollow.core.memory.MemoryMode;
import com.netflix.hollow.core.memory.encoding.GapEncodedVariableLengthIntegerReader;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.read.HollowBlobInput;
import com.netflix.hollow.core.read.dataaccess.HollowMapTypeDataAccess;
import com.netflix.hollow.core.read.engine.HollowReadStateEngine;
import com.netflix.hollow.core.read.engine.HollowTypeReadState;
import com.netflix.hollow.core.read.engine.PopulatedOrdinalListener;
import com.netflix.hollow.core.read.engine.SnapshotPopulatedOrdinalsReader;
import com.netflix.hollow.core.read.filter.HollowFilterConfig;
import com.netflix.hollow.core.read.iterator.EmptyMapOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIteratorImpl;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowObjectSchema.FieldType;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.tools.checksum.HollowChecksum;
import java.io.IOException;
import java.util.BitSet;
/**
* A {@link HollowTypeReadState} for MAP type records.
*/
public class HollowMapTypeReadState extends HollowTypeReadState implements HollowMapTypeDataAccess {
private final HollowMapSampler sampler;
private final int shardNumberMask;
private final int shardOrdinalShift;
private final HollowMapTypeReadStateShard shards[];
private HollowPrimaryKeyValueDeriver keyDeriver;
private int maxOrdinal;
public HollowMapTypeReadState(HollowReadStateEngine stateEngine, HollowMapSchema schema, int numShards) {
this(stateEngine, MemoryMode.ON_HEAP, schema, numShards);
}
public HollowMapTypeReadState(HollowReadStateEngine stateEngine, MemoryMode memoryMode, HollowMapSchema schema, int numShards) {
super(stateEngine, memoryMode, schema);
this.sampler = new HollowMapSampler(schema.getName(), DisabledSamplingDirector.INSTANCE);
this.shardNumberMask = numShards - 1;
this.shardOrdinalShift = 31 - Integer.numberOfLeadingZeros(numShards);
if(numShards < 1 || 1 << shardOrdinalShift != numShards)
throw new IllegalArgumentException("Number of shards must be a power of 2!");
HollowMapTypeReadStateShard shards[] = new HollowMapTypeReadStateShard[numShards];
for(int i=0; i<shards.length; i++)
shards[i] = new HollowMapTypeReadStateShard();
this.shards = shards;
}
@Override
public void readSnapshot(HollowBlobInput in, ArraySegmentRecycler memoryRecycler, int numShards) throws IOException {
throw new UnsupportedOperationException("This type does not yet support numShards specification when reading snapshot");
}
@Override
public void readSnapshot(HollowBlobInput in, ArraySegmentRecycler memoryRecycler) throws IOException {
if(shards.length > 1)
maxOrdinal = VarInt.readVInt(in);
for(int i=0; i<shards.length; i++) {
HollowMapTypeDataElements snapshotData = new HollowMapTypeDataElements(memoryMode, memoryRecycler);
snapshotData.readSnapshot(in);
shards[i].setCurrentData(snapshotData);
}
if(shards.length == 1)
maxOrdinal = shards[0].currentDataElements().maxOrdinal;
SnapshotPopulatedOrdinalsReader.readOrdinals(in, stateListeners);
}
@Override
public void applyDelta(HollowBlobInput in, HollowSchema schema, ArraySegmentRecycler memoryRecycler, int deltaNumShards) throws IOException {
if (shouldReshard(shards.length, deltaNumShards)) {
throw new UnsupportedOperationException("Dynamic type sharding not supported for " + schema.getName()
+ ". Current numShards=" + shards.length + ", delta numShards=" + deltaNumShards);
}
if(shards.length > 1)
maxOrdinal = VarInt.readVInt(in);
for(int i=0; i<shards.length; i++) {
HollowMapTypeDataElements deltaData = new HollowMapTypeDataElements(memoryMode, memoryRecycler);
deltaData.readDelta(in);
if(stateEngine.isSkipTypeShardUpdateWithNoAdditions() && deltaData.encodedAdditions.isEmpty()) {
if(!deltaData.encodedRemovals.isEmpty())
notifyListenerAboutDeltaChanges(deltaData.encodedRemovals, deltaData.encodedAdditions, i, shards.length);
HollowMapTypeDataElements currentData = shards[i].currentDataElements();
GapEncodedVariableLengthIntegerReader oldRemovals = currentData.encodedRemovals == null ? GapEncodedVariableLengthIntegerReader.EMPTY_READER : currentData.encodedRemovals;
if(oldRemovals.isEmpty()) {
currentData.encodedRemovals = deltaData.encodedRemovals;
oldRemovals.destroy();
} else {
if(!deltaData.encodedRemovals.isEmpty()) {
currentData.encodedRemovals = GapEncodedVariableLengthIntegerReader.combine(oldRemovals, deltaData.encodedRemovals, memoryRecycler);
oldRemovals.destroy();
}
deltaData.encodedRemovals.destroy();
}
deltaData.encodedAdditions.destroy();
} else {
HollowMapTypeDataElements nextData = new HollowMapTypeDataElements(memoryMode, memoryRecycler);
HollowMapTypeDataElements oldData = shards[i].currentDataElements();
nextData.applyDelta(oldData, deltaData);
shards[i].setCurrentData(nextData);
notifyListenerAboutDeltaChanges(deltaData.encodedRemovals, deltaData.encodedAdditions, i, shards.length);
deltaData.encodedAdditions.destroy();
oldData.destroy();
}
deltaData.destroy();
stateEngine.getMemoryRecycler().swap();
}
if(shards.length == 1)
maxOrdinal = shards[0].currentDataElements().maxOrdinal;
}
public static void discardSnapshot(HollowBlobInput in, int numShards) throws IOException {
discardType(in, numShards, false);
}
public static void discardDelta(HollowBlobInput in, int numShards) throws IOException {
discardType(in, numShards, true);
}
public static void discardType(HollowBlobInput in, int numShards, boolean delta) throws IOException {
HollowMapTypeDataElements.discardFromInput(in, numShards, delta);
if(!delta)
SnapshotPopulatedOrdinalsReader.discardOrdinals(in);
}
@Override
public int maxOrdinal() {
return maxOrdinal;
}
@Override
public int size(int ordinal) {
sampler.recordSize();
return shards[ordinal & shardNumberMask].size(ordinal >> shardOrdinalShift);
}
@Override
public int get(int ordinal, int keyOrdinal) {
return get(ordinal, keyOrdinal, keyOrdinal);
}
@Override
public int get(int ordinal, int keyOrdinal, int hashCode) {
sampler.recordGet();
return shards[ordinal & shardNumberMask].get(ordinal >> shardOrdinalShift, keyOrdinal, hashCode);
}
@Override
public int findKey(int ordinal, Object... hashKey) {
sampler.recordGet();
if(keyDeriver == null)
return ORDINAL_NONE;
FieldType fieldTypes[] = keyDeriver.getFieldTypes();
if(hashKey.length != fieldTypes.length)
return ORDINAL_NONE;
return shards[ordinal & shardNumberMask].findKey(ordinal >> shardOrdinalShift, hashKey);
}
@Override
public int findValue(int ordinal, Object... hashKey) {
return (int)findEntry(ordinal, hashKey);
}
@Override
public long findEntry(int ordinal, Object... hashKey) {
sampler.recordGet();
if(keyDeriver == null)
return -1L;
FieldType fieldTypes[] = keyDeriver.getFieldTypes();
if(hashKey.length != fieldTypes.length)
return -1L;
return shards[ordinal & shardNumberMask].findEntry(ordinal >> shardOrdinalShift, hashKey);
}
@Override
public HollowMapEntryOrdinalIterator potentialMatchOrdinalIterator(int ordinal, int hashCode) {
sampler.recordGet();
if(size(ordinal) == 0)
return EmptyMapOrdinalIterator.INSTANCE;
return new PotentialMatchHollowMapEntryOrdinalIteratorImpl(ordinal, this, hashCode);
}
@Override
public HollowMapEntryOrdinalIterator ordinalIterator(int ordinal) {
sampler.recordIterator();
if(size(ordinal) == 0)
return EmptyMapOrdinalIterator.INSTANCE;
return new HollowMapEntryOrdinalIteratorImpl(ordinal, this);
}
@Override
public long relativeBucket(int ordinal, int bucketIndex) {
return shards[ordinal & shardNumberMask].relativeBucket(ordinal >> shardOrdinalShift, bucketIndex);
}
@Override
public HollowMapSchema getSchema() {
return (HollowMapSchema)schema;
}
@Override
public HollowSampler getSampler() {
return sampler;
}
@Override
public void setSamplingDirector(HollowSamplingDirector director) {
sampler.setSamplingDirector(director);
}
@Override
public void setFieldSpecificSamplingDirector(HollowFilterConfig fieldSpec, HollowSamplingDirector director) {
sampler.setFieldSpecificSamplingDirector(fieldSpec, director);
}
@Override
public void ignoreUpdateThreadForSampling(Thread t) {
sampler.setUpdateThread(t);
}
@Override
protected void invalidate() {
stateListeners = EMPTY_LISTENERS;
for(int i=0; i<shards.length; i++)
shards[i].invalidate();
}
HollowMapTypeDataElements[] currentDataElements() {
HollowMapTypeDataElements currentDataElements[] = new HollowMapTypeDataElements[shards.length];
for(int i=0; i<shards.length; i++)
currentDataElements[i] = shards[i].currentDataElements();
return currentDataElements;
}
void setCurrentData(HollowMapTypeDataElements data) {
if(shards.length > 1)
throw new UnsupportedOperationException("Cannot directly set data on sharded type state");
shards[0].setCurrentData(data);
maxOrdinal = data.maxOrdinal;
}
@Override
protected void applyToChecksum(HollowChecksum checksum, HollowSchema withSchema) {
if(!getSchema().equals(withSchema))
throw new IllegalArgumentException("HollowMapTypeReadState cannot calculate checksum with unequal schemas: " + getSchema().getName());
BitSet populatedOrdinals = getListener(PopulatedOrdinalListener.class).getPopulatedOrdinals();
for(int i=0; i<shards.length; i++)
shards[i].applyToChecksum(checksum, populatedOrdinals, i, shards.length);
}
@Override
public long getApproximateHeapFootprintInBytes() {
long totalApproximateHeapFootprintInBytes = 0;
for(int i=0; i<shards.length; i++)
totalApproximateHeapFootprintInBytes += shards[i].getApproximateHeapFootprintInBytes();
return totalApproximateHeapFootprintInBytes;
}
@Override
public long getApproximateHoleCostInBytes() {
long totalApproximateHoleCostInBytes = 0;
BitSet populatedOrdinals = getPopulatedOrdinals();
for(int i=0; i<shards.length; i++)
totalApproximateHoleCostInBytes += shards[i].getApproximateHoleCostInBytes(populatedOrdinals, i, shards.length);
return totalApproximateHoleCostInBytes;
}
public HollowPrimaryKeyValueDeriver getKeyDeriver() {
return keyDeriver;
}
public void buildKeyDeriver() {
if(getSchema().getHashKey() != null)
this.keyDeriver = new HollowPrimaryKeyValueDeriver(getSchema().getHashKey(), getStateEngine());
for(int i=0; i<shards.length; i++)
shards[i].setKeyDeriver(keyDeriver);
}
@Override
public int numShards() {
return shards.length;
}
}
| 9,094 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/map/HollowMapDeltaHistoricalStateCreator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine.map;
import static com.netflix.hollow.core.HollowConstants.ORDINAL_NONE;
import com.netflix.hollow.core.memory.encoding.FixedLengthElementArray;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.memory.pool.WastefulRecycler;
import com.netflix.hollow.core.read.engine.PopulatedOrdinalListener;
import com.netflix.hollow.core.util.IntMap;
import com.netflix.hollow.core.util.RemovedOrdinalIterator;
/**
* This class contains the logic for extracting the removed records from a MAP type state
* to produce a historical type state.
*
* Not intended for external consumption.
*/
public class HollowMapDeltaHistoricalStateCreator {
private final HollowMapTypeDataElements historicalDataElements;
private final int shardNumberMask;
private final int shardOrdinalShift;
private HollowMapTypeReadState typeState;
private HollowMapTypeDataElements stateEngineDataElements[];
private RemovedOrdinalIterator iter;
private IntMap ordinalMapping;
private int nextOrdinal;
private long nextStartBucket;
public HollowMapDeltaHistoricalStateCreator(HollowMapTypeReadState typeState, boolean reverse) {
this.typeState = typeState;
this.stateEngineDataElements = typeState.currentDataElements();
this.historicalDataElements = new HollowMapTypeDataElements(WastefulRecycler.DEFAULT_INSTANCE);
this.iter = new RemovedOrdinalIterator(typeState.getListener(PopulatedOrdinalListener.class), reverse);
this.shardNumberMask = stateEngineDataElements.length - 1;
this.shardOrdinalShift = 31 - Integer.numberOfLeadingZeros(stateEngineDataElements.length);
}
public void populateHistory() {
populateStats();
historicalDataElements.mapPointerAndSizeData = new FixedLengthElementArray(historicalDataElements.memoryRecycler, ((long)historicalDataElements.maxOrdinal + 1) * historicalDataElements.bitsPerFixedLengthMapPortion);
historicalDataElements.entryData = new FixedLengthElementArray(historicalDataElements.memoryRecycler, historicalDataElements.totalNumberOfBuckets * historicalDataElements.bitsPerMapEntry);
iter.reset();
int ordinal = iter.next();
while(ordinal != ORDINAL_NONE) {
ordinalMapping.put(ordinal, nextOrdinal);
copyRecord(ordinal);
ordinal = iter.next();
}
}
public void dereferenceTypeState() {
this.typeState = null;
this.stateEngineDataElements = null;
this.iter = null;
}
public IntMap getOrdinalMapping() {
return ordinalMapping;
}
public HollowMapTypeReadState createHistoricalTypeReadState() {
HollowMapTypeReadState historicalTypeState = new HollowMapTypeReadState(null, typeState.getSchema(), 1);
historicalTypeState.setCurrentData(historicalDataElements);
return historicalTypeState;
}
private void populateStats() {
iter.reset();
int removedEntryCount = 0;
int maxSize = 0;
long totalBucketCount = 0;
int ordinal = iter.next();
while(ordinal != ORDINAL_NONE) {
removedEntryCount++;
int size = typeState.size(ordinal);
if(size > maxSize)
maxSize = size;
totalBucketCount += HashCodes.hashTableSize(size);
ordinal = iter.next();
}
historicalDataElements.maxOrdinal = removedEntryCount - 1;
historicalDataElements.bitsPerMapPointer = 64 - Long.numberOfLeadingZeros(totalBucketCount);
historicalDataElements.bitsPerMapSizeValue = 64 - Long.numberOfLeadingZeros(maxSize);
historicalDataElements.bitsPerFixedLengthMapPortion = historicalDataElements.bitsPerMapPointer + historicalDataElements.bitsPerMapSizeValue;
historicalDataElements.bitsPerKeyElement = stateEngineDataElements[0].bitsPerKeyElement;
historicalDataElements.bitsPerValueElement = stateEngineDataElements[0].bitsPerValueElement;
historicalDataElements.bitsPerMapEntry = stateEngineDataElements[0].bitsPerMapEntry;
historicalDataElements.emptyBucketKeyValue = stateEngineDataElements[0].emptyBucketKeyValue;
historicalDataElements.totalNumberOfBuckets = totalBucketCount;
ordinalMapping = new IntMap(removedEntryCount);
}
private void copyRecord(int ordinal) {
int shard = ordinal & shardNumberMask;
int shardOrdinal = ordinal >> shardOrdinalShift;
long bitsPerBucket = historicalDataElements.bitsPerMapEntry;
long size = typeState.size(ordinal);
long fromStartBucket = shardOrdinal == 0 ? 0 : stateEngineDataElements[shard].mapPointerAndSizeData.getElementValue((long)(shardOrdinal - 1) * stateEngineDataElements[shard].bitsPerFixedLengthMapPortion, stateEngineDataElements[shard].bitsPerMapPointer);
long fromEndBucket = stateEngineDataElements[shard].mapPointerAndSizeData.getElementValue((long)shardOrdinal * stateEngineDataElements[shard].bitsPerFixedLengthMapPortion, stateEngineDataElements[shard].bitsPerMapPointer);
long numBuckets = fromEndBucket - fromStartBucket;
historicalDataElements.mapPointerAndSizeData.setElementValue((long)nextOrdinal * historicalDataElements.bitsPerFixedLengthMapPortion, historicalDataElements.bitsPerMapPointer, nextStartBucket + numBuckets);
historicalDataElements.mapPointerAndSizeData.setElementValue(((long)nextOrdinal * historicalDataElements.bitsPerFixedLengthMapPortion) + historicalDataElements.bitsPerMapPointer, historicalDataElements.bitsPerMapSizeValue, size);
historicalDataElements.entryData.copyBits(stateEngineDataElements[shard].entryData, fromStartBucket * bitsPerBucket, nextStartBucket * bitsPerBucket, numBuckets * bitsPerBucket);
nextOrdinal++;
nextStartBucket += numBuckets;
}
}
| 9,095 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/map/HollowMapDeltaApplicator.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine.map;
import com.netflix.hollow.core.memory.encoding.FixedLengthElementArray;
import com.netflix.hollow.core.memory.encoding.GapEncodedVariableLengthIntegerReader;
/**
* This class contains the logic for applying a delta to a current MAP type state
* to produce the next MAP type state.
*
* Not intended for external consumption.
*/
class HollowMapDeltaApplicator {
private final HollowMapTypeDataElements from;
private final HollowMapTypeDataElements delta;
private final HollowMapTypeDataElements target;
private long currentFromStateCopyStartBit = 0;
private long currentDeltaCopyStartBit = 0;
private long currentWriteStartBit = 0;
private long currentFromStateStartBucket = 0;
private long currentDeltaStartBucket = 0;
private long currentWriteStartBucket = 0;
private GapEncodedVariableLengthIntegerReader removalsReader;
private GapEncodedVariableLengthIntegerReader additionsReader;
HollowMapDeltaApplicator(HollowMapTypeDataElements from, HollowMapTypeDataElements delta, HollowMapTypeDataElements target) {
this.from = from;
this.delta = delta;
this.target = target;
}
public void applyDelta() {
removalsReader = from.encodedRemovals == null ? GapEncodedVariableLengthIntegerReader.EMPTY_READER : from.encodedRemovals;
additionsReader = delta.encodedAdditions;
removalsReader.reset();
additionsReader.reset();
target.encodedRemovals = delta.encodedRemovals;
target.maxOrdinal = delta.maxOrdinal;
target.bitsPerMapPointer = delta.bitsPerMapPointer;
target.bitsPerMapSizeValue = delta.bitsPerMapSizeValue;
target.bitsPerKeyElement = delta.bitsPerKeyElement;
target.bitsPerValueElement = delta.bitsPerValueElement;
target.bitsPerFixedLengthMapPortion = delta.bitsPerFixedLengthMapPortion;
target.bitsPerMapEntry = delta.bitsPerMapEntry;
target.emptyBucketKeyValue = delta.emptyBucketKeyValue;
target.totalNumberOfBuckets = delta.totalNumberOfBuckets;
target.mapPointerAndSizeData = new FixedLengthElementArray(target.memoryRecycler, ((long)target.maxOrdinal + 1) * target.bitsPerFixedLengthMapPortion);
target.entryData = new FixedLengthElementArray(target.memoryRecycler, target.totalNumberOfBuckets * target.bitsPerMapEntry);
if(target.bitsPerMapPointer == from.bitsPerMapPointer
&& target.bitsPerMapSizeValue == from.bitsPerMapSizeValue
&& target.bitsPerKeyElement == from.bitsPerKeyElement
&& target.bitsPerValueElement == from.bitsPerValueElement)
fastDelta();
else
slowDelta();
from.encodedRemovals = null;
removalsReader.destroy();
}
private void slowDelta() {
for(int i=0; i<=target.maxOrdinal; i++) {
mergeOrdinal(i);
}
}
private void fastDelta() {
int i=0;
int bulkCopyEndOrdinal = Math.min(from.maxOrdinal, target.maxOrdinal);
while(i <= target.maxOrdinal) {
int nextElementDiff = Math.min(additionsReader.nextElement(), removalsReader.nextElement());
if(nextElementDiff == i || i > bulkCopyEndOrdinal) {
mergeOrdinal(i++);
} else {
int recordsToCopy = nextElementDiff - i;
if(nextElementDiff > bulkCopyEndOrdinal)
recordsToCopy = bulkCopyEndOrdinal - i + 1;
fastCopyRecords(recordsToCopy);
i += recordsToCopy;
}
}
}
private void fastCopyRecords(int recordsToCopy) {
long mapPointerAndSizeBitsToCopy = (long)recordsToCopy * target.bitsPerFixedLengthMapPortion;
long eachMapPointerDifference = currentWriteStartBucket - currentFromStateStartBucket;
target.mapPointerAndSizeData.copyBits(from.mapPointerAndSizeData, currentFromStateCopyStartBit, currentWriteStartBit, mapPointerAndSizeBitsToCopy);
target.mapPointerAndSizeData.incrementMany(currentWriteStartBit, eachMapPointerDifference, target.bitsPerFixedLengthMapPortion, recordsToCopy);
currentFromStateCopyStartBit += mapPointerAndSizeBitsToCopy;
currentWriteStartBit += mapPointerAndSizeBitsToCopy;
long fromDataEndElement = from.mapPointerAndSizeData.getElementValue(currentFromStateCopyStartBit - from.bitsPerFixedLengthMapPortion, from.bitsPerMapPointer);
long bucketsToCopy = fromDataEndElement - currentFromStateStartBucket;
long bitsToCopy = bucketsToCopy * from.bitsPerMapEntry;
target.entryData.copyBits(from.entryData, currentFromStateStartBucket * from.bitsPerMapEntry, currentWriteStartBucket * from.bitsPerMapEntry, bitsToCopy);
currentFromStateStartBucket += bucketsToCopy;
currentWriteStartBucket += bucketsToCopy;
}
private void mergeOrdinal(int ordinal) {
boolean addFromDelta = additionsReader.nextElement() == ordinal;
boolean removeData = removalsReader.nextElement() == ordinal;
if(addFromDelta) {
addFromDelta(additionsReader);
}
if(ordinal <= from.maxOrdinal) {
long fromDataEndBucket = from.mapPointerAndSizeData.getElementValue(currentFromStateCopyStartBit, from.bitsPerMapPointer);
if(!removeData) {
for(long bucketIdx=currentFromStateStartBucket; bucketIdx<fromDataEndBucket; bucketIdx++) {
long bucketKey = from.entryData.getElementValue(bucketIdx * from.bitsPerMapEntry, from.bitsPerKeyElement);
long bucketValue = from.entryData.getElementValue(bucketIdx * from.bitsPerMapEntry + from.bitsPerKeyElement, from.bitsPerValueElement);
if(bucketKey == from.emptyBucketKeyValue)
bucketKey = target.emptyBucketKeyValue;
long currentWriteStartBucketBit = currentWriteStartBucket * target.bitsPerMapEntry;
target.entryData.setElementValue(currentWriteStartBucketBit, target.bitsPerKeyElement, bucketKey);
target.entryData.setElementValue(currentWriteStartBucketBit + target.bitsPerKeyElement, target.bitsPerValueElement, bucketValue);
currentWriteStartBucket++;
}
long fromDataSize = from.mapPointerAndSizeData.getElementValue(currentFromStateCopyStartBit + from.bitsPerMapPointer, from.bitsPerMapSizeValue);
target.mapPointerAndSizeData.setElementValue(currentWriteStartBit + target.bitsPerMapPointer, target.bitsPerMapSizeValue, fromDataSize);
} else {
removalsReader.advance();
}
currentFromStateStartBucket = fromDataEndBucket;
currentFromStateCopyStartBit += from.bitsPerFixedLengthMapPortion;
}
target.mapPointerAndSizeData.setElementValue(currentWriteStartBit, target.bitsPerMapPointer, currentWriteStartBucket);
currentWriteStartBit += target.bitsPerFixedLengthMapPortion;
}
private void addFromDelta(GapEncodedVariableLengthIntegerReader additionsReader) {
long deltaDataEndBucket = delta.mapPointerAndSizeData.getElementValue(currentDeltaCopyStartBit, delta.bitsPerMapPointer);
for(long bucketIdx=currentDeltaStartBucket; bucketIdx<deltaDataEndBucket; bucketIdx++) {
long bucketEntry = delta.entryData.getElementValue(bucketIdx * delta.bitsPerMapEntry, delta.bitsPerMapEntry);
target.entryData.setElementValue(currentWriteStartBucket * target.bitsPerMapEntry, target.bitsPerMapEntry, bucketEntry);
currentWriteStartBucket++;
}
long deltaDataSize = delta.mapPointerAndSizeData.getElementValue(currentDeltaCopyStartBit + delta.bitsPerMapPointer, delta.bitsPerMapSizeValue);
target.mapPointerAndSizeData.setElementValue(currentWriteStartBit + target.bitsPerMapPointer, target.bitsPerMapSizeValue, deltaDataSize);
currentDeltaStartBucket = deltaDataEndBucket;
currentDeltaCopyStartBit += delta.bitsPerFixedLengthMapPortion;
additionsReader.advance();
}
}
| 9,096 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/map/HollowMapTypeReadStateShard.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine.map;
import static com.netflix.hollow.core.HollowConstants.ORDINAL_NONE;
import com.netflix.hollow.core.index.key.HollowPrimaryKeyValueDeriver;
import com.netflix.hollow.core.memory.HollowUnsafeHandle;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.read.engine.SetMapKeyHasher;
import com.netflix.hollow.tools.checksum.HollowChecksum;
import java.util.BitSet;
class HollowMapTypeReadStateShard {
private volatile HollowMapTypeDataElements currentDataVolatile;
private HollowPrimaryKeyValueDeriver keyDeriver;
public int size(int ordinal) {
HollowMapTypeDataElements currentData;
int size;
do {
currentData = this.currentDataVolatile;
size = (int)currentData.mapPointerAndSizeData.getElementValue(((long)ordinal * currentData.bitsPerFixedLengthMapPortion) + currentData.bitsPerMapPointer, currentData.bitsPerMapSizeValue);
} while(readWasUnsafe(currentData));
return size;
}
public int get(int ordinal, int keyOrdinal, int hashCode) {
HollowMapTypeDataElements currentData;
int valueOrdinal;
threadsafe:
do {
long startBucket;
long endBucket;
do {
currentData = this.currentDataVolatile;
startBucket = ordinal == 0 ? 0 : currentData.mapPointerAndSizeData.getElementValue((long)(ordinal - 1) * currentData.bitsPerFixedLengthMapPortion, currentData.bitsPerMapPointer);
endBucket = currentData.mapPointerAndSizeData.getElementValue((long)ordinal * currentData.bitsPerFixedLengthMapPortion, currentData.bitsPerMapPointer);
} while(readWasUnsafe(currentData));
hashCode = HashCodes.hashInt(hashCode);
long bucket = startBucket + (hashCode & (endBucket - startBucket - 1));
int bucketKeyOrdinal = getBucketKeyByAbsoluteIndex(currentData, bucket);
while(bucketKeyOrdinal != currentData.emptyBucketKeyValue) {
if(bucketKeyOrdinal == keyOrdinal) {
valueOrdinal = getBucketValueByAbsoluteIndex(currentData, bucket);
continue threadsafe;
}
bucket++;
if(bucket == endBucket)
bucket = startBucket;
bucketKeyOrdinal = getBucketKeyByAbsoluteIndex(currentData, bucket);
}
valueOrdinal = ORDINAL_NONE;
} while(readWasUnsafe(currentData));
return valueOrdinal;
}
public int findKey(int ordinal, Object... hashKey) {
int hashCode = SetMapKeyHasher.hash(hashKey, keyDeriver.getFieldTypes());
HollowMapTypeDataElements currentData;
threadsafe:
do {
long startBucket;
long endBucket;
do {
currentData = this.currentDataVolatile;
startBucket = ordinal == 0 ? 0 : currentData.mapPointerAndSizeData.getElementValue((long)(ordinal - 1) * currentData.bitsPerFixedLengthMapPortion, currentData.bitsPerMapPointer);
endBucket = currentData.mapPointerAndSizeData.getElementValue((long)ordinal * currentData.bitsPerFixedLengthMapPortion, currentData.bitsPerMapPointer);
} while(readWasUnsafe(currentData));
long bucket = startBucket + (hashCode & (endBucket - startBucket - 1));
int bucketKeyOrdinal = getBucketKeyByAbsoluteIndex(currentData, bucket);
while(bucketKeyOrdinal != currentData.emptyBucketKeyValue) {
if(readWasUnsafe(currentData))
continue threadsafe;
if(keyDeriver.keyMatches(bucketKeyOrdinal, hashKey)) {
return bucketKeyOrdinal;
}
bucket++;
if(bucket == endBucket)
bucket = startBucket;
bucketKeyOrdinal = getBucketKeyByAbsoluteIndex(currentData, bucket);
}
} while(readWasUnsafe(currentData));
return ORDINAL_NONE;
}
public long findEntry(int ordinal, Object... hashKey) {
int hashCode = SetMapKeyHasher.hash(hashKey, keyDeriver.getFieldTypes());
HollowMapTypeDataElements currentData;
threadsafe:
do {
long startBucket;
long endBucket;
do {
currentData = this.currentDataVolatile;
startBucket = ordinal == 0 ? 0 : currentData.mapPointerAndSizeData.getElementValue((long)(ordinal - 1) * currentData.bitsPerFixedLengthMapPortion, currentData.bitsPerMapPointer);
endBucket = currentData.mapPointerAndSizeData.getElementValue((long)ordinal * currentData.bitsPerFixedLengthMapPortion, currentData.bitsPerMapPointer);
} while(readWasUnsafe(currentData));
long bucket = startBucket + (hashCode & (endBucket - startBucket - 1));
int bucketKeyOrdinal = getBucketKeyByAbsoluteIndex(currentData, bucket);
while(bucketKeyOrdinal != currentData.emptyBucketKeyValue) {
if(readWasUnsafe(currentData))
continue threadsafe;
if(keyDeriver.keyMatches(bucketKeyOrdinal, hashKey)) {
long valueOrdinal = getBucketValueByAbsoluteIndex(currentData, bucket);
if(readWasUnsafe(currentData))
continue threadsafe;
return (long)bucketKeyOrdinal << 32 | valueOrdinal;
}
bucket++;
if(bucket == endBucket)
bucket = startBucket;
bucketKeyOrdinal = getBucketKeyByAbsoluteIndex(currentData, bucket);
}
} while(readWasUnsafe(currentData));
return -1L;
}
public long relativeBucket(int ordinal, int bucketIndex) {
HollowMapTypeDataElements currentData;
long bucketValue;
do {
long absoluteBucketIndex;
do {
currentData = this.currentDataVolatile;
absoluteBucketIndex = getAbsoluteBucketStart(currentData, ordinal) + bucketIndex;
} while(readWasUnsafe(currentData));
long key = getBucketKeyByAbsoluteIndex(currentData, absoluteBucketIndex);
if(key == currentData.emptyBucketKeyValue)
return -1L;
bucketValue = key << 32 | getBucketValueByAbsoluteIndex(currentData, absoluteBucketIndex);
} while(readWasUnsafe(currentData));
return bucketValue;
}
private long getAbsoluteBucketStart(HollowMapTypeDataElements currentData, int ordinal) {
long startBucket = ordinal == 0 ? 0 : currentData.mapPointerAndSizeData.getElementValue((long)(ordinal - 1) * currentData.bitsPerFixedLengthMapPortion, currentData.bitsPerMapPointer);
return startBucket;
}
private int getBucketKeyByAbsoluteIndex(HollowMapTypeDataElements currentData, long absoluteBucketIndex) {
return (int)currentData.entryData.getElementValue(absoluteBucketIndex * currentData.bitsPerMapEntry, currentData.bitsPerKeyElement);
}
private int getBucketValueByAbsoluteIndex(HollowMapTypeDataElements currentData, long absoluteBucketIndex) {
return (int)currentData.entryData.getElementValue((absoluteBucketIndex * currentData.bitsPerMapEntry) + currentData.bitsPerKeyElement, currentData.bitsPerValueElement);
}
void invalidate() {
setCurrentData(null);
}
HollowMapTypeDataElements currentDataElements() {
return currentDataVolatile;
}
private boolean readWasUnsafe(HollowMapTypeDataElements data) {
HollowUnsafeHandle.getUnsafe().loadFence();
return data != currentDataVolatile;
}
void setCurrentData(HollowMapTypeDataElements data) {
this.currentDataVolatile = data;
}
protected void applyToChecksum(HollowChecksum checksum, BitSet populatedOrdinals, int shardNumber, int numShards) {
HollowMapTypeDataElements currentData = currentDataVolatile;
int ordinal = populatedOrdinals.nextSetBit(shardNumber);
while(ordinal != ORDINAL_NONE) {
if((ordinal & (numShards - 1)) == shardNumber) {
int shardOrdinal = ordinal / numShards;
int numBuckets = HashCodes.hashTableSize(size(shardOrdinal));
long offset = getAbsoluteBucketStart(currentData, shardOrdinal);
checksum.applyInt(ordinal);
for(int i=0; i<numBuckets; i++) {
int bucketKey = getBucketKeyByAbsoluteIndex(currentData, offset + i);
if(bucketKey != currentData.emptyBucketKeyValue) {
checksum.applyInt(i);
checksum.applyInt(bucketKey);
checksum.applyInt(getBucketValueByAbsoluteIndex(currentData, offset + i));
}
}
ordinal = ordinal + numShards;
} else {
// Round up ordinal
int r = (ordinal & -numShards) + shardNumber;
ordinal = (r <= ordinal) ? r + numShards : r;
}
ordinal = populatedOrdinals.nextSetBit(ordinal);
}
}
public long getApproximateHeapFootprintInBytes() {
HollowMapTypeDataElements currentData = currentDataVolatile;
long requiredBitsForMapPointers = ((long)currentData.maxOrdinal + 1) * currentData.bitsPerFixedLengthMapPortion;
long requiredBitsForMapBuckets = (long)currentData.totalNumberOfBuckets * currentData.bitsPerMapEntry;
long requiredBits = requiredBitsForMapPointers + requiredBitsForMapBuckets;
return requiredBits / 8;
}
public long getApproximateHoleCostInBytes(BitSet populatedOrdinals, int shardNumber, int numShards) {
HollowMapTypeDataElements currentData = currentDataVolatile;
long holeBits = 0;
int holeOrdinal = populatedOrdinals.nextClearBit(0);
while(holeOrdinal <= currentData.maxOrdinal) {
if((holeOrdinal & (numShards - 1)) == shardNumber)
holeBits += currentData.bitsPerFixedLengthMapPortion;
holeOrdinal = populatedOrdinals.nextClearBit(holeOrdinal + 1);
}
return holeBits / 8;
}
public void setKeyDeriver(HollowPrimaryKeyValueDeriver keyDeriver) {
this.keyDeriver = keyDeriver;
}
}
| 9,097 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/map/HollowMapTypeDataElements.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine.map;
import com.netflix.hollow.core.memory.FixedLengthData;
import com.netflix.hollow.core.memory.FixedLengthDataFactory;
import com.netflix.hollow.core.memory.MemoryMode;
import com.netflix.hollow.core.memory.encoding.GapEncodedVariableLengthIntegerReader;
import com.netflix.hollow.core.memory.encoding.VarInt;
import com.netflix.hollow.core.memory.pool.ArraySegmentRecycler;
import com.netflix.hollow.core.read.HollowBlobInput;
import java.io.IOException;
/**
* This class holds the data for a {@link HollowMapTypeReadState}.
*
* During a delta, the HollowMapTypeReadState will create a new HollowMapTypeDataElements and atomically swap
* with the existing one to make sure a consistent view of the data is always available.
*/
public class HollowMapTypeDataElements {
int maxOrdinal;
FixedLengthData mapPointerAndSizeData;
FixedLengthData entryData;
GapEncodedVariableLengthIntegerReader encodedRemovals;
GapEncodedVariableLengthIntegerReader encodedAdditions;
int bitsPerMapPointer;
int bitsPerMapSizeValue;
int bitsPerFixedLengthMapPortion;
int bitsPerKeyElement;
int bitsPerValueElement;
int bitsPerMapEntry;
int emptyBucketKeyValue;
long totalNumberOfBuckets;
final ArraySegmentRecycler memoryRecycler;
final MemoryMode memoryMode;
public HollowMapTypeDataElements(ArraySegmentRecycler memoryRecycler) {
this(MemoryMode.ON_HEAP, memoryRecycler);
}
public HollowMapTypeDataElements(MemoryMode memoryMode, ArraySegmentRecycler memoryRecycler) {
this.memoryMode = memoryMode;
this.memoryRecycler = memoryRecycler;
}
void readSnapshot(HollowBlobInput in) throws IOException {
readFromInput(in, false);
}
void readDelta(HollowBlobInput in) throws IOException {
readFromInput(in,true);
}
private void readFromInput(HollowBlobInput in, boolean isDelta) throws IOException {
maxOrdinal = VarInt.readVInt(in);
if(isDelta) {
encodedRemovals = GapEncodedVariableLengthIntegerReader.readEncodedDeltaOrdinals(in, memoryRecycler);
encodedAdditions = GapEncodedVariableLengthIntegerReader.readEncodedDeltaOrdinals(in, memoryRecycler);
}
bitsPerMapPointer = VarInt.readVInt(in);
bitsPerMapSizeValue = VarInt.readVInt(in);
bitsPerKeyElement = VarInt.readVInt(in);
bitsPerValueElement = VarInt.readVInt(in);
bitsPerFixedLengthMapPortion = bitsPerMapPointer + bitsPerMapSizeValue;
bitsPerMapEntry = bitsPerKeyElement + bitsPerValueElement;
emptyBucketKeyValue = (1 << bitsPerKeyElement) - 1;
totalNumberOfBuckets = VarInt.readVLong(in);
mapPointerAndSizeData = FixedLengthDataFactory.get(in, memoryMode, memoryRecycler);
entryData = FixedLengthDataFactory.get(in, memoryMode, memoryRecycler);
}
static void discardFromInput(HollowBlobInput in, int numShards, boolean isDelta) throws IOException {
if(numShards > 1)
VarInt.readVInt(in); /// max ordinal
for(int i=0; i<numShards; i++) {
VarInt.readVInt(in); /// max ordinal
if(isDelta) {
/// addition/removal ordinals
GapEncodedVariableLengthIntegerReader.discardEncodedDeltaOrdinals(in);
GapEncodedVariableLengthIntegerReader.discardEncodedDeltaOrdinals(in);
}
/// statistics
VarInt.readVInt(in);
VarInt.readVInt(in);
VarInt.readVInt(in);
VarInt.readVInt(in);
VarInt.readVLong(in);
/// fixed length data
FixedLengthData.discardFrom(in);
FixedLengthData.discardFrom(in);
}
}
public void applyDelta(HollowMapTypeDataElements fromData, HollowMapTypeDataElements deltaData) {
new HollowMapDeltaApplicator(fromData, deltaData, this).applyDelta();
}
public void destroy() {
FixedLengthDataFactory.destroy(mapPointerAndSizeData, memoryRecycler);
FixedLengthDataFactory.destroy(entryData, memoryRecycler);
}
}
| 9,098 |
0 | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine | Create_ds/hollow/hollow/src/main/java/com/netflix/hollow/core/read/engine/map/PotentialMatchHollowMapEntryOrdinalIteratorImpl.java | /*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine.map;
import static com.netflix.hollow.core.HollowConstants.ORDINAL_NONE;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.read.dataaccess.HollowMapTypeDataAccess;
import com.netflix.hollow.core.read.iterator.HollowMapEntryOrdinalIterator;
/**
* This is a {@link HollowMapEntryOrdinalIterator} which only will iterate over the potential matches for a specific hash code.
*
* Once this iterator encounters an empty bucket, the iteration ends.
*/
public class PotentialMatchHollowMapEntryOrdinalIteratorImpl implements HollowMapEntryOrdinalIterator {
private final int mapOrdinal;
private final HollowMapTypeDataAccess dataAccess;
private final int numBuckets;
private int currentBucket;
private int key;
private int value;
public PotentialMatchHollowMapEntryOrdinalIteratorImpl(int mapOrdinal, HollowMapTypeDataAccess dataAccess, int hashCode) {
this.mapOrdinal = mapOrdinal;
this.dataAccess = dataAccess;
this.numBuckets = HashCodes.hashTableSize(dataAccess.size(mapOrdinal));
this.currentBucket = HashCodes.hashInt(hashCode) & (numBuckets - 1);
}
@Override
public int getKey() {
return key;
}
@Override
public int getValue() {
return value;
}
@Override
public boolean next() {
long currentBucketValue = dataAccess.relativeBucket(mapOrdinal, currentBucket);
int currentBucketKey = (int)(currentBucketValue >>> 32);
if(currentBucketKey == ORDINAL_NONE)
return false;
key = currentBucketKey;
value = (int)currentBucketValue;
currentBucket++;
currentBucket &= numBuckets - 1;
return true;
}
}
| 9,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.