index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/ArrayOfDoublesIntersection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import static java.lang.Math.min;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SketchesStateException;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.tuple.Util;
/**
* Computes the intersection of two or more tuple sketches of type ArrayOfDoubles.
* A new instance represents the Universal Set.
* Every update() computes an intersection with the internal set
* and can only reduce the internal set.
*/
public abstract class ArrayOfDoublesIntersection {
  // Not changed by resetToEmpty() or hardReset()
  private final short seedHash_; // hash of the update seed; used to reject incompatible input sketches
  private final int numValues_;  // number of double values stored per retained key

  // Nulled or reset by resetToEmpty()
  private HashTables hashTables_; // the current internal set; null until the first intersect() call
  private boolean empty_;         // true once the empty rule has fired; sticky until reset()
  private boolean firstCall_;     // true until the first intersect() call after construction or reset()
  private long thetaLong_;        // current theta; can only decrease (Theta rule)

  /**
   * Internal constructor, called by HeapArrayOfDoublesIntersection and DirectArrayOfDoublesIntersection
   * @param numValues the number of double values in the summary array
   * @param seed the hash function update seed.
   */
  ArrayOfDoublesIntersection(final int numValues, final long seed) {
    seedHash_ = Util.computeSeedHash(seed);
    numValues_ = numValues;
    hashTables_ = null;
    empty_ = false;
    thetaLong_ = Long.MAX_VALUE; // theta = 1.0: this instance starts as the Universal Set
    firstCall_ = true;
  }

  /**
   * Performs a stateful intersection of the internal set with the given tupleSketch.
   * The given tupleSketch and the internal state must have the same <i>numValues</i>.
   * @param tupleSketch Input sketch to intersect with the internal set.
   * @param combiner Method of combining two arrays of double values
   */
  public void intersect(final ArrayOfDoublesSketch tupleSketch, final ArrayOfDoublesCombiner combiner) {
    if (tupleSketch == null) { throw new SketchesArgumentException("Sketch must not be null"); }
    Util.checkSeedHashes(seedHash_, tupleSketch.getSeedHash());
    if (tupleSketch.numValues_ != numValues_) {
      throw new SketchesArgumentException(
          "Input tupleSketch cannot have different numValues from the internal numValues.");
    }
    final boolean isFirstCall = firstCall_;
    firstCall_ = false;
    //could be first or next call
    final boolean emptyIn = tupleSketch.isEmpty();
    if (empty_ || emptyIn) { //empty rule
      //Whatever the current internal state, we make our local empty.
      resetToEmpty(); //
      return;
    }
    final long thetaLongIn = tupleSketch.getThetaLong();
    thetaLong_ = min(thetaLong_, thetaLongIn); //Theta rule
    if (tupleSketch.getRetainedEntries() == 0) {
      // Non-empty input with zero retained entries: drop all locally retained keys.
      if (hashTables_ != null) {
        hashTables_.clear();
      }
      // NOTE(review): there is no early return here, so execution falls through below;
      // on a first call this builds a HashTables from the zero-entry input — confirm intended.
    }
    if (isFirstCall) {
      //Copy first sketch data into local instance hashTables_
      hashTables_ = new HashTables(tupleSketch);
    }
    //Next Call
    else {
      assert hashTables_ != null;
      if (hashTables_.getNumKeys() == 0) { return; } // internal set has no keys: intersection stays empty of keys
      //process intersect with current hashTables
      hashTables_ = hashTables_.getIntersectHashTables(tupleSketch, thetaLong_, combiner);
    }
  }

  /**
   * Gets the internal set as an on-heap compact sketch.
   * @return Result of the intersections so far as a compact sketch.
   */
  public ArrayOfDoublesCompactSketch getResult() {
    return getResult(null);
  }

  /**
   * Gets the result of stateful intersections so far.
   * @param dstMem Memory for the compact sketch (can be null).
   * @return Result of the intersections so far as a compact sketch.
   * @throws SketchesStateException if intersect() has never been called on this instance.
   */
  public ArrayOfDoublesCompactSketch getResult(final WritableMemory dstMem) {
    if (firstCall_) {
      throw new SketchesStateException(
          "getResult() with no intervening intersections is not a legal result.");
    }
    long[] hashArrOut = new long[0];
    double[] valuesArrOut = new double[0];
    if (hashTables_ != null && hashTables_.getHashTable() != null) {
      final int numKeys = hashTables_.getNumKeys();
      if (numKeys > 0) {
        final int tableSize = hashTables_.getHashTable().length;
        hashArrOut = new long[numKeys];
        valuesArrOut = new double[numKeys * numValues_];
        // & flatten the hash tables: copy non-empty slots into dense output arrays
        int cnt = 0;
        final long[] hashTable = hashTables_.getHashTable();
        final double[][] valueTable = hashTables_.getValueTable();
        for (int i = 0; i < tableSize; i++) {
          final long hash = hashTable[i];
          if (hash == 0 || hash > thetaLong_) { continue; } // 0 = empty slot; above theta = not retained
          hashArrOut[cnt] = hash;
          System.arraycopy(valueTable[i], 0, valuesArrOut, cnt * numValues_, numValues_);
          cnt++;
        }
        assert cnt == numKeys;
      }
    }
    // null dstMem -> heap compact sketch; otherwise serialize directly into the caller's Memory
    return (dstMem == null)
        ? new HeapArrayOfDoublesCompactSketch(hashArrOut, valuesArrOut,
            thetaLong_, empty_, numValues_, seedHash_)
        : new DirectArrayOfDoublesCompactSketch(hashArrOut, valuesArrOut,
            thetaLong_, empty_, numValues_, seedHash_, dstMem);
  }

  /**
   * Resets the internal set to the initial state, which represents the Universal Set
   */
  public void reset() {
    hardReset();
  }

  // Full reset: back to the Universal Set, ready for a new first call.
  private void hardReset() {
    empty_ = false;
    firstCall_ = true;
    thetaLong_ = Long.MAX_VALUE;
    if (hashTables_ != null) { hashTables_.clear(); }
  }

  // Empty-rule reset: internal set becomes the empty set; NOT a first-call state.
  private void resetToEmpty() {
    empty_ = true;
    firstCall_ = false;
    thetaLong_ = Long.MAX_VALUE;
    if (hashTables_ != null) { hashTables_.clear(); }
  }

  // Factory hook implemented by the heap and direct subclasses.
  protected abstract ArrayOfDoublesQuickSelectSketch createSketch(int nomEntries, int numValues, long seed);
}
| 2,600 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/DirectArrayOfDoublesIntersection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import org.apache.datasketches.memory.WritableMemory;
/**
* Direct Intersection operation for tuple sketches of type ArrayOfDoubles.
* <p>This implementation uses data in a given Memory that is owned and managed by the caller.
* This Memory can be off-heap, which if managed properly will greatly reduce the need for
* the JVM to perform garbage collection.</p>
*/
final class DirectArrayOfDoublesIntersection extends ArrayOfDoublesIntersection {

  // Never reassigned after construction, so declared final (was mutable).
  private final WritableMemory mem_;

  /**
   * Creates an instance of a DirectArrayOfDoublesIntersection with a custom update seed
   * @param numValues number of double values associated with each key
   * @param seed <a href="{@docRoot}/resources/dictionary.html#seed">See seed</a>
   * @param dstMem <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
   */
  DirectArrayOfDoublesIntersection(final int numValues, final long seed, final WritableMemory dstMem) {
    super(numValues, seed);
    mem_ = dstMem;
  }

  /**
   * Builds a direct QuickSelect sketch over this instance's Memory.
   * Uses lgResizeFactor = 0 (no resizing) and sampling probability 1.0.
   * @param nomEntries nominal number of entries
   * @param numValues number of double values associated with each key
   * @param seed the hash function update seed
   * @return a new DirectArrayOfDoublesQuickSelectSketch backed by mem_
   */
  @Override
  protected ArrayOfDoublesQuickSelectSketch createSketch(final int nomEntries, final int numValues,
      final long seed) {
    return new DirectArrayOfDoublesQuickSelectSketch(nomEntries, 0, 1f, numValues, seed, mem_);
  }
}
| 2,601 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/HeapArrayOfDoublesQuickSelectSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import static org.apache.datasketches.common.Util.ceilingIntPowerOf2;
import static org.apache.datasketches.common.Util.exactLog2OfLong;
import java.nio.ByteOrder;
import java.util.Arrays;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.ResizeFactor;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.thetacommon.HashOperations;
import org.apache.datasketches.tuple.SerializerDeserializer;
import org.apache.datasketches.tuple.Util;
/**
* The on-heap implementation of the tuple QuickSelect sketch of type ArrayOfDoubles.
*/
final class HeapArrayOfDoublesQuickSelectSketch extends ArrayOfDoublesQuickSelectSketch {
  private final int lgNomEntries_;          // log2 of the nominal entry count
  private final int lgResizeFactor_;        // log2 of the hash-table resize factor (0..3)
  private final float samplingProbability_; // initial sampling probability p
  private int count_;       // number of retained entries (non-zero key slots)
  private long[] keys_;     // open hash table of keys; a 0 key marks an empty slot
  private double[] values_; // flat value table parallel to keys_: numValues_ doubles per slot

  /**
   * This is to create an instance of a QuickSelectSketch with custom resize factor and sampling
   * probability
   * @param nomEntries Nominal number of entries. Forced to the smallest power of 2 greater than
   * or equal to the given value.
   * @param lgResizeFactor log2(resize factor) - value from 0 to 3:
   * 0 - no resizing (max size allocated),
   * 1 - double internal hash table each time it reaches a threshold
   * 2 - grow four times
   * 3 - grow eight times (default)
   * @param samplingProbability
   * <a href="{@docRoot}/resources/dictionary.html#p">See Sampling Probability</a>
   * @param numValues number of double values to keep for each key
   * @param seed <a href="{@docRoot}/resources/dictionary.html#seed">See seed</a>
   */
  HeapArrayOfDoublesQuickSelectSketch(final int nomEntries, final int lgResizeFactor,
      final float samplingProbability, final int numValues, final long seed) {
    super(numValues, seed);
    lgNomEntries_ = exactLog2OfLong(ceilingIntPowerOf2(nomEntries));
    lgResizeFactor_ = lgResizeFactor;
    samplingProbability_ = samplingProbability;
    // initial theta reflects the sampling probability (theta = p)
    thetaLong_ = (long) (Long.MAX_VALUE * (double) samplingProbability);
    final int startingCapacity = Util.getStartingCapacity(nomEntries, lgResizeFactor);
    keys_ = new long[startingCapacity];
    values_ = new double[startingCapacity * numValues];
    lgCurrentCapacity_ = Integer.numberOfTrailingZeros(startingCapacity); // capacity is a power of 2
    setRebuildThreshold();
  }

  /**
   * This is to create an instance given a serialized form
   * @param mem <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
   * @param seed <a href="{@docRoot}/resources/dictionary.html#seed">See seed</a>
   */
  HeapArrayOfDoublesQuickSelectSketch(final Memory mem, final long seed) {
    super(mem.getByte(NUM_VALUES_BYTE), seed);
    SerializerDeserializer.validateFamily(mem.getByte(FAMILY_ID_BYTE),
        mem.getByte(PREAMBLE_LONGS_BYTE));
    SerializerDeserializer.validateType(mem.getByte(SKETCH_TYPE_BYTE),
        SerializerDeserializer.SketchType.ArrayOfDoublesQuickSelectSketch);
    final byte version = mem.getByte(SERIAL_VERSION_BYTE);
    if (version != serialVersionUID) {
      throw new SketchesArgumentException("Serial version mismatch. Expected: "
          + serialVersionUID + ", actual: " + version);
    }
    final byte flags = mem.getByte(FLAGS_BYTE);
    // the image must have been written with the same byte order as this platform
    final boolean isBigEndian = (flags & (1 << Flags.IS_BIG_ENDIAN.ordinal())) > 0;
    if (isBigEndian ^ ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)) {
      throw new SketchesArgumentException("Byte order mismatch");
    }
    Util.checkSeedHashes(mem.getShort(SEED_HASH_SHORT), Util.computeSeedHash(seed));
    isEmpty_ = (flags & (1 << Flags.IS_EMPTY.ordinal())) > 0;
    lgNomEntries_ = mem.getByte(LG_NOM_ENTRIES_BYTE);
    thetaLong_ = mem.getLong(THETA_LONG);
    final int currentCapacity = 1 << mem.getByte(LG_CUR_CAPACITY_BYTE);
    lgResizeFactor_ = mem.getByte(LG_RESIZE_FACTOR_BYTE);
    samplingProbability_ = mem.getFloat(SAMPLING_P_FLOAT);
    keys_ = new long[currentCapacity];
    values_ = new double[currentCapacity * numValues_];
    final boolean hasEntries = (flags & (1 << Flags.HAS_ENTRIES.ordinal())) > 0;
    count_ = hasEntries ? mem.getInt(RETAINED_ENTRIES_INT) : 0;
    if (count_ > 0) {
      // the serialized image stores the FULL hash table (all slots), not just retained entries
      mem.getLongArray(ENTRIES_START, keys_, 0, currentCapacity);
      mem.getDoubleArray(ENTRIES_START + ((long) SIZE_OF_KEY_BYTES * currentCapacity), values_, 0,
          currentCapacity * numValues_);
    }
    setRebuildThreshold();
    lgCurrentCapacity_ = Integer.numberOfTrailingZeros(currentCapacity);
  }

  @Override
  //converts heap hashTable of double[] to compacted double[][]
  public double[][] getValues() {
    final int numVal = numValues_;
    final int count = getRetainedEntries();
    final double[][] values = new double[count][];
    if (count > 0) {
      int cnt = 0;
      for (int j = 0; j < keys_.length; j++) {
        if (keys_[j] == 0) { continue; } // skip empty slots
        values[cnt++] = Arrays.copyOfRange(values_, j * numVal, (j + 1) * numVal);
      }
      assert cnt == count;
    }
    return values;
  }

  @Override
  //converts heap hashTable of double[] to compacted double[]
  double[] getValuesAsOneDimension() {
    final int numVal = numValues_;
    final int count = getRetainedEntries();
    final double[] values = new double[count * numVal];
    if (count > 0) {
      int cnt = 0;
      for (int j = 0; j < keys_.length; j++) {
        if (keys_[j] == 0) { continue; } // skip empty slots
        System.arraycopy(values_, j * numVal, values, cnt++ * numVal, numVal);
      }
      assert cnt == count;
    }
    return values;
  }

  @Override
  //converts heap hashTable of long[] to compacted long[]
  long[] getKeys() {
    final int count = getRetainedEntries();
    final long[] keysArr = new long[count];
    if (count > 0) {
      int cnt = 0;
      for (int j = 0; j < keys_.length; j++) {
        if (keys_[j] == 0) { continue; } // skip empty slots
        keysArr[cnt++] = keys_[j];
      }
      assert cnt == count;
    }
    return keysArr;
  }

  @Override
  public int getRetainedEntries() {
    return count_;
  }

  @Override
  public int getNominalEntries() {
    return 1 << lgNomEntries_;
  }

  @Override
  public float getSamplingProbability() {
    return samplingProbability_;
  }

  @Override
  public ResizeFactor getResizeFactor() {
    return ResizeFactor.getRF(lgResizeFactor_);
  }

  @Override
  public byte[] toByteArray() {
    // serialize into a freshly allocated on-heap byte array
    final byte[] byteArray = new byte[getSerializedSizeBytes()];
    final WritableMemory mem = WritableMemory.writableWrap(byteArray);
    serializeInto(mem);
    return byteArray;
  }

  @Override
  public ArrayOfDoublesSketchIterator iterator() {
    return new HeapArrayOfDoublesSketchIterator(keys_, values_, numValues_);
  }

  @Override
  int getSerializedSizeBytes() {
    // preamble + full hash table: one key (long) plus numValues_ doubles per slot
    return ENTRIES_START + ((SIZE_OF_KEY_BYTES + (SIZE_OF_VALUE_BYTES * numValues_)) * getCurrentCapacity());
  }

  // X/Y: X = Byte index for just AoDQuickSelectSketch
  // Y = Byte index when combined with Union Preamble
  // Long || Start Byte Adr:
  // Adr:
  // First 16 bytes are preamble from AoDUnion
  // || 7/23 | 6/22 | 5/21 | 4/20 | 3/19 | 2/18 | 1/17 | 0/16 |
  // 0/2 || Seed Hash | #Dbls | Flags | SkType2 | FamID | SerVer | Preamble_Longs |
  // || 15/31 | 14/30 | 13/29 | 12/28 | 11/27 | 10/26 | 9/25 | 8/24 |
  // 1/3 ||------------------------------Theta Long----------------------------------------------|
  // || 23/39 | 22/38 | 21/37 | 20/36 | 19/35 | 18/34 | 17/33 | 16/32 |
  // 2/4 || Sampling P Float | | LgRF |lgCapLongs| LgNomEntries |
  // || 31/47 | 30/46 | 29/45 | 28/44 | 27/43 | 26/42 | 25/41 | 24/40 |
  // 3/5 || | Retained Entries Int |
  // || | 32/48 |
  // 4/6 || Keys Array longs * keys[] Length |
  // || Values Array doubles * values[] Length |
  @Override
  void serializeInto(final WritableMemory mem) {
    mem.putByte(PREAMBLE_LONGS_BYTE, (byte) 1);
    mem.putByte(SERIAL_VERSION_BYTE, serialVersionUID);
    mem.putByte(FAMILY_ID_BYTE, (byte) Family.TUPLE.getID());
    mem.putByte(SKETCH_TYPE_BYTE,
        (byte) SerializerDeserializer.SketchType.ArrayOfDoublesQuickSelectSketch.ordinal());
    final boolean isBigEndian = ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN);
    mem.putByte(FLAGS_BYTE, (byte)(
        (isBigEndian ? 1 << Flags.IS_BIG_ENDIAN.ordinal() : 0)
        | (isInSamplingMode() ? 1 << Flags.IS_IN_SAMPLING_MODE.ordinal() : 0)
        | (isEmpty_ ? 1 << Flags.IS_EMPTY.ordinal() : 0)
        | (count_ > 0 ? 1 << Flags.HAS_ENTRIES.ordinal() : 0)
    ));
    mem.putByte(NUM_VALUES_BYTE, (byte) numValues_);
    mem.putShort(SEED_HASH_SHORT, Util.computeSeedHash(seed_));
    mem.putLong(THETA_LONG, thetaLong_);
    mem.putByte(LG_NOM_ENTRIES_BYTE, (byte) lgNomEntries_);
    mem.putByte(LG_CUR_CAPACITY_BYTE, (byte) Integer.numberOfTrailingZeros(keys_.length));
    mem.putByte(LG_RESIZE_FACTOR_BYTE, (byte) lgResizeFactor_);
    mem.putFloat(SAMPLING_P_FLOAT, samplingProbability_);
    mem.putInt(RETAINED_ENTRIES_INT, count_);
    if (count_ > 0) {
      // write the FULL hash table so the heapify constructor can read it back verbatim
      mem.putLongArray(ENTRIES_START, keys_, 0, keys_.length);
      mem.putDoubleArray(ENTRIES_START + ((long) SIZE_OF_KEY_BYTES * keys_.length), values_, 0, values_.length);
    }
  }

  @Override
  public boolean hasMemory() { return false; } // heap implementation: never Memory-backed

  @Override
  Memory getMemory() { return null; }

  @Override
  public void reset() {
    // return to the freshly-constructed state, discarding all entries
    isEmpty_ = true;
    count_ = 0;
    thetaLong_ = (long) (Long.MAX_VALUE * (double) samplingProbability_);
    final int startingCapacity = Util.getStartingCapacity(1 << lgNomEntries_, lgResizeFactor_);
    keys_ = new long[startingCapacity];
    values_ = new double[startingCapacity * numValues_];
    lgCurrentCapacity_ = Integer.numberOfTrailingZeros(startingCapacity);
    setRebuildThreshold();
  }

  @Override
  protected long getKey(final int index) {
    return keys_[index];
  }

  @Override
  protected void incrementCount() {
    count_++;
  }

  @Override
  protected void setValues(final int index, final double[] values) {
    if (numValues_ == 1) {
      values_[index] = values[0]; // single-value fast path avoids arraycopy
    } else {
      System.arraycopy(values, 0, values_, index * numValues_, numValues_);
    }
  }

  @Override
  protected void updateValues(final int index, final double[] values) {
    // element-wise addition of the incoming values into the stored values
    if (numValues_ == 1) {
      values_[index] += values[0];
    } else {
      final int offset = index * numValues_;
      for (int i = 0; i < numValues_; i++) {
        values_[offset + i] += values[i];
      }
    }
  }

  @Override
  protected void setNotEmpty() {
    isEmpty_ = false;
  }

  @Override
  protected boolean isInSamplingMode() {
    return samplingProbability_ < 1f;
  }

  @Override
  protected void setThetaLong(final long thetaLong) {
    thetaLong_ = thetaLong;
  }

  @Override
  protected int getCurrentCapacity() {
    return keys_.length;
  }

  @Override
  protected void rebuild(final int newCapacity) {
    // rehash all live entries (key != 0 and below theta) into a table of newCapacity slots
    final long[] oldKeys = keys_;
    final double[] oldValues = values_;
    keys_ = new long[newCapacity];
    values_ = new double[newCapacity * numValues_];
    count_ = 0;
    lgCurrentCapacity_ = Integer.numberOfTrailingZeros(newCapacity);
    for (int i = 0; i < oldKeys.length; i++) {
      if ((oldKeys[i] != 0) && (oldKeys[i] < thetaLong_)) {
        insert(oldKeys[i], Arrays.copyOfRange(oldValues, i * numValues_, (i + 1) * numValues_));
      }
    }
    setRebuildThreshold();
  }

  @Override
  protected int insertKey(final long key) {
    return HashOperations.hashInsertOnly(keys_, lgCurrentCapacity_, key);
  }

  @Override
  protected int findOrInsertKey(final long key) {
    return HashOperations.hashSearchOrInsert(keys_, lgCurrentCapacity_, key);
  }

  @Override
  protected double[] find(final long key) {
    final int index = HashOperations.hashSearch(keys_, lgCurrentCapacity_, key);
    if (index == -1) { return null; } // key not present
    return Arrays.copyOfRange(values_, index * numValues_, (index + 1) * numValues_);
  }
}
| 2,602 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/DirectArrayOfDoublesQuickSelectSketchR.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import org.apache.datasketches.common.SketchesReadOnlyException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
 * Read-only variant of DirectArrayOfDoublesQuickSelectSketch.
 * Every mutating operation is rejected with a SketchesReadOnlyException.
 */
final class DirectArrayOfDoublesQuickSelectSketchR extends DirectArrayOfDoublesQuickSelectSketch {

  /**
   * Wraps the given Memory image as a read-only sketch view.
   * @param mem the Memory holding a serialized sketch image
   * @param seed the hash update seed the image was built with
   */
  DirectArrayOfDoublesQuickSelectSketchR(final Memory mem, final long seed) {
    super((WritableMemory) mem, seed);
  }

  /** Mutating operation: always throws in read-only mode. */
  @Override
  public void trim() {
    throw new SketchesReadOnlyException();
  }

  /** Mutating operation: always throws in read-only mode. */
  @Override
  void insertOrIgnore(final long key, final double[] values) {
    throw new SketchesReadOnlyException();
  }
}
| 2,603 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/ArrayOfDoublesQuickSelectSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import static org.apache.datasketches.common.Util.ceilingIntPowerOf2;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.thetacommon.QuickSelect;
import org.apache.datasketches.thetacommon.ThetaUtil;
/**
* Top level class for hash table based implementations of tuple sketch of type
* ArrayOfDoubles that uses the QuickSelect algorithm.
*/
abstract class ArrayOfDoublesQuickSelectSketch extends ArrayOfDoublesUpdatableSketch {
  static final byte serialVersionUID = 1;

  // Layout of next 16 bytes:
  // Long || Start Byte Adr:
  // Adr:
  // || 23 | 22 | 21 | 20 | 19 | 18 | 17 | 16 |
  // 3 ||-----------P (float)---------------|--------|--lgRF--|--lgArr-|---lgNom---|
  // || 31 | 30 | 29 | 28 | 27 | 26 | 25 | 24 |
  // 4 ||-----------------------------------|----------Retained Entries------------|
  static final int LG_NOM_ENTRIES_BYTE = 16;
  static final int LG_CUR_CAPACITY_BYTE = 17;
  static final int LG_RESIZE_FACTOR_BYTE = 18;
  // 1 byte of padding for alignment
  static final int SAMPLING_P_FLOAT = 20;
  static final int RETAINED_ENTRIES_INT = 24;
  // 4 bytes of padding for alignment
  static final int ENTRIES_START = 32;
  static final int DEFAULT_LG_RESIZE_FACTOR = 3;

  // these can be derived from other things, but are kept here for performance
  int rebuildThreshold_; //absolute value relative to current capacity
  int lgCurrentCapacity_; // log2 of the current hash-table capacity

  ArrayOfDoublesQuickSelectSketch(final int numValues, final long seed) {
    super(numValues, seed);
  }

  // Hash-table hooks implemented by the heap and direct subclasses.
  abstract void updateValues(int index, double[] values);
  abstract void setNotEmpty();
  abstract boolean isInSamplingMode();
  abstract void rebuild(int newCapacity);
  abstract long getKey(int index);
  abstract void setValues(int index, double[] values);
  abstract void incrementCount();
  abstract void setThetaLong(long thetaLong);
  abstract int insertKey(long key);
  abstract int findOrInsertKey(long key);
  abstract double[] find(long key);
  abstract int getSerializedSizeBytes();
  abstract void serializeInto(WritableMemory mem);

  /**
   * Reduces retained entries to at most the nominal count by lowering theta and rebuilding.
   */
  @Override
  public void trim() {
    if (getRetainedEntries() > getNominalEntries()) {
      setThetaLong(getNewThetaLong());
      rebuild();
    }
  }

  @Override
  public int getMaxBytes() {
    final int nomEntries = getNominalEntries();
    final int numValues = getNumValues();
    return getMaxBytes(nomEntries, numValues);
  }

  @Override
  public int getCurrentBytes() {
    return getSerializedSizeBytes();
  }

  /**
   * @param nomEntries Nominal number of entries. Forced to the nearest power of 2 greater than or equal to
   * given value.
   * @param numValues Number of double values to keep for each key
   * @return maximum required storage bytes given nomEntries and numValues
   */
  static int getMaxBytes(final int nomEntries, final int numValues) {
    // * 2 because the hash table may grow to twice the nominal size before trimming
    return ENTRIES_START
        + (SIZE_OF_KEY_BYTES + SIZE_OF_VALUE_BYTES * numValues) * ceilingIntPowerOf2(nomEntries) * 2;
  }

  // non-public methods below

  // this is a special back door insert for merging
  // not sufficient by itself without keeping track of theta of another sketch
  void merge(final long key, final double[] values) {
    setNotEmpty();
    if (key < thetaLong_) { // only keys below the current theta are retained
      final int index = findOrInsertKey(key);
      if (index < 0) {
        // negative index means a new slot was claimed; ~index is its position
        incrementCount();
        setValues(~index, values);
      } else {
        updateValues(index, values);
      }
      rebuildIfNeeded();
    }
  }

  // Grows the table or lowers theta once the rebuild threshold is exceeded.
  void rebuildIfNeeded() {
    if (getRetainedEntries() <= rebuildThreshold_) { return; }
    if (getCurrentCapacity() > getNominalEntries()) {
      // already at max capacity: lower theta and purge instead of growing
      setThetaLong(getNewThetaLong());
      rebuild();
    } else {
      rebuild(getCurrentCapacity() * getResizeFactor().getValue());
    }
  }

  void rebuild() {
    rebuild(getCurrentCapacity());
  }

  // Unconditional insert of a key known to be absent; caller must guarantee uniqueness.
  void insert(final long key, final double[] values) {
    final int index = insertKey(key);
    setValues(index, values);
    incrementCount();
  }

  // Sets the retained-entry count at which the next rebuild/resize is triggered.
  final void setRebuildThreshold() {
    if (getCurrentCapacity() > getNominalEntries()) {
      rebuildThreshold_ = (int) (getCurrentCapacity() * ThetaUtil.REBUILD_THRESHOLD);
    } else {
      rebuildThreshold_ = (int) (getCurrentCapacity() * ThetaUtil.RESIZE_THRESHOLD);
    }
  }

  /**
   * Inserts the given hashed key with its values, or adds the values element-wise
   * to an existing entry. Keys of 0 or at/above theta are ignored.
   * @param key the hashed key; 0 is reserved for empty slots
   * @param values array of exactly getNumValues() doubles
   * @throws SketchesArgumentException if values has the wrong length
   */
  @Override
  void insertOrIgnore(final long key, final double[] values) {
    if (values.length != getNumValues()) {
      throw new SketchesArgumentException("input array of values must have " + getNumValues()
          + " elements, but has " + values.length);
    }
    setNotEmpty();
    if ((key == 0) || (key >= thetaLong_)) { return; }
    final int index = findOrInsertKey(key);
    if (index < 0) {
      // new slot claimed: ~index is its position
      incrementCount();
      setValues(~index, values);
    } else {
      updateValues(index, values);
    }
    rebuildIfNeeded();
  }

  // Computes a new theta via QuickSelect so that nominalEntries keys remain below it.
  long getNewThetaLong() {
    final long[] keys = new long[getRetainedEntries()];
    int i = 0;
    for (int j = 0; j < getCurrentCapacity(); j++) {
      final long key = getKey(j);
      if (key != 0) { keys[i++] = key; } // collect live keys, skipping empty slots
    }
    return QuickSelect.select(keys, 0, getRetainedEntries() - 1, getNominalEntries());
  }
}
| 2,604 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/ArrayOfDoublesSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import static org.apache.datasketches.common.Util.LS;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.thetacommon.BinomialBoundsN;
import org.apache.datasketches.thetacommon.ThetaUtil;
import org.apache.datasketches.tuple.SerializerDeserializer;
/**
* The base class for the tuple sketch of type ArrayOfDoubles, where an array of double values
* is associated with each key.
* A primitive array of doubles is used here, as opposed to a generic Summary object,
* for improved performance.
*/
public abstract class ArrayOfDoublesSketch {
// The concept of being empty is about representing an empty set.
// So a sketch can be non-empty, and have no entries.
// For example, as a result of a sampling, when some data was presented to the sketch, but no
// entries were retained.
static enum Flags { IS_BIG_ENDIAN, IS_IN_SAMPLING_MODE, IS_EMPTY, HAS_ENTRIES }
static final int SIZE_OF_KEY_BYTES = Long.BYTES;
static final int SIZE_OF_VALUE_BYTES = Double.BYTES;
// Common Layout of first 16 bytes and Empty AoDCompactSketch:
// Long || Start Byte Adr:
// Adr:
// || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
// 0 || Seed Hash | #Dbls | Flags | SkType | FamID | SerVer | Preamble_Longs |
// || 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 |
// 1 ||-------------------------Theta Long------------------------------------------------|
static final int PREAMBLE_LONGS_BYTE = 0; // not used, always 1
static final int SERIAL_VERSION_BYTE = 1;
static final int FAMILY_ID_BYTE = 2;
static final int SKETCH_TYPE_BYTE = 3;
static final int FLAGS_BYTE = 4;
static final int NUM_VALUES_BYTE = 5;
static final int SEED_HASH_SHORT = 6;
static final int THETA_LONG = 8;
final int numValues_;
long thetaLong_;
boolean isEmpty_ = true;
  // numValues is fixed for the life of the sketch: the number of doubles kept per key.
  ArrayOfDoublesSketch(final int numValues) {
    numValues_ = numValues;
  }
/**
* Heapify the given Memory as an ArrayOfDoublesSketch
* @param mem the given Memory
* @return an ArrayOfDoublesSketch
*/
  public static ArrayOfDoublesSketch heapify(final Memory mem) {
    // delegate using the library-wide default update seed
    return heapify(mem, ThetaUtil.DEFAULT_UPDATE_SEED);
  }
/**
* Heapify the given Memory and seed as a ArrayOfDoublesSketch
* @param mem the given Memory
* @param seed the given seed
* @return an ArrayOfDoublesSketch
*/
public static ArrayOfDoublesSketch heapify(final Memory mem, final long seed) {
final SerializerDeserializer.SketchType sketchType = SerializerDeserializer.getSketchType(mem);
if (sketchType == SerializerDeserializer.SketchType.ArrayOfDoublesQuickSelectSketch) {
return new HeapArrayOfDoublesQuickSelectSketch(mem, seed);
}
return new HeapArrayOfDoublesCompactSketch(mem, seed);
}
/**
* Wrap the given Memory as an ArrayOfDoublesSketch
* @param mem the given Memory
* @return an ArrayOfDoublesSketch
*/
  public static ArrayOfDoublesSketch wrap(final Memory mem) {
    // delegate using the library-wide default update seed
    return wrap(mem, ThetaUtil.DEFAULT_UPDATE_SEED);
  }
/**
 * Wrap the given Memory and seed as an ArrayOfDoublesSketch.
 * The concrete direct (Memory-backed, read-only) class is chosen from the
 * serialized sketch-type byte.
 * @param mem the given Memory containing a serialized ArrayOfDoublesSketch
 * @param seed the update seed the sketch was built with
 * @return an ArrayOfDoublesSketch backed by the given Memory
 */
public static ArrayOfDoublesSketch wrap(final Memory mem, final long seed) {
  final boolean isQuickSelect = SerializerDeserializer.getSketchType(mem)
      == SerializerDeserializer.SketchType.ArrayOfDoublesQuickSelectSketch;
  return isQuickSelect
      ? new DirectArrayOfDoublesQuickSelectSketchR(mem, seed)
      : new DirectArrayOfDoublesCompactSketch(mem, seed);
}
/**
 * Estimates the cardinality of the set (number of unique values presented to the sketch).
 * In exact mode this is simply the retained-entry count.
 * @return best estimate of the number of unique values
 */
public double getEstimate() {
  return isEstimationMode()
      ? getRetainedEntries() / getTheta()
      : getRetainedEntries();
}
/**
 * Gets the approximate upper error bound given the specified number of Standard Deviations.
 * This will return getEstimate() if isEmpty() is true.
 *
 * @param numStdDev
 * <a href="{@docRoot}/resources/dictionary.html#numStdDev">See Number of Standard Deviations</a>
 * @return the upper bound.
 */
public double getUpperBound(final int numStdDev) {
  return isEstimationMode()
      ? BinomialBoundsN.getUpperBound(getRetainedEntries(), getTheta(), numStdDev, isEmpty_)
      : getRetainedEntries();
}
/**
 * Gets the approximate lower error bound given the specified number of Standard Deviations.
 * This will return getEstimate() if isEmpty() is true.
 *
 * @param numStdDev
 * <a href="{@docRoot}/resources/dictionary.html#numStdDev">See Number of Standard Deviations</a>
 * @return the lower bound.
 */
public double getLowerBound(final int numStdDev) {
  return isEstimationMode()
      ? BinomialBoundsN.getLowerBound(getRetainedEntries(), getTheta(), numStdDev, isEmpty_)
      : getRetainedEntries();
}
/**
 * Returns true if this sketch's data structure is backed by Memory or WritableMemory.
 * @return true if this sketch's data structure is backed by Memory or WritableMemory.
 */
public abstract boolean hasMemory();
/**
 * Returns the backing Memory object if it exists, otherwise null.
 * @return the Memory object if it exists, otherwise null.
 */
abstract Memory getMemory();
/**
 * <a href="{@docRoot}/resources/dictionary.html#empty">See Empty</a>
 * @return true if empty.
 */
public boolean isEmpty() {
  return isEmpty_;
}
/**
 * @return number of double values associated with each key
 */
public int getNumValues() {
  return numValues_;
}
/**
 * Returns true if the sketch is Estimation Mode (as opposed to Exact Mode).
 * This is true if theta &lt; 1.0 AND isEmpty() is false.
 * @return true if the sketch is in estimation mode.
 */
public boolean isEstimationMode() {
  return ((thetaLong_ < Long.MAX_VALUE) && !isEmpty());
}
/**
 * Gets the value of theta as a double between zero and one.
 * thetaLong is scaled by Long.MAX_VALUE, which represents theta = 1.0.
 * @return the value of theta as a double
 */
public double getTheta() {
  return getThetaLong() / (double) Long.MAX_VALUE;
}
/**
 * @return number of retained entries
 */
public abstract int getRetainedEntries();
/**
 * @return the maximum number of bytes for this sketch when serialized.
 */
public abstract int getMaxBytes();
/**
 * For compact sketches this is the same as getMaxBytes().
 * @return the current number of bytes for this sketch when serialized.
 */
public abstract int getCurrentBytes();
/**
 * @return serialized representation of the sketch
 */
public abstract byte[] toByteArray();
/**
 * @return array of arrays of double values in the sketch, one inner array per retained key
 */
public abstract double[][] getValues();
// Flattened values: entry i occupies indices [i*numValues_, (i+1)*numValues_).
abstract double[] getValuesAsOneDimension();
// Retained hash keys, package-private for set operations.
abstract long[] getKeys();
/**
 * An empty sketch always reports theta = 1.0 regardless of the stored field.
 * @return the value of theta as a long
 */
long getThetaLong() {
  return isEmpty() ? Long.MAX_VALUE : thetaLong_;
}
abstract short getSeedHash();
/**
 * @return iterator over the sketch
 */
public abstract ArrayOfDoublesSketchIterator iterator();
/**
 * Compacts this sketch on-heap.
 * @return this sketch in compact form, which is immutable.
 */
public ArrayOfDoublesCompactSketch compact() {
  return compact(null);
}
/**
 * Compacts this sketch into the given destination WritableMemory,
 * or on-heap if dstMem is null.
 * @param dstMem the destination WritableMemory
 * @return this sketch in compact form, which is immutable.
 */
public abstract ArrayOfDoublesCompactSketch compact(WritableMemory dstMem);
@Override
public String toString() {
  // Human-readable summary; the exact labels/format below are relied on by log readers,
  // so do not alter them casually.
  final int seedHash = Short.toUnsignedInt(getSeedHash());
  final StringBuilder sb = new StringBuilder();
  sb.append("### ").append(this.getClass().getSimpleName()).append(" SUMMARY: ").append(LS);
  sb.append("   Estimate                : ").append(getEstimate()).append(LS);
  sb.append("   Upper Bound, 95% conf   : ").append(getUpperBound(2)).append(LS);
  sb.append("   Lower Bound, 95% conf   : ").append(getLowerBound(2)).append(LS);
  sb.append("   Theta (double)          : ").append(getTheta()).append(LS);
  sb.append("   Theta (long)            : ").append(getThetaLong()).append(LS);
  sb.append("   EstMode?                : ").append(isEstimationMode()).append(LS);
  sb.append("   Empty?                  : ").append(isEmpty()).append(LS);
  sb.append("   Retained Entries        : ").append(getRetainedEntries()).append(LS);
  // Extra lines only for updatable sketches, which expose configuration parameters.
  if (this instanceof ArrayOfDoublesUpdatableSketch) {
    final ArrayOfDoublesUpdatableSketch updatable = (ArrayOfDoublesUpdatableSketch) this;
    sb.append("   Nominal Entries (k)     : ").append(updatable.getNominalEntries()).append(LS);
    sb.append("   Current Capacity        : ").append(updatable.getCurrentCapacity()).append(LS);
    sb.append("   Resize Factor           : ").append(updatable.getResizeFactor().getValue()).append(LS);
    sb.append("   Sampling Probability (p): ").append(updatable.getSamplingProbability()).append(LS);
  }
  sb.append("   Seed Hash               : ")
    .append(Integer.toHexString(seedHash)).append(" | ").append(seedHash).append(LS);
  sb.append("### END SKETCH SUMMARY").append(LS);
  return sb.toString();
}
}
| 2,605 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/HeapArrayOfDoublesCompactSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import java.nio.ByteOrder;
import java.util.Arrays;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.thetacommon.ThetaUtil;
import org.apache.datasketches.tuple.SerializerDeserializer;
import org.apache.datasketches.tuple.Util;
/**
 * The on-heap implementation of the tuple Compact Sketch of type ArrayOfDoubles.
 *
 * <p>Note: {@code keys_} and {@code values_} are null when the sketch retains no entries.
 * All accessors below are null-safe for that state.</p>
 */
final class HeapArrayOfDoublesCompactSketch extends ArrayOfDoublesCompactSketch {

  private final short seedHash_;
  private long[] keys_;     //null if no retained entries
  private double[] values_; //null if no retained entries; flattened, numValues_ per key

  /**
   * Converts the given UpdatableArrayOfDoublesSketch to this compact form.
   * @param sketch the given UpdatableArrayOfDoublesSketch
   */
  HeapArrayOfDoublesCompactSketch(final ArrayOfDoublesUpdatableSketch sketch) {
    this(sketch, sketch.getThetaLong());
  }

  /**
   * Converts the given UpdatableArrayOfDoublesSketch to this compact form,
   * trimming if necessary according to the given thetaLong.
   * @param sketch the given UpdatableArrayOfDoublesSketch
   * @param thetaLong new value of thetaLong
   */
  HeapArrayOfDoublesCompactSketch(final ArrayOfDoublesUpdatableSketch sketch, final long thetaLong) {
    super(sketch.getNumValues());
    isEmpty_ = sketch.isEmpty();
    thetaLong_ = Math.min(sketch.getThetaLong(), thetaLong);
    seedHash_ = Util.computeSeedHash(sketch.getSeed());
    final int count = sketch.getRetainedEntries();
    if (count > 0) {
      keys_ = new long[count];
      values_ = new double[count * numValues_];
      final ArrayOfDoublesSketchIterator it = sketch.iterator();
      int i = 0;
      while (it.next()) {
        final long key = it.getKey();
        // retain only entries below the (possibly reduced) theta
        if (key < thetaLong_) {
          keys_[i] = key;
          System.arraycopy(it.getValues(), 0, values_, i * numValues_, numValues_);
          i++;
        }
      }
      // trim the arrays if some entries were rejected by the reduced theta
      if (i < count) {
        if (i == 0) {
          keys_ = null;
          values_ = null;
        } else {
          keys_ = Arrays.copyOf(keys_, i);
          values_ = Arrays.copyOf(values_, i * numValues_);
        }
      }
    }
  }

  /*
   * Creates an instance from components. keys/values may be null for an empty result.
   */
  HeapArrayOfDoublesCompactSketch(final long[] keys, final double[] values, final long thetaLong,
      final boolean isEmpty, final int numValues, final short seedHash) {
    super(numValues);
    keys_ = keys;
    values_ = values;
    thetaLong_ = thetaLong;
    isEmpty_ = isEmpty;
    seedHash_ = seedHash;
  }

  /**
   * This is to create an instance given a serialized form.
   * @param mem <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
   */
  HeapArrayOfDoublesCompactSketch(final Memory mem) {
    this(mem, ThetaUtil.DEFAULT_UPDATE_SEED);
  }

  /**
   * This is to create an instance given a serialized form.
   * @param mem <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
   * @param seed <a href="{@docRoot}/resources/dictionary.html#seed">See seed</a>
   */
  HeapArrayOfDoublesCompactSketch(final Memory mem, final long seed) {
    super(mem.getByte(NUM_VALUES_BYTE));
    seedHash_ = mem.getShort(SEED_HASH_SHORT);
    SerializerDeserializer.validateFamily(mem.getByte(FAMILY_ID_BYTE),
        mem.getByte(PREAMBLE_LONGS_BYTE));
    SerializerDeserializer.validateType(mem.getByte(SKETCH_TYPE_BYTE),
        SerializerDeserializer.SketchType.ArrayOfDoublesCompactSketch);
    final byte version = mem.getByte(SERIAL_VERSION_BYTE);
    if (version != serialVersionUID) {
      throw new SketchesArgumentException(
          "Serial version mismatch. Expected: " + serialVersionUID + ", actual: " + version);
    }
    // serialized byte order must match this platform's native order
    final boolean isBigEndian =
        (mem.getByte(FLAGS_BYTE) & (1 << Flags.IS_BIG_ENDIAN.ordinal())) != 0;
    if (isBigEndian ^ ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)) {
      throw new SketchesArgumentException("Byte order mismatch");
    }
    Util.checkSeedHashes(seedHash_, Util.computeSeedHash(seed));
    isEmpty_ = (mem.getByte(FLAGS_BYTE) & (1 << Flags.IS_EMPTY.ordinal())) != 0;
    thetaLong_ = mem.getLong(THETA_LONG);
    final boolean hasEntries =
        (mem.getByte(FLAGS_BYTE) & (1 << Flags.HAS_ENTRIES.ordinal())) != 0;
    if (hasEntries) {
      final int count = mem.getInt(RETAINED_ENTRIES_INT);
      keys_ = new long[count];
      values_ = new double[count * numValues_];
      mem.getLongArray(ENTRIES_START, keys_, 0, count);
      mem.getDoubleArray(ENTRIES_START + ((long) SIZE_OF_KEY_BYTES * count), values_, 0, values_.length);
    }
  }

  @Override
  public ArrayOfDoublesCompactSketch compact(final WritableMemory dstMem) {
    if (dstMem == null) {
      // keys_/values_ are null for an empty sketch; clone() would NPE, so copy null-safely
      final long[] keysCopy = (keys_ == null) ? null : keys_.clone();
      final double[] valuesCopy = (values_ == null) ? null : values_.clone();
      return new
        HeapArrayOfDoublesCompactSketch(keysCopy, valuesCopy, thetaLong_, isEmpty_, numValues_, seedHash_);
    }
    final byte[] byteArr = this.toByteArray();
    dstMem.putByteArray(0, byteArr, 0, byteArr.length);
    return new DirectArrayOfDoublesCompactSketch(dstMem);
  }

  @Override
  public int getRetainedEntries() {
    return keys_ == null ? 0 : keys_.length;
  }

  @Override
  public byte[] toByteArray() {
    final int count = getRetainedEntries();
    final int sizeBytes = getCurrentBytes();
    final byte[] bytes = new byte[sizeBytes];
    final WritableMemory mem = WritableMemory.writableWrap(bytes);
    mem.putByte(PREAMBLE_LONGS_BYTE, (byte) 1);
    mem.putByte(SERIAL_VERSION_BYTE, serialVersionUID);
    mem.putByte(FAMILY_ID_BYTE, (byte) Family.TUPLE.getID());
    mem.putByte(SKETCH_TYPE_BYTE,
        (byte) SerializerDeserializer.SketchType.ArrayOfDoublesCompactSketch.ordinal());
    final boolean isBigEndian = ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN);
    mem.putByte(FLAGS_BYTE, (byte) (
        ((isBigEndian ? 1 : 0) << Flags.IS_BIG_ENDIAN.ordinal())
        | ((isEmpty() ? 1 : 0) << Flags.IS_EMPTY.ordinal())
        | ((count > 0 ? 1 : 0) << Flags.HAS_ENTRIES.ordinal())
    ));
    mem.putByte(NUM_VALUES_BYTE, (byte) numValues_);
    mem.putShort(SEED_HASH_SHORT, seedHash_);
    mem.putLong(THETA_LONG, thetaLong_);
    if (count > 0) {
      mem.putInt(RETAINED_ENTRIES_INT, count);
      mem.putLongArray(ENTRIES_START, keys_, 0, count);
      mem.putDoubleArray(ENTRIES_START + ((long) SIZE_OF_KEY_BYTES * count), values_, 0, values_.length);
    }
    return bytes;
  }

  @Override
  //converts compact heap array of double[] to compact double[][]
  public double[][] getValues() {
    final int count = getRetainedEntries();
    final double[][] values = new double[count][];
    for (int j = 0; j < count; j++) {
      values[j] = Arrays.copyOfRange(values_, j * numValues_, (j + 1) * numValues_);
    }
    return values;
  }

  @Override
  double[] getValuesAsOneDimension() {
    // null-safe: an empty sketch has no values array
    return (values_ == null) ? new double[0] : values_.clone();
  }

  @Override
  long[] getKeys() {
    // null-safe: an empty sketch has no keys array
    return (keys_ == null) ? new long[0] : keys_.clone();
  }

  @Override
  public ArrayOfDoublesSketchIterator iterator() {
    return new HeapArrayOfDoublesSketchIterator(keys_, values_, numValues_);
  }

  @Override
  short getSeedHash() {
    return seedHash_;
  }

  @Override
  public boolean hasMemory() { return false; }

  @Override
  Memory getMemory() { return null; }
}
| 2,606 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/DirectArrayOfDoublesUnionR.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import org.apache.datasketches.common.SketchesReadOnlyException;
import org.apache.datasketches.memory.WritableMemory;
final class DirectArrayOfDoublesUnionR extends DirectArrayOfDoublesUnion {
  /**
   * Wraps the given Memory as a read-only union view.
   * @param gadget the ArrayOfDoublesQuickSelectSketch
   * @param mem <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
   */
  DirectArrayOfDoublesUnionR(final ArrayOfDoublesQuickSelectSketch gadget, final WritableMemory mem) {
    super(gadget, mem);
  }
  // Read-only view: all mutating operations throw.
  @Override
  public void union(final ArrayOfDoublesSketch tupleSketch) {
    throw new SketchesReadOnlyException();
  }
  @Override
  public void reset() {
    throw new SketchesReadOnlyException();
  }
}
| 2,607 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package is for a concrete implementation of the Tuple sketch for an array of double values.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
| 2,608 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/ArrayOfDoublesAnotBImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import static java.lang.Math.min;
import static org.apache.datasketches.common.Util.exactLog2OfLong;
import static org.apache.datasketches.thetacommon.HashOperations.continueCondition;
import static org.apache.datasketches.thetacommon.HashOperations.convertToHashTable;
import static org.apache.datasketches.thetacommon.HashOperations.count;
import static org.apache.datasketches.thetacommon.HashOperations.hashSearch;
import java.util.Arrays;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SuppressFBWarnings;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.thetacommon.SetOperationCornerCases;
import org.apache.datasketches.thetacommon.SetOperationCornerCases.AnotbAction;
import org.apache.datasketches.thetacommon.SetOperationCornerCases.CornerCase;
import org.apache.datasketches.thetacommon.ThetaUtil;
import org.apache.datasketches.tuple.Util;
/**
 * Computes a set difference, A-AND-NOT-B, of two ArrayOfDoublesSketches.
 *
 * <p>This class includes a stateless operation as follows:</p>
 *
 * <pre><code>
 * CompactSketch csk = anotb.aNotB(ArrayOfDoublesSketch skA, ArrayOfDoublesSketch skB);
 * </code></pre>
 *
 * @author Lee Rhodes
 */
public class ArrayOfDoublesAnotBImpl extends ArrayOfDoublesAnotB {
  private int numValues_;
  private short seedHash_;
  private long thetaLong_ = Long.MAX_VALUE;
  private boolean empty_ = true;
  private long[] keys_;
  private double[] values_;
  private int count_;

  ArrayOfDoublesAnotBImpl(final int numValues, final long seed) {
    numValues_ = numValues;
    seedHash_ = Util.computeSeedHash(seed);
  }

  @Override
  @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "This is OK here")
  public void update(final ArrayOfDoublesSketch skA, final ArrayOfDoublesSketch skB) {
    if (skA == null || skB == null) {
      throw new SketchesArgumentException("Neither argument may be null.");
    }
    numValues_ = skA.getNumValues();
    seedHash_ = skA.getSeedHash();
    if (numValues_ != skB.getNumValues()) {
      throw new SketchesArgumentException("Inputs cannot have different numValues");
    }
    if (seedHash_ != skB.getSeedHash()) {
      throw new SketchesArgumentException("Inputs cannot have different seedHashes");
    }
    final long thetaLongA = skA.getThetaLong();
    final int countA = skA.getRetainedEntries();
    final boolean emptyA = skA.isEmpty();

    final long thetaLongB = skB.getThetaLong();
    final int countB = skB.getRetainedEntries();
    final boolean emptyB = skB.isEmpty();

    // Degenerate/corner cases (empty or zero-count inputs) are dispatched via a table
    // rather than handled inline; see SetOperationCornerCases.
    final int id =
        SetOperationCornerCases.createCornerCaseId(thetaLongA, countA, emptyA, thetaLongB, countB, emptyB);
    final CornerCase cCase = CornerCase.caseIdToCornerCase(id);
    final AnotbAction anotbAction = cCase.getAnotbAction();
    final long minThetaLong = min(thetaLongA, thetaLongB);

    switch (anotbAction) {
      case EMPTY_1_0_T: {
        reset();
        break;
      }
      case DEGEN_MIN_0_F: {
        keys_ = null;
        values_ = null;
        thetaLong_ = minThetaLong;
        empty_ = false;
        count_ = 0;
        break;
      }
      case DEGEN_THA_0_F: {
        keys_ = null;
        values_ = null;
        thetaLong_ = thetaLongA;
        empty_ = false;
        count_ = 0;
        break;
      }
      case TRIM_A: {
        // B contributes nothing; just trim A's entries down to the smaller theta.
        final DataArrays daA = new DataArrays(skA.getKeys(), skA.getValuesAsOneDimension(), countA);
        final DataArrays da = trimDataArrays(daA, minThetaLong, numValues_);
        keys_ = da.hashArr;
        values_ = da.valuesArr;
        thetaLong_ = minThetaLong;
        empty_ = skA.isEmpty();
        count_ = da.count;
        break;
      }
      case SKETCH_A: {
        // Result is exactly A; take a compact copy of its state.
        final ArrayOfDoublesCompactSketch csk = skA.compact();
        keys_ = csk.getKeys();
        values_ = csk.getValuesAsOneDimension();
        thetaLong_ = csk.thetaLong_;
        empty_ = csk.isEmpty();
        count_ = csk.getRetainedEntries();
        break;
      }
      case FULL_ANOTB: { //both A and B should have valid entries.
        final long[] keysA = skA.getKeys();
        final double[] valuesA = skA.getValuesAsOneDimension();
        final DataArrays daR = getResultArrays(minThetaLong, countA, keysA, valuesA, skB);
        count_ = daR.count;
        keys_ = (count_ == 0) ? null : daR.hashArr;
        values_ = (count_ == 0) ? null : daR.valuesArr;
        thetaLong_ = minThetaLong;
        empty_ = (minThetaLong == Long.MAX_VALUE) && (count_ == 0);
        break;
      }
      //default: not possible
    }
  }

  @Override
  public ArrayOfDoublesCompactSketch getResult() {
    return new HeapArrayOfDoublesCompactSketch(keys_, values_, thetaLong_, empty_, numValues_, seedHash_);
  }

  @Override
  public ArrayOfDoublesCompactSketch getResult(final WritableMemory dstMem) {
    return new DirectArrayOfDoublesCompactSketch(keys_, values_, thetaLong_, empty_, numValues_, seedHash_, dstMem);
  }

  /**
   * Builds the A-and-not-B result arrays: entries of A (below minThetaLong) whose hash
   * is not present in B.
   */
  private static DataArrays getResultArrays(
      final long minThetaLong,
      final int countA,
      final long[] hashArrA,
      final double[] valuesArrA,
      final ArrayOfDoublesSketch skB) {
    final int numValues = skB.numValues_;

    //create hashtable of skB for O(1) membership tests
    final long[] hashTableB = convertToHashTable(skB.getKeys(), skB.getRetainedEntries(), minThetaLong,
        ThetaUtil.REBUILD_THRESHOLD);

    //build temporary arrays of skA, sized for the worst case (no matches)
    long[] tmpHashArrA = new long[countA];
    double[] tmpValuesArrA = new double[countA * numValues];

    //search for non matches and build temp arrays
    final int lgHTBLen = exactLog2OfLong(hashTableB.length);
    int nonMatches = 0;
    for (int i = 0; i < countA; i++) {
      final long hash = hashArrA[i];
      if (continueCondition(minThetaLong, hash)) { continue; } //skip entries at/above theta
      final int index = hashSearch(hashTableB, lgHTBLen, hash);
      if (index == -1) { //hash not found in B: keep this entry of A
        tmpHashArrA[nonMatches] = hash;
        System.arraycopy(valuesArrA, i * numValues, tmpValuesArrA, nonMatches * numValues, numValues);
        nonMatches++;
      }
    }
    tmpHashArrA = Arrays.copyOf(tmpHashArrA, nonMatches);
    tmpValuesArrA = Arrays.copyOf(tmpValuesArrA, nonMatches * numValues);
    return new DataArrays(tmpHashArrA, tmpValuesArrA, nonMatches);
  }

  /** Simple value-holder for a (hash array, values array, entry count) triple. */
  private static class DataArrays {
    long[] hashArr;
    double[] valuesArr;
    int count;

    DataArrays(final long[] hashArr, final double[] valuesArr, final int count) {
      this.hashArr = hashArr;
      this.valuesArr = valuesArr;
      this.count = count;
    }
  }

  /**
   * Returns a copy of the given arrays retaining only entries whose hash is below thetaLong.
   *
   * <p>Bug fix: the input hashes are in arbitrary (hash-table) order, so the scan must
   * cover the ENTIRE input array. The previous version iterated only the first
   * {@code count} slots (where {@code count} is the number of qualifying entries),
   * silently dropping qualifying entries located past that index and leaving
   * zero-filled tail slots in the output.</p>
   */
  private static DataArrays trimDataArrays(final DataArrays da, final long thetaLong, final int numValues) {
    final long[] hashArrIn = da.hashArr;
    final double[] valuesArrIn = da.valuesArr;
    final int count = count(hashArrIn, thetaLong); //exact number of qualifying entries
    final long[] hashArrOut = new long[count];
    final double[] valuesArrOut = new double[count * numValues];
    int haOutIdx = 0;
    int vaOutIdx = 0;
    for (int haInIdx = 0, vaInIdx = 0; haInIdx < hashArrIn.length; haInIdx++, vaInIdx += numValues) {
      final long hash = hashArrIn[haInIdx];
      if (continueCondition(thetaLong, hash)) { continue; }
      hashArrOut[haOutIdx] = hash;
      System.arraycopy(valuesArrIn, vaInIdx, valuesArrOut, vaOutIdx, numValues);
      haOutIdx++;
      vaOutIdx += numValues;
    }
    //haOutIdx == count by construction of count(...)
    return new DataArrays(hashArrOut, valuesArrOut, haOutIdx);
  }

  private void reset() {
    empty_ = true;
    thetaLong_ = Long.MAX_VALUE;
    keys_ = null;
    values_ = null;
    count_ = 0;
  }
}
| 2,609 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/HeapArrayOfDoublesIntersection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
/**
 * On-heap implementation of intersection set operation for tuple sketches of type
 * ArrayOfDoubles.
 */
final class HeapArrayOfDoublesIntersection extends ArrayOfDoublesIntersection {
  /**
   * Creates an instance of a HeapArrayOfDoublesIntersection with a custom update seed.
   * @param numValues number of double values associated with each key
   * @param seed <a href="{@docRoot}/resources/dictionary.html#seed">See seed</a>
   */
  HeapArrayOfDoublesIntersection(final int numValues, final long seed) {
    super(numValues, seed);
  }
  // Creates the internal on-heap QuickSelect sketch that holds intersection results.
  // The middle arguments (0, 1f) are presumably the lg resize factor and sampling
  // probability — TODO confirm against HeapArrayOfDoublesQuickSelectSketch's constructor.
  @Override
  protected ArrayOfDoublesQuickSelectSketch createSketch(final int nomEntries, final int numValues,
      final long seed) {
    return new HeapArrayOfDoublesQuickSelectSketch(nomEntries, 0, 1f, numValues, seed);
  }
}
| 2,610 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/tuple/arrayofdoubles/DirectArrayOfDoublesUnion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.tuple.arrayofdoubles;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.tuple.SerializerDeserializer;
/**
 * Direct Union operation for tuple sketches of type ArrayOfDoubles.
 * <p>This implementation uses data in a given Memory that is owned and managed by the caller.
 * This Memory can be off-heap, which if managed properly will greatly reduce the need for
 * the JVM to perform garbage collection.</p>
 */
class DirectArrayOfDoublesUnion extends ArrayOfDoublesUnion {
  // caller-owned backing Memory: preamble followed by the gadget sketch region
  final WritableMemory mem_;
  /**
   * Creates an instance of DirectArrayOfDoublesUnion.
   * @param nomEntries Nominal number of entries. Forced to the nearest power of 2 greater than
   * given value.
   * @param numValues Number of double values to keep for each key.
   * @param seed <a href="{@docRoot}/resources/dictionary.html#seed">See seed</a>
   * @param dstMem <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
   */
  DirectArrayOfDoublesUnion(final int nomEntries, final int numValues, final long seed,
      final WritableMemory dstMem) {
    // the internal gadget sketch occupies the region of dstMem just past the union preamble
    super(new DirectArrayOfDoublesQuickSelectSketch(nomEntries, 3, 1f, numValues, seed,
        dstMem.writableRegion(PREAMBLE_SIZE_BYTES, dstMem.getCapacity() - PREAMBLE_SIZE_BYTES)));
    mem_ = dstMem;
    // write the union preamble
    mem_.putByte(PREAMBLE_LONGS_BYTE, (byte) 1); // unused, always 1
    mem_.putByte(SERIAL_VERSION_BYTE, serialVersionUID);
    mem_.putByte(FAMILY_ID_BYTE, (byte) Family.TUPLE.getID());
    mem_.putByte(SKETCH_TYPE_BYTE, (byte) SerializerDeserializer.SketchType.ArrayOfDoublesUnion.ordinal());
    mem_.putLong(THETA_LONG, gadget_.getThetaLong());
  }
  //Called from wrapUnion below and extended by DirectArrayOfDoublesUnionR
  DirectArrayOfDoublesUnion(final ArrayOfDoublesQuickSelectSketch gadget, final WritableMemory mem) {
    super(gadget);
    mem_ = mem;
    unionThetaLong_ = mem.getLong(THETA_LONG);
  }
  // Keeps the serialized preamble's theta in sync with the in-memory union theta.
  @Override
  void setUnionThetaLong(final long thetaLong) {
    super.setUnionThetaLong(thetaLong);
    mem_.putLong(THETA_LONG, thetaLong);
  }
  /**
   * Wraps the given Memory as a writable or read-only union after validating
   * version, family, and sketch type.
   * @param mem the Memory holding a serialized union
   * @param seed the update seed used when the union was created
   * @param isWritable true for a writable union, false for read-only
   * @return the wrapped ArrayOfDoublesUnion
   */
  static ArrayOfDoublesUnion wrapUnion(final WritableMemory mem, final long seed, final boolean isWritable) {
    final byte version = mem.getByte(ArrayOfDoublesUnion.SERIAL_VERSION_BYTE);
    if (version != ArrayOfDoublesUnion.serialVersionUID) {
      throw new SketchesArgumentException("Serial version mismatch. Expected: "
          + serialVersionUID + ", actual: " + version);
    }
    SerializerDeserializer.validateFamily(mem.getByte(FAMILY_ID_BYTE), mem.getByte(PREAMBLE_LONGS_BYTE));
    SerializerDeserializer.validateType(mem.getByte(SKETCH_TYPE_BYTE),
        SerializerDeserializer.SketchType.ArrayOfDoublesUnion);
    if (isWritable) {
      final WritableMemory sketchMem = mem.writableRegion(PREAMBLE_SIZE_BYTES,
          mem.getCapacity() - PREAMBLE_SIZE_BYTES);
      return new DirectArrayOfDoublesUnion(new DirectArrayOfDoublesQuickSelectSketch(sketchMem, seed), mem);
    }
    final Memory sketchMem = mem.region(PREAMBLE_SIZE_BYTES, mem.getCapacity() - PREAMBLE_SIZE_BYTES);
    return new DirectArrayOfDoublesUnionR(new DirectArrayOfDoublesQuickSelectSketchR(sketchMem, seed), mem);
  }
}
| 2,611 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/hllmap/Map.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.hllmap;
import java.math.BigInteger;
import org.apache.datasketches.hash.MurmurHash3;
/**
 * Base class and API for all the maps.
 *
 * @author Lee Rhodes
 * @author Alexander Saydakov
 * @author Kevin Lang
 */
abstract class Map {
  private static final String LS = System.getProperty("line.separator");
  static final long SEED = 1234567890L;
  static final int SIX_BIT_MASK = 0X3F; // 6 bits
  static final int TEN_BIT_MASK = 0X3FF; //10 bits
  // These parameters are tuned as a set to avoid pathological resizing.
  // Consider modeling the behavior before changing any of them.
  static final int COUPON_MAP_MIN_NUM_ENTRIES = 157;
  static final double COUPON_MAP_SHRINK_TRIGGER_FACTOR = 0.5;
  static final double COUPON_MAP_GROW_TRIGGER_FACTOR = 15.0 / 16.0;
  static final double COUPON_MAP_TARGET_FILL_FACTOR = 2.0 / 3.0;
  // bytes needed to hold one bit per minimum entry (ceil(157 / 8))
  static final int COUPON_MAP_MIN_NUM_ENTRIES_ARR_SIZE = (int)Math.ceil(COUPON_MAP_MIN_NUM_ENTRIES / 8.0);
  // fixed key size in bytes for all entries of this map
  final int keySizeBytes_;
  /**
   * Constructs the base map with a fixed key size.
   * @param keySizeBytes size of each key in bytes
   */
  Map(final int keySizeBytes) {
    keySizeBytes_ = keySizeBytes;
  }
/**
* Update this map with a key and a coupon.
* Return the cardinality estimate of all identifiers that have been associated with this key,
* including this update.
* @param key the dimensional criteria for measuring cardinality
* @param coupon the property associated with the key for which cardinality is to be measured.
* @return the cardinality estimate of all identifiers that have been associated with this key,
* including this update.
*/
abstract double update(byte[] key, short coupon);
/**
* Updates this map with an index and a coupon
* @param index the given index
* @param coupon the given coupon
* @return the cardinality estimate of all identifiers that have been associated with this key,
* including this update.
*/
abstract double update(int index, short coupon);
/**
* Returns the estimate of the cardinality of identifiers associated with the given key.
* @param key the given key
* @return the estimate of the cardinality of identifiers associated with the given key.
*/
abstract double getEstimate(byte[] key);
/**
* Update the internal estimate at the given index
* @param index the given index
* @param estimate the given estimate
*/
void updateEstimate(final int index, final double estimate) {}
/**
* Returns the upper bound cardinality with respect to {@link #getEstimate(byte[])} associated
* with the given key. This approximates the RSE with 68% confidence.
* @param key the given key
* @return the upper bound cardinality with respect to {@link #getEstimate(byte[])} associated
* with the given key.
*/
abstract double getUpperBound(byte[] key);
/**
* Returns the lower bound cardinality with respect to {@link #getEstimate(byte[])} associated
* with the given key. This approximates the RSE with 68% confidence.
* @param key the given key
* @return the lower bound cardinality with respect to {@link #getEstimate(byte[])} associated
* with the given key.
*/
abstract double getLowerBound(byte[] key);
abstract int findKey(byte[] key);
abstract int findOrInsertKey(byte[] key);
abstract CouponsIterator getCouponsIterator(int index);
abstract int getMaxCouponsPerEntry();
abstract int getCapacityCouponsPerEntry();
abstract int getActiveEntries();
abstract int getDeletedEntries();
abstract double getEntrySizeBytes();
abstract int getTableEntries();
abstract int getCapacityEntries();
abstract int getCurrentCountEntries();
abstract long getMemoryUsageBytes();
int getKeySizeBytes() {
return keySizeBytes_;
}
/**
* Delete the key at the given index
* @param index the given index
*/
void deleteKey(final int index) {}
/**
* Returns <code>true</code> if the two specified sub-arrays of bytes are <i>equal</i> to one another.
* Two arrays are considered equal if all corresponding pairs of elements in the two arrays are
* equal. In other words, two arrays are equal if and only if they contain the same elements
* in the same order.
*
* @param a one sub-array to be tested for equality
* @param offsetA the offset in bytes of the start of sub-array <i>a</i>.
* @param b the other sub-array to be tested for equality
* @param offsetB the offset in bytes of the start of sub-array <i>b</i>.
* @param length the length in bytes of the two sub-arrays.
* @return <code>true</code> if the two sub-arrays are equal
*/
static final boolean arraysEqual(final byte[] a, final int offsetA, final byte[] b,
final int offsetB, final int length) {
for (int i = 0; i < length; i++) {
if (a[i + offsetA] != b[i + offsetB]) {
return false;
}
}
return true;
}
/**
* Returns the HLL array index and value as a 16-bit coupon given the identifier to be hashed
* and k.
* @param identifier the given identifier
* @return the HLL array index and value
*/
static final int coupon16(final byte[] identifier) {
final long[] hash = MurmurHash3.hash(identifier, SEED);
final int hllIdx = (int) (((hash[0] >>> 1) % 1024) & TEN_BIT_MASK); //hash[0] for 10-bit address
final int lz = Long.numberOfLeadingZeros(hash[1]);
final int value = (lz > 62 ? 62 : lz) + 1;
return (value << 10) | hllIdx;
}
static final int coupon16Value(final int coupon) {
return (coupon >>> 10) & SIX_BIT_MASK;
}
static final int getIndex(final long hash, final int tableEntries) {
return (int) ((hash >>> 1) % tableEntries);
}
static final int getStride(final long hash, final int tableEntries) {
return (int) ((hash >>> 1) % (tableEntries - 2L) + 1L);
}
static boolean isBitSet(final byte[] byteArr, final int bitIndex) {
final int byteIndex = bitIndex / 8;
final int mask = 1 << (bitIndex % 8);
return (byteArr[byteIndex] & mask) > 0;
}
static boolean isBitClear(final byte[] byteArr, final int bitIndex) {
final int byteIndex = bitIndex / 8;
final int mask = 1 << (bitIndex % 8);
return (byteArr[byteIndex] & mask) == 0;
}
static void clearBit(final byte[] byteArr, final int index) {
final int byteIndex = index / 8;
final int mask = 1 << (index % 8);
byteArr[byteIndex] &= ~mask;
}
static void setBit(final byte[] bits, final int index) {
final int byteIndex = index / 8;
final int mask = 1 << (index % 8);
bits[byteIndex] |= mask;
}
/**
* Returns the next prime number that is greater than the given target. There will be
* no prime numbers less than the returned prime number that are greater than the given target.
* @param target the starting value to begin the search for the next prime
* @return the next prime number that is greater than the given target.
*/
static int nextPrime(final int target) {
return BigInteger.valueOf(target).nextProbablePrime().intValueExact();
}
static String fmtLong(final long value) {
return String.format("%,d", value);
}
static String fmtDouble(final double value) {
return String.format("%,.3f", value);
}
@Override
public String toString() {
final String mcpe = Map.fmtLong(getMaxCouponsPerEntry());
final String ccpe = Map.fmtLong(getCapacityCouponsPerEntry());
final String te = Map.fmtLong(getTableEntries());
final String ce = Map.fmtLong(getCapacityEntries());
final String cce = Map.fmtLong(getCurrentCountEntries());
final String ae = Map.fmtLong(getActiveEntries());
final String de = Map.fmtLong(getDeletedEntries());
final String esb = Map.fmtDouble(getEntrySizeBytes());
final String mub = Map.fmtLong(getMemoryUsageBytes());
final StringBuilder sb = new StringBuilder();
final String thisSimpleName = this.getClass().getSimpleName();
sb.append("### ").append(thisSimpleName).append(" SUMMARY: ").append(LS);
sb.append(" Max Coupons Per Entry : ").append(mcpe).append(LS);
sb.append(" Capacity Coupons Per Entry: ").append(ccpe).append(LS);
sb.append(" Table Entries : ").append(te).append(LS);
sb.append(" Capacity Entries : ").append(ce).append(LS);
sb.append(" Current Count Entries : ").append(cce).append(LS);
sb.append(" Active Entries : ").append(ae).append(LS);
sb.append(" Deleted Entries : ").append(de).append(LS);
sb.append(" Entry Size Bytes : ").append(esb).append(LS);
sb.append(" Memory Usage Bytes : ").append(mub).append(LS);
sb.append("### END SKETCH SUMMARY").append(LS);
return sb.toString();
}
}
| 2,612 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/hllmap/CouponTraverseMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.hllmap;
import java.util.Arrays;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.hash.MurmurHash3;
/**
* Implements a key-value map where the value is a simple array of coupons. Search operations are a
* simple traverse of the consecutive coupons. Because of this, the maximum practical size of the
* coupon array is about 8 coupons.
*
* <p>The map is implemented as a prime-sized, Open Address, Double Hash, with deletes and a 1-bit
* state array. The size of this map can grow or shrink.
*
* @author Lee Rhodes
* @author Alexander Saydakov
* @author Kevin Lang
*/
final class CouponTraverseMap extends Map {
  // Relative Standard Error constant used for the 68%-confidence bounds.
  private static final double RSE = 0.408 / Math.sqrt(1024);

  // Fixed number of coupon slots per entry (practical maximum is about 8; see class javadoc).
  private final int maxCouponsPerKey_;
  private int tableEntries_;     // prime size of the hash table
  private int capacityEntries_;  // occupancy threshold that triggers a resize
  private int numActiveKeys_;
  private int numDeletedKeys_;
  private double entrySizeBytes_;

  //Arrays
  private byte[] keysArr_;
  private short[] couponsArr_;

  /**
   * <ul><li>State: 0: Empty always, don't need to look at 1st coupon. Coupons could be dirty.</li>
   * <li>State: 1: Valid entry or dirty. During rebuild, look at the first coupon to determine.
   * If first coupon != 0 means valid entry; first coupon == 0: dirty (we set to 0 when deleted)</li>
   * </ul>
   */
  private byte[] stateArr_;

  // Private: use getInstance(...) so all fields are initialized consistently.
  private CouponTraverseMap(final int keySizeBytes, final int maxCouponsPerKey) {
    super(keySizeBytes);
    maxCouponsPerKey_ = maxCouponsPerKey;
  }

  /**
   * Factory: creates a map at the minimum table size with all arrays allocated.
   * @param keySizeBytes fixed key size in bytes
   * @param maxCouponsPerKey fixed coupon slots per entry
   * @return the new map
   */
  static CouponTraverseMap getInstance(final int keySizeBytes, final int maxCouponsPerKey) {
    final CouponTraverseMap map = new CouponTraverseMap(keySizeBytes, maxCouponsPerKey);
    map.tableEntries_ = COUPON_MAP_MIN_NUM_ENTRIES;
    map.capacityEntries_ = (int)(map.tableEntries_ * COUPON_MAP_GROW_TRIGGER_FACTOR);
    map.numActiveKeys_ = 0;
    map.numDeletedKeys_ = 0;
    map.entrySizeBytes_ = updateEntrySizeBytes(map.tableEntries_, keySizeBytes, maxCouponsPerKey);
    map.keysArr_ = new byte[COUPON_MAP_MIN_NUM_ENTRIES * keySizeBytes];
    map.couponsArr_ = new short[COUPON_MAP_MIN_NUM_ENTRIES * maxCouponsPerKey];
    map.stateArr_ = new byte[COUPON_MAP_MIN_NUM_ENTRIES_ARR_SIZE];
    return map;
  }

  @Override //used for test
  double update(final byte[] key, final short coupon) {
    final int entryIndex = findOrInsertKey(key);
    return update(entryIndex, coupon);
  }

  /**
   * Inserts the value into the entry's coupon list by linear traversal.
   * Returns the resulting count of distinct coupons, or -maxCouponsPerKey_ when the
   * list is full and the value is new (the caller's signal to promote this key to a
   * larger map level).
   */
  @Override
  double update(final int entryIndex, final short value) {
    final int offset = entryIndex * maxCouponsPerKey_;
    boolean wasFound = false;
    for (int i = 0; i < maxCouponsPerKey_; i++) {
      if (couponsArr_[offset + i] == 0) { // first zero terminates the list
        if (wasFound) { return i; }       // duplicate: count unchanged
        couponsArr_[offset + i] = value;  // append new coupon
        return i + 1;
      }
      if (couponsArr_[offset + i] == value) {
        wasFound = true;
      }
    }
    if (wasFound) { return maxCouponsPerKey_; } // full, but a duplicate
    return -maxCouponsPerKey_; //signal to promote
  }

  @Override
  double getEstimate(final byte[] key) {
    final int entryIndex = findKey(key);
    if (entryIndex < 0) { return 0; }
    return getCouponCount(entryIndex);
  }

  @Override
  double getUpperBound(final byte[] key) {
    return getEstimate(key) * (1 + RSE);
  }

  @Override
  double getLowerBound(final byte[] key) {
    return getEstimate(key) * (1 - RSE);
  }

  /**
   * Returns entryIndex if the given key is found. If not found, returns one's complement entryIndex
   * of an empty slot for insertion, which may be over a deleted key.
   * @param key the given key
   * @return the entryIndex
   */
  @Override
  int findKey(final byte[] key) {
    final long[] hash = MurmurHash3.hash(key, SEED);
    int entryIndex = getIndex(hash[0], tableEntries_);
    int firstDeletedIndex = -1;
    final int loopIndex = entryIndex; // detect a full cycle through the prime-sized table
    do {
      if (isBitClear(stateArr_, entryIndex)) {
        // Truly empty slot ends the probe; prefer reusing an earlier deleted slot if seen.
        return firstDeletedIndex == -1 ? ~entryIndex : ~firstDeletedIndex; // found empty or deleted
      }
      if (couponsArr_[entryIndex * maxCouponsPerKey_] == 0) { //found deleted
        if (firstDeletedIndex == -1) { firstDeletedIndex = entryIndex; }
      } else if (Map.arraysEqual(keysArr_, entryIndex * keySizeBytes_, key, 0, keySizeBytes_)) {
        return entryIndex; // found key
      }
      entryIndex = (entryIndex + getStride(hash[1], tableEntries_)) % tableEntries_;
    } while (entryIndex != loopIndex);
    throw new SketchesArgumentException("Key not found and no empty slots!");
  }

  @Override
  int findOrInsertKey(final byte[] key) {
    int entryIndex = findKey(key);
    if (entryIndex < 0) {
      entryIndex = ~entryIndex;
      if (isBitSet(stateArr_, entryIndex)) { // reusing slot from a deleted key
        clearCouponArea(entryIndex);
        numDeletedKeys_--;
      }
      if ((numActiveKeys_ + numDeletedKeys_ + 1) > capacityEntries_) {
        resize();
        entryIndex = ~findKey(key); // re-probe: slot locations changed after resize
        assert entryIndex >= 0;
      }
      System.arraycopy(key, 0, keysArr_, entryIndex * keySizeBytes_, keySizeBytes_);
      setBit(stateArr_, entryIndex);
      numActiveKeys_++;
    }
    return entryIndex;
  }

  /**
   * Logically deletes the entry: zeroing the first coupon marks the slot dirty while the
   * state bit remains set (see stateArr_ javadoc). May shrink the table.
   */
  @Override
  void deleteKey(final int entryIndex) {
    couponsArr_[entryIndex * maxCouponsPerKey_] = 0;
    numActiveKeys_--;
    numDeletedKeys_++;
    if ((numActiveKeys_ > COUPON_MAP_MIN_NUM_ENTRIES)
        && (numActiveKeys_ < (tableEntries_ * COUPON_MAP_SHRINK_TRIGGER_FACTOR))) {
      resize();
    }
  }

  // Counts coupons up to the first zero (the list terminator).
  private int getCouponCount(final int entryIndex) {
    final int offset = entryIndex * maxCouponsPerKey_;
    for (int i = 0; i < maxCouponsPerKey_; i++) {
      if (couponsArr_[offset + i] == 0) {
        return i;
      }
    }
    return maxCouponsPerKey_;
  }

  @Override
  CouponsIterator getCouponsIterator(final int entryIndex) {
    return new CouponsIterator(couponsArr_, entryIndex * maxCouponsPerKey_, maxCouponsPerKey_);
  }

  @Override
  double getEntrySizeBytes() {
    return entrySizeBytes_;
  }

  @Override
  int getTableEntries() {
    return tableEntries_;
  }

  @Override
  int getCapacityEntries() {
    return capacityEntries_;
  }

  @Override
  int getCurrentCountEntries() {
    return numActiveKeys_ + numDeletedKeys_;
  }

  // NOTE(review): the trailing (4L * Integer.BYTES) covers only 4 of the int fields and
  // omits entrySizeBytes_ (double); this is a rough estimate — confirm the intent.
  @Override
  long getMemoryUsageBytes() {
    return keysArr_.length
        + ((long)couponsArr_.length * Short.BYTES)
        + stateArr_.length + (4L * Integer.BYTES);
  }

  @Override
  int getActiveEntries() {
    return numActiveKeys_;
  }

  @Override
  int getDeletedEntries() {
    return numDeletedKeys_;
  }

  @Override
  int getMaxCouponsPerEntry() {
    return maxCouponsPerKey_;
  }

  @Override
  int getCapacityCouponsPerEntry() {
    return maxCouponsPerKey_;
  }

  // Rebuilds the table at a new prime size (grow or shrink), re-inserting only
  // valid (non-deleted) entries; deleted entries are dropped.
  private void resize() { //can grow or shrink
    final byte[] oldKeysArr = keysArr_;
    final short[] oldCouponsArr = couponsArr_;
    final byte[] oldStateArr = stateArr_;
    final int oldSizeKeys = tableEntries_;
    tableEntries_ = Math.max(
      nextPrime((int) (numActiveKeys_ / COUPON_MAP_TARGET_FILL_FACTOR)),
      COUPON_MAP_MIN_NUM_ENTRIES
    );
    capacityEntries_ = (int)(tableEntries_ * COUPON_MAP_GROW_TRIGGER_FACTOR);
    numActiveKeys_ = 0;
    numDeletedKeys_ = 0;
    entrySizeBytes_ = updateEntrySizeBytes(tableEntries_, keySizeBytes_, maxCouponsPerKey_);
    keysArr_ = new byte[tableEntries_ * keySizeBytes_];
    couponsArr_ = new short[tableEntries_ * maxCouponsPerKey_];
    stateArr_ = new byte[(int) Math.ceil(tableEntries_ / 8.0)];
    //move data
    for (int i = 0; i < oldSizeKeys; i++) {
      if (isBitSet(oldStateArr, i) && (oldCouponsArr[i * maxCouponsPerKey_] != 0)) {
        final byte[] key =
            Arrays.copyOfRange(oldKeysArr, i * keySizeBytes_, (i * keySizeBytes_) + keySizeBytes_);
        final int index = insertKey(key);
        System.arraycopy(oldCouponsArr, i * maxCouponsPerKey_, couponsArr_,
            index * maxCouponsPerKey_, maxCouponsPerKey_);
      }
    }
  }

  // for internal use during resize, so no resize check here
  private int insertKey(final byte[] key) {
    final long[] hash = MurmurHash3.hash(key, SEED);
    int entryIndex = getIndex(hash[0], tableEntries_);
    final int loopIndex = entryIndex;
    do {
      if (isBitClear(stateArr_, entryIndex)) {
        System.arraycopy(key, 0, keysArr_, entryIndex * keySizeBytes_, keySizeBytes_);
        setBit(stateArr_, entryIndex);
        numActiveKeys_++;
        return entryIndex;
      }
      entryIndex = (entryIndex + getStride(hash[1], tableEntries_)) % tableEntries_;
    } while (entryIndex != loopIndex);
    throw new SketchesArgumentException("Key not found and no empty slots!");
  }

  // Zeroes the coupon slots of a slot being reused after a delete.
  private void clearCouponArea(final int entryIndex) {
    final int couponAreaIndex = entryIndex * maxCouponsPerKey_;
    for (int i = 0; i < maxCouponsPerKey_; i++) {
      couponsArr_[couponAreaIndex + i] = 0;
    }
  }

  // Per-entry cost: key bytes + coupon slots + the fractional share of the 1-bit state array.
  private static final double updateEntrySizeBytes(final int tableEntries, final int keySizeBytes,
      final int maxCouponsPerKey) {
    final double byteFraction = Math.ceil(tableEntries / 8.0) / tableEntries;
    return keySizeBytes + ((double) maxCouponsPerKey * Short.BYTES) + byteFraction;
  }
}
| 2,613 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/hllmap/SingleCouponMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.hllmap;
import java.util.Arrays;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.hash.MurmurHash3;
/**
* Implements a key-value map where the value is a single coupon or a map reference.
* This map holds all keys for all levels of the {@link UniqueCountMap}.
* This map is implemented with a prime sized Open Address, Double Hash, with a 1-bit state array,
* which indicates the contents of the value.
*
* @author Lee Rhodes
* @author Alexander Saydakov
* @author Kevin Lang
*/
final class SingleCouponMap extends Map {
  // Relative Standard Error constant used for the 68%-confidence bounds.
  private static final double RSE = 0.408 / Math.sqrt(1024);

  private int tableEntries_;     // prime size of the hash table
  private int capacityEntries_;  // occupancy threshold that triggers a resize
  private int curCountEntries_;  // current number of occupied slots
  private double entrySizeBytes_;

  // Arrays
  private byte[] keysArr_;
  private short[] couponsArr_;

  /**
   * <ul><li>state: 0: empty or valid; empty if coupon is 0, otherwise valid.</li>
   * <li>state: 1: original coupon has been promoted, current coupon contains a table #
   * reference instead.</li>
   * </ul>
   */
  private byte[] stateArr_;

  // Private: use getInstance(...) so all fields are initialized consistently.
  private SingleCouponMap(final int keySizeBytes) {
    super(keySizeBytes);
  }

  /**
   * Factory: creates a map with a prime table size at least initialNumEntries.
   * @param initialNumEntries target initial table size (rounded up to the next prime)
   * @param keySizeBytes fixed key size in bytes
   * @return the new map
   */
  static SingleCouponMap getInstance(final int initialNumEntries, final int keySizeBytes) {
    final int tableEntries = nextPrime(initialNumEntries);
    final SingleCouponMap map = new SingleCouponMap(keySizeBytes);

    map.tableEntries_ = tableEntries;
    map.capacityEntries_ = (int)(tableEntries * COUPON_MAP_GROW_TRIGGER_FACTOR);
    map.curCountEntries_ = 0;
    map.entrySizeBytes_ = updateEntrySizeBytes(tableEntries, keySizeBytes);

    map.keysArr_ = new byte[tableEntries * map.keySizeBytes_];
    map.couponsArr_ = new short[tableEntries];
    map.stateArr_ = new byte[(int) Math.ceil(tableEntries / 8.0)];
    return map;
  }

  @Override
  double update(final byte[] key, final short coupon) {
    final int entryIndex = findOrInsertKey(key);
    return update(entryIndex, coupon);
  }

  /**
   * Stores the coupon if the slot is empty or already holds the same coupon (estimate 1).
   * Returns 0 when a different coupon is present (signal to promote), or the negative
   * level number when the slot already references a higher-level map.
   */
  @Override
  double update(final int entryIndex, final short coupon) {
    if (couponsArr_[entryIndex] == 0) {
      couponsArr_[entryIndex] = coupon;
      return 1;
    }
    if (isCoupon(entryIndex)) {
      if (couponsArr_[entryIndex] == coupon) { //duplicate
        return 1;
      }
      return 0; // signal to promote
    }
    return -couponsArr_[entryIndex]; // negative level number
  }

  @Override
  double getEstimate(final byte[] key) {
    final int entryIndex = findKey(key);
    if (entryIndex < 0) { return 0; }
    if (isCoupon(entryIndex)) { return 1; }
    return -getCoupon(entryIndex); // negative: level #, zero: signal to promote
  }

  @Override
  double getUpperBound(final byte[] key) {
    return getEstimate(key) * (1 + RSE);
  }

  @Override
  double getLowerBound(final byte[] key) {
    return getEstimate(key) * (1 - RSE);
  }

  /**
   * Returns entryIndex if the given key is found. The coupon may be valid or contain a table index.
   * If not found, returns one's complement entryIndex
   * of an empty slot for insertion, which may be over a deleted key.
   * @param key the given key
   * @return the entryIndex
   */
  @Override
  int findKey(final byte[] key) {
    final long[] hash = MurmurHash3.hash(key, SEED);
    int entryIndex = getIndex(hash[0], tableEntries_);
    final int stride = getStride(hash[1], tableEntries_);
    final int loopIndex = entryIndex; // detect a full cycle through the prime-sized table
    do {
      if (couponsArr_[entryIndex] == 0) {
        return ~entryIndex; //empty
      }
      if (Map.arraysEqual(key, 0, keysArr_, entryIndex * keySizeBytes_, keySizeBytes_)) {
        return entryIndex;
      }
      entryIndex = (entryIndex + stride) % tableEntries_;
    } while (entryIndex != loopIndex);
    throw new SketchesArgumentException("Key not found and no empty slots!");
  }

  @Override
  int findOrInsertKey(final byte[] key) {
    int entryIndex = findKey(key);
    if (entryIndex < 0) {
      if (curCountEntries_ + 1 > capacityEntries_) {
        resize();
        entryIndex = findKey(key); // re-probe: slot locations changed after resize
        assert entryIndex < 0;
      }
      entryIndex = ~entryIndex;
      System.arraycopy(key, 0, keysArr_, entryIndex * keySizeBytes_, keySizeBytes_);
      curCountEntries_++;
    }
    return entryIndex;
  }

  @Override
  CouponsIterator getCouponsIterator(final int entryIndex) {
    return new CouponsIterator(couponsArr_, entryIndex, 1);
  }

  @Override
  int getMaxCouponsPerEntry() {
    return 1;
  }

  @Override
  int getCapacityCouponsPerEntry() {
    return 1;
  }

  @Override
  int getActiveEntries() {
    return curCountEntries_;
  }

  @Override
  int getDeletedEntries() {
    return 0; // this map never deletes entries
  }

  // True if the slot holds a real coupon; false if it holds a level (table) reference.
  boolean isCoupon(final int entryIndex) {
    return !isBitSet(stateArr_, entryIndex);
  }

  short getCoupon(final int entryIndex) {
    return couponsArr_[entryIndex];
  }

  // Stores a coupon or a level reference; isLevel selects the state-bit meaning (see stateArr_).
  void setCoupon(final int entryIndex, final short coupon, final boolean isLevel) {
    couponsArr_[entryIndex] = coupon;
    if (isLevel) {
      setBit(stateArr_, entryIndex);
    } else {
      clearBit(stateArr_, entryIndex);
    }
  }

  // Marks the entry as promoted: the slot now stores the target level number.
  void setLevel(final int entryIndex, final int level) {
    couponsArr_[entryIndex] = (short) level;
    setBit(stateArr_, entryIndex);
  }

  @Override
  double getEntrySizeBytes() {
    return entrySizeBytes_;
  }

  @Override
  int getTableEntries() {
    return tableEntries_;
  }

  @Override
  int getCapacityEntries() {
    return capacityEntries_;
  }

  @Override
  int getCurrentCountEntries() {
    return curCountEntries_;
  }

  @Override
  long getMemoryUsageBytes() {
    final long arrays = keysArr_.length
        + (long)couponsArr_.length * Short.BYTES
        + stateArr_.length;
    // 4 int fields (3 declared here + keySizeBytes_ from the base class) + 1 double field.
    final long other = 4 * 4 + 8;
    return arrays + other;
  }

  // Rebuilds the table at a larger prime size and re-inserts all occupied entries,
  // preserving each entry's coupon/level state bit.
  private void resize() {
    final byte[] oldKeysArr = keysArr_;
    final short[] oldCouponsArr = couponsArr_;
    final byte[] oldStateArr = stateArr_;
    final int oldTableEntries = tableEntries_;
    tableEntries_ = nextPrime((int) (curCountEntries_ / COUPON_MAP_TARGET_FILL_FACTOR));
    capacityEntries_ = (int)(tableEntries_ * COUPON_MAP_GROW_TRIGGER_FACTOR);
    keysArr_ = new byte[tableEntries_ * keySizeBytes_];
    couponsArr_ = new short[tableEntries_];
    stateArr_ = new byte[(int) Math.ceil(tableEntries_ / 8.0)];
    entrySizeBytes_ = updateEntrySizeBytes(tableEntries_, keySizeBytes_);
    //move the data
    for (int i = 0; i < oldTableEntries; i++) {
      if (oldCouponsArr[i] != 0) {
        final byte[] key =
            Arrays.copyOfRange(oldKeysArr, i * keySizeBytes_, i * keySizeBytes_ + keySizeBytes_);
        insertEntry(key, oldCouponsArr[i], isBitSet(oldStateArr, i));
      }
    }
  }

  // for internal use during resize, so no resize check here
  private void insertEntry(final byte[] key, final int coupon, final boolean setStateOne) {
    final long[] hash = MurmurHash3.hash(key, SEED);
    int entryIndex = getIndex(hash[0], tableEntries_);
    final int stride = getStride(hash[1], tableEntries_);
    final int loopIndex = entryIndex;
    do {
      if (couponsArr_[entryIndex] == 0) {
        System.arraycopy(key, 0, keysArr_, entryIndex * keySizeBytes_, keySizeBytes_);
        setCoupon(entryIndex, (short)coupon, setStateOne);
        return;
      }
      entryIndex = (entryIndex + stride) % tableEntries_;
    } while (entryIndex != loopIndex);
    throw new SketchesArgumentException("Key not found and no empty slots!");
  }

  // Per-entry cost: key bytes + one coupon + the fractional share of the 1-bit state array.
  private static final double updateEntrySizeBytes(final int tableEntries, final int keySizeBytes) {
    final double byteFraction = Math.ceil(tableEntries / 8.0) / tableEntries;
    return keySizeBytes + Short.BYTES + byteFraction;
  }
}
| 2,614 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/hllmap/CouponsIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.hllmap;
/**
* Common iterator class for maps that need one.
*
* @author Alex Saydakov
*/
/**
 * Common iterator class for maps that need one. Walks the fixed-size coupon window of one
 * map entry, skipping empty (zero) slots.
 *
 * @author Alex Saydakov
 */
class CouponsIterator {

  private final short[] arr_;  // backing coupon array shared with the map
  private final int off_;      // start of this entry's window within arr_
  private final int len_;      // number of slots in the window
  private int cur_;            // position within the window; -1 before the first next()

  CouponsIterator(final short[] couponsArr, final int offset, final int maxEntries) {
    arr_ = couponsArr;
    off_ = offset;
    len_ = maxEntries;
    cur_ = -1;
  }

  /**
   * Advances to the next nonzero coupon. Must be called before the first getValue().
   * @return true if a nonzero coupon was found within the window.
   */
  boolean next() {
    for (cur_++; cur_ < len_; cur_++) {
      if (arr_[off_ + cur_] != 0) {
        return true;
      }
    }
    return false;
  }

  /**
   * Returns the coupon at the current position.
   * @return the coupon at the current position.
   */
  short getValue() {
    return arr_[off_ + cur_];
  }
}
| 2,615 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/hllmap/HllMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.hllmap;
import static java.lang.Math.log;
import static java.lang.Math.sqrt;
import static org.apache.datasketches.common.Util.invPow2;
import java.util.Arrays;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SuppressFBWarnings;
import org.apache.datasketches.hash.MurmurHash3;
/**
* Implements a key-value map where the value is a compact HLL sketch of size k.
* The HLL bins are compacted into 10 bins per long so that a 1024 bins are compacted into
* 824 bytes, which is a 20% reduction in space. Higher density compressions are possible
* (up to 50%), but the required code is much more complex and considerably slower.
*
* <p>Each entry row, associated with a key, also contains 3 double registers for accurately
* tracking the HIP (Historical Inverse Probability) estimator. HLL implementations have multiple
* estimators and the early estimators in this implementation are quite novel and provide superior
* error performance over most other HLL implementations.
*
* @author Lee Rhodes
* @author KevinLang
* @author Alexander Saydakov
*/
final class HllMap extends Map {
private static final double LOAD_FACTOR = 15.0 / 16.0;
private static final int HLL_INIT_NUM_ENTRIES = 157;
private static final int HLL_INIT_NUM_ENTRIES_ARR_SIZE = (int) Math.ceil(HLL_INIT_NUM_ENTRIES / 8.0);
private static final float HLL_RESIZE_FACTOR = 2.0F;
private static final double RSE = sqrt(log(2.0)) / 32.0;
private final int k_;
private final int hllArrLongs_; //# of longs required to store the HLL array
private int tableEntries_; //Full size of the table
private int capacityEntries_; //max capacity entries defined by Load factor
private int curCountEntries_; //current count of valid entries
private float growthFactor_; //e.g., 1.2 to 2.0
private double entrySizeBytes_;
//Arrays
private byte[] keysArr_; //keys of zero are allowed
private long[] arrOfHllArr_;
private double[] invPow2SumHiArr_;
private double[] invPow2SumLoArr_;
private double[] hipEstAccumArr_;
private byte[] stateArr_;
  /**
   * Private constructor used to set all finals
   * @param keySizeBytes size of key in bytes
   * @param k size of HLL sketch
   */
  private HllMap(final int keySizeBytes, final int k) {
    super(keySizeBytes);
    k_ = k;
    // 10 six-bit HLL bins are packed into each long (see class javadoc), plus one spare long.
    hllArrLongs_ = (k / 10) + 1;
  }
  /**
   * Factory: creates a map at the initial table size with all parallel arrays allocated.
   * @param keySizeBytes fixed key size in bytes
   * @param k number of HLL bins per entry
   * @return the new map
   */
  static HllMap getInstance(final int keySizeBytes, final int k) {
    final HllMap map = new HllMap(keySizeBytes, k);

    map.tableEntries_ = HLL_INIT_NUM_ENTRIES;
    map.capacityEntries_ = (int)(HLL_INIT_NUM_ENTRIES * LOAD_FACTOR);
    map.curCountEntries_ = 0;
    map.growthFactor_ = HLL_RESIZE_FACTOR;
    map.entrySizeBytes_ = updateEntrySizeBytes(map.tableEntries_, keySizeBytes, map.hllArrLongs_);

    map.keysArr_ = new byte[HLL_INIT_NUM_ENTRIES * map.keySizeBytes_];
    map.arrOfHllArr_ = new long[HLL_INIT_NUM_ENTRIES * map.hllArrLongs_];
    map.invPow2SumHiArr_ = new double[HLL_INIT_NUM_ENTRIES];
    map.invPow2SumLoArr_ = new double[HLL_INIT_NUM_ENTRIES];
    map.hipEstAccumArr_ = new double[HLL_INIT_NUM_ENTRIES];
    map.stateArr_ = new byte[HLL_INIT_NUM_ENTRIES_ARR_SIZE];
    return map;
  }
  // Finds or inserts the key, then delegates to update(int, short).
  @Override
  double update(final byte[] key, final short coupon) {
    final int entryIndex = findOrInsertKey(key);
    return update(entryIndex, coupon);
  }
  /**
   * Folds the coupon into the entry's HLL array and returns the resulting HIP estimate.
   * @param entryIndex the entry to update
   * @param coupon the 16-bit coupon (bin address + value)
   * @return the updated HIP cardinality estimate for this entry
   */
  @Override
  double update(final int entryIndex, final short coupon) {
    updateHll(entryIndex, coupon); //update HLL array, updates HIP
    return hipEstAccumArr_[entryIndex];
  }
@Override
double getEstimate(final byte[] key) {
if (key == null) { return Double.NaN; }
final int entryIndex = findKey(key);
if (entryIndex < 0) {
return 0;
}
return hipEstAccumArr_[entryIndex];
}
  // Upper bound at roughly 68% confidence: estimate scaled up by one RSE.
  @Override
  double getUpperBound(final byte[] key) {
    return getEstimate(key) * (1 + RSE);
  }
  // Lower bound at roughly 68% confidence: estimate scaled down by one RSE.
  @Override
  double getLowerBound(final byte[] key) {
    return getEstimate(key) * (1 - RSE);
  }
  // Overwrites the HIP accumulator for the entry; used when an entry is seeded
  // with an estimate carried over from a lower map level.
  @Override
  void updateEstimate(final int entryIndex, final double estimate) {
    hipEstAccumArr_[entryIndex] = estimate;
  }
  /**
   * Returns the entry index for the given key given the array of keys, if found.
   * Otherwise, returns the one's complement of first empty entry found;
   * @param key the key to search for
   * @return the entry index of the given key, or the one's complement of the index if not found.
   */
  @Override
  final int findKey(final byte[] key) {
    final int keyLen = key.length;
    final long[] hash = MurmurHash3.hash(key, SEED);
    int entryIndex  = getIndex(hash[0], tableEntries_);
    final int stride = getStride(hash[1], tableEntries_);
    final int loopIndex = entryIndex; // detect a full cycle through the prime-sized table
    do {
      if (isBitClear(stateArr_, entryIndex)) { //check if slot is empty
        return ~entryIndex;
      }
      if (arraysEqual(key, 0, keysArr_, entryIndex * keyLen, keyLen)) { //check for key match
        return entryIndex;
      }
      entryIndex = (entryIndex + stride) % tableEntries_; // double-hash probe
    } while (entryIndex != loopIndex);
    throw new SketchesArgumentException("Key not found and no empty slots!");
  }
  // Finds the key, inserting a fresh row (key copied, state bit set, HIP registers
  // initialized) when absent. Resizes and re-probes if capacity is exceeded.
  @Override
  int findOrInsertKey(final byte[] key) {
    int entryIndex = findKey(key);
    if (entryIndex < 0) { //key not found, initialize new row
      entryIndex = ~entryIndex;
      System.arraycopy(key, 0, keysArr_, entryIndex * keySizeBytes_, keySizeBytes_);
      setBit(stateArr_, entryIndex);
      // HIP registers for an empty HLL sketch: sum of 2^-reg over k zero-valued bins is k.
      invPow2SumHiArr_[entryIndex] = k_;
      invPow2SumLoArr_[entryIndex] = 0;
      hipEstAccumArr_[entryIndex] = 0;
      curCountEntries_++;
      if (curCountEntries_ > capacityEntries_) {
        resize();
        entryIndex = findKey(key); // re-probe: slot locations changed after resize
        assert entryIndex >= 0;
      }
    }
    return entryIndex;
  }
  /** Returns the average number of storage bytes per entry (fractional; see updateEntrySizeBytes). */
  @Override
  double getEntrySizeBytes() {
    return entrySizeBytes_;
  }
  /** Returns the current (prime) size of the hash table in entries. */
  @Override
  int getTableEntries() {
    return tableEntries_;
  }
  /** Returns the entry count threshold that triggers a resize. */
  @Override
  int getCapacityEntries() {
    return capacityEntries_;
  }
  /** Returns the current number of occupied entries. */
  @Override
  int getCurrentCountEntries() {
    return curCountEntries_;
  }
  /**
   * Returns the approximate total bytes used by this map: the six backing arrays
   * plus a fixed allowance for the scalar fields.
   */
  @Override
  long getMemoryUsageBytes() {
    final long arrays = keysArr_.length
        + ((long) arrOfHllArr_.length * Long.BYTES)
        + ((long) invPow2SumLoArr_.length * Double.BYTES)
        + ((long) invPow2SumHiArr_.length * Double.BYTES)
        + ((long) hipEstAccumArr_.length * Double.BYTES)
        + stateArr_.length;
    //NOTE(review): "other" appears to account for the scalar fields
    //(5 ints + 1 float + 1 double) — confirm it still matches the field set
    final long other = (5L * Integer.BYTES) + Float.BYTES + Double.BYTES;
    return arrays + other;
  }
  /** Not applicable at the HLL level (coupons are absorbed into the HLL arrays); returns null. */
  @Override
  CouponsIterator getCouponsIterator(final int index) {
    // not applicable
    return null;
  }
  /** Not applicable at the HLL level; returns 0. */
  @Override
  int getMaxCouponsPerEntry() {
    // not applicable
    return 0;
  }
  /** Not applicable at the HLL level; returns 0. */
  @Override
  int getCapacityCouponsPerEntry() {
    // not applicable
    return 0;
  }
  /** Returns the number of active entries; this map does not support deletes, so all entries are active. */
  @Override
  int getActiveEntries() {
    return curCountEntries_;
  }
  /** Always 0: this map does not support deletes. */
  @Override
  int getDeletedEntries() {
    return 0;
  }
  /**
   * Find the first empty slot for the given key using the same OADH probe
   * sequence as findKey.
   * Only used by resize, where it is known that the key does not exist in the table.
   * Throws an exception if no empty slots.
   * @param key the given key
   * @param tableEntries prime size of table
   * @param stateArr the valid bit array
   * @return the first empty slot for the given key
   */
  private static final int findEmpty(final byte[] key, final int tableEntries, final byte[] stateArr) {
    final long[] hash = MurmurHash3.hash(key, SEED);
    int entryIndex = getIndex(hash[0], tableEntries);
    final int stride = getStride(hash[1], tableEntries);
    final int loopIndex = entryIndex; //detects a full wrap of the probe sequence
    do {
      if (isBitClear(stateArr, entryIndex)) { //check if slot is empty
        return entryIndex;
      }
      entryIndex = (entryIndex + stride) % tableEntries;
    } while (entryIndex != loopIndex);
    throw new SketchesArgumentException("No empty slots.");
  }
  //This method is specifically tied to the HLL array layout:
  //each entry owns hllArrLongs_ longs, and each long packs ten 6-bit HLL registers.
  @SuppressFBWarnings(value = "IM_MULTIPLYING_RESULT_OF_IREM", justification = "False Positive")
  private final boolean updateHll(final int entryIndex, final int coupon) {
    final int newValue = coupon16Value(coupon); //6-bit HLL value carried by the coupon
    final int hllIdx = coupon & (k_ - 1); //lower lgK bits
    final int longIdx = hllIdx / 10; //which long within the entry's HLL array
    final int shift = ((hllIdx % 10) * 6) & SIX_BIT_MASK; //bit offset of the register in that long
    long hllLong = arrOfHllArr_[(entryIndex * hllArrLongs_) + longIdx];
    final int oldValue = (int)(hllLong >>> shift) & SIX_BIT_MASK;
    //HLL registers only grow; an equal or smaller value is a no-op
    if (newValue <= oldValue) { return false; }
    // newValue > oldValue
    //update hipEstAccum BEFORE updating invPow2Sum
    final double invPow2Sum = invPow2SumHiArr_[entryIndex] + invPow2SumLoArr_[entryIndex];
    final double oneOverQ = k_ / invPow2Sum;
    hipEstAccumArr_[entryIndex] += oneOverQ;
    //update invPow2Sum, split into hi (values < 32) and lo (values >= 32) parts —
    //presumably to preserve floating-point precision; the hi part is initialized to k
    if (oldValue < 32) { invPow2SumHiArr_[entryIndex] -= invPow2(oldValue); }
    else { invPow2SumLoArr_[entryIndex] -= invPow2(oldValue); }
    if (newValue < 32) { invPow2SumHiArr_[entryIndex] += invPow2(newValue); }
    else { invPow2SumLoArr_[entryIndex] += invPow2(newValue); }
    //insert the new value
    hllLong &= ~(0X3FL << shift); //zero out the 6-bit field
    hllLong |= ((long)newValue) << shift; //insert
    arrOfHllArr_[(entryIndex * hllArrLongs_) + longIdx] = hllLong;
    return true;
  }
  /**
   * Grows the table to the next prime at least growthFactor_ times the current size
   * and rehashes every active entry into the new arrays.
   */
  private final void resize() {
    final int newTableEntries = nextPrime((int)(tableEntries_ * growthFactor_));
    final int newCapacityEntries = (int)(newTableEntries * LOAD_FACTOR);
    final byte[] newKeysArr = new byte[newTableEntries * keySizeBytes_];
    final long[] newArrOfHllArr = new long[newTableEntries * hllArrLongs_];
    final double[] newInvPow2Sum1 = new double[newTableEntries];
    final double[] newInvPow2Sum2 = new double[newTableEntries];
    final double[] newHipEstAccum = new double[newTableEntries];
    final byte[] newStateArr = new byte[(int) Math.ceil(newTableEntries / 8.0)]; //1 bit per entry
    for (int oldIndex = 0; oldIndex < tableEntries_; oldIndex++) {
      if (isBitClear(stateArr_, oldIndex)) { continue; } //skip empty slots
      // extract an old key
      final byte[] key =
          Arrays.copyOfRange(keysArr_, oldIndex * keySizeBytes_, (oldIndex + 1) * keySizeBytes_);
      //safe to use findEmpty: the key cannot already be in the new table
      final int newIndex = findEmpty(key, newTableEntries, newStateArr);
      System.arraycopy(key, 0, newKeysArr, newIndex * keySizeBytes_, keySizeBytes_); //put key
      //put the rest of the row
      System.arraycopy(arrOfHllArr_, oldIndex * hllArrLongs_, newArrOfHllArr,
          newIndex * hllArrLongs_, hllArrLongs_);
      newInvPow2Sum1[newIndex] = invPow2SumHiArr_[oldIndex];
      newInvPow2Sum2[newIndex] = invPow2SumLoArr_[oldIndex];
      newHipEstAccum[newIndex] = hipEstAccumArr_[oldIndex];
      setBit(newStateArr, newIndex);
    }
    //restore into sketch
    tableEntries_ = newTableEntries;
    capacityEntries_ = newCapacityEntries;
    //curCountEntries_, growthFactor_ unchanged
    entrySizeBytes_ = updateEntrySizeBytes(tableEntries_, keySizeBytes_, hllArrLongs_);
    keysArr_ = newKeysArr;
    arrOfHllArr_ = newArrOfHllArr;
    invPow2SumHiArr_ = newInvPow2Sum1; //init to k
    invPow2SumLoArr_ = newInvPow2Sum2; //init to 0
    hipEstAccumArr_ = newHipEstAccum; //init to 0
    stateArr_ = newStateArr;
  }
private static final double updateEntrySizeBytes(final int tableEntries, final int keySizeBytes,
final int hllArrLongs) {
final double byteFraction = Math.ceil(tableEntries / 8.0) / tableEntries;
return keySizeBytes + ((double) hllArrLongs * Long.BYTES) + (3.0 * Double.BYTES) + byteFraction;
}
}
| 2,616 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/hllmap/UniqueCountMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.hllmap;
import org.apache.datasketches.common.SketchesArgumentException;
/**
* This is a real-time, key-value HLL mapping sketch that tracks approximate unique counts of
* identifiers (the values) associated with each key. An example might be tracking the number of
* unique user identifiers associated with each IP address. This map has been specifically designed
* for the use-case where the number of keys is quite large (many millions) and the distribution of
* identifiers per key is very skewed. A typical distribution where this works well is a
* power-law distribution of identifiers per key of the form <i>y = Cx<sup>-α</sup></i>,
* where <i>α</i> < 0.5, and <i>C</i> is roughly <i>y<sub>max</sub></i>.
* For example, with 100M keys, over 75% of the keys would have only
* one identifier, 99% of the keys would have less than 20 identifiers, 99.9% would have less than
* 200 identifiers, and a very tiny fraction might have identifiers in the thousands.
*
* <p>The space consumed by this map is quite sensitive to the actual distribution of identifiers
* per key, so you should characterize and or experiment with your typical input streams.
* Nonetheless, our experiments on live streams of over 100M keys required about 1.4GB of space.
*
* <p>Given such highly-skewed distributions, using this map is far more efficient space-wise than
* the alternative of dedicating an HLL sketch per key. Based on our use cases, after
* subtracting the space required for key storage, the average bytes per key required for unique
* count estimation ({@link #getAverageSketchMemoryPerKey()}) is about 10.
*
* <p>Internally, this map is implemented as a hierarchy of internal hash maps with progressively
* increasing storage allocated for unique count estimation. As a key acquires more identifiers it
* is "promoted" up to a higher internal map. The final map of keys is a map of compact HLL
* sketches.
*
* <p>The unique values in all the internal maps, except the final HLL map, are stored in a special
* form called a coupon. A coupon is a 16-bit value that fully describes a k=1024 HLL bin.
* It contains 10 bits of address and a 6-bit HLL value.
*
* <p>All internal maps use a prime number size and Knuth's Open Addressing Double Hash (OADH)
* search algorithm.
*
* <p>The internal base map holds all the keys and each key is associated with one 16-bit value.
* Initially, the value is a single coupon. Once the key is promoted, this 16-bit field contains a
* reference to the internal map where the key is still active.
*
* <p>The intermediate maps between the base map and the final HLL map are of two types.
* The first few of these are called traverse maps where the coupons are
* stored as unsorted arrays. After the traverse maps are the coupon hash maps, where the coupons
* are stored in small OASH hash tables.
*
* <p>All the intermediate maps support deletes and can dynamically grow and shrink as required by
* the input stream.
*
* <p>The sketch estimator algorithms are unbiased with a Relative Standard Error (RSE)
* of about 2.6% with 68% confidence, or equivalently, about 5.2% with a 95% confidence.
*
* <p>In a parallel package in the sketches-misc repository, there are 2 classes that can be used
* from the command line to feed this mapping sketch piped from standard-in for experimental
* evaluation. The first is ProcessIpStream, which processes simple IP/ID pairs and the second,
* ProcessDistributionStream, which processes pairs that describe a distribution.
* In this same package is the VariousMapRSETest class that was used to generate the error plots
* for the web site. Please refer to the javadocs for those classes for more information.
*
* @author Lee Rhodes
* @author Alexander Saydakov
* @author Kevin Lang
*/
public class UniqueCountMap {
  private static final String LS = System.getProperty("line.separator");
  private static final int NUM_LEVELS = 10; // total of single coupon + traverse + coupon maps + hll
  private static final int NUM_TRAVERSE_MAPS = 3; //levels 1..3 are traverse maps
  private static final int HLL_K = 1024; //k of the HLL sketches in the final, top-level map
  private static final int INITIAL_NUM_ENTRIES = 1000003; //default base-map table size
  private static final int MIN_INITIAL_NUM_ENTRIES = 157; //floor for a user-supplied initial size
  private final int keySizeBytes_; //fixed key size; all keys must have exactly this length
  /** TraverseCouponMap or HashCouponMap instances */
  private final Map[] maps_;
  /**
   * Constructs a UniqueCountMap with an initial capacity of one million entries.
   * @param keySizeBytes must be at least 4 bytes to have sufficient entropy.
   */
  public UniqueCountMap(final int keySizeBytes) {
    this(INITIAL_NUM_ENTRIES, keySizeBytes);
  }
  /**
   * Constructs a UniqueCountMap with a given initial number of entries.
   *
   * @param initialNumEntries The initial number of entries provides a tradeoff between
   * wasted space, if too high, and wasted time resizing the table, if too low.
   * @param keySizeBytes must be at least 4 bytes to have sufficient entropy
   */
  public UniqueCountMap(final int initialNumEntries, final int keySizeBytes) {
    checkConstructorKeySize(keySizeBytes);
    final int initEntries = Math.max(initialNumEntries, MIN_INITIAL_NUM_ENTRIES);
    keySizeBytes_ = keySizeBytes;
    maps_ = new Map[NUM_LEVELS]; // includes base level and top level
    //only the base map is allocated up front; higher levels are created on demand
    maps_[0] = SingleCouponMap.getInstance(initEntries, keySizeBytes);
  }
  /**
   * Updates the map with a given key and identifier and returns the estimate of the number of
   * unique identifiers encountered so far for the given key.
   * @param key the given key
   * @param identifier the given identifier for unique counting associated with the key
   * @return the estimate of the number of unique identifiers encountered so far for the given key.
   */
  public double update(final byte[] key, final byte[] identifier) {
    if (key == null) { return Double.NaN; }
    checkMethodKeySize(key);
    if (identifier == null) { return getEstimate(key); }
    final short coupon = (short) Map.coupon16(identifier);
    final int baseMapIndex = maps_[0].findOrInsertKey(key);
    //a positive return is a valid estimate; a non-positive return encodes the (negated)
    //level of the map where this key currently lives, or a promotion signal
    final double baseMapEstimate = maps_[0].update(baseMapIndex, coupon);
    if (baseMapEstimate > 0) { return baseMapEstimate; }
    final int level = -(int) baseMapEstimate; // base map is level 0
    if (level == 0) {
      //the key's single-coupon slot in the base map is exhausted: promote to level 1
      return promote(key, coupon, maps_[0], baseMapIndex, level, baseMapIndex, 0);
    }
    //the key was previously promoted; update it in its current map
    final Map map = maps_[level];
    final int index = map.findOrInsertKey(key);
    final double estimate = map.update(index, coupon);
    if (estimate > 0) { return estimate; }
    //a negative estimate signals this level is saturated for the key: promote again
    return promote(key, coupon, map, index, level, baseMapIndex, -estimate);
  }
  /**
   * Retrieves the current estimate of unique count for a given key.
   * @param key given key
   * @return estimate of unique count so far
   */
  public double getEstimate(final byte[] key) {
    if (key == null) { return Double.NaN; }
    checkMethodKeySize(key);
    final double est = maps_[0].getEstimate(key);
    if (est >= 0.0) { return est; }
    //key has been promoted; the negated value is the level where it lives
    final int level = -(int)est;
    final Map map = maps_[level];
    return map.getEstimate(key);
  }
  /**
   * Returns the upper bound cardinality with respect to {@link #getEstimate(byte[])} associated
   * with the given key.
   * @param key the given key
   * @return the upper bound cardinality with respect to {@link #getEstimate(byte[])} associated
   * with the given key.
   */
  public double getUpperBound(final byte[] key) {
    if (key == null) { return Double.NaN; }
    checkMethodKeySize(key);
    final double est = maps_[0].getEstimate(key);
    //a non-negative base-map value is an exact count, so it is its own bound
    if (est >= 0.0) { return est; }
    //key has been promoted
    final int level = -(int)est;
    final Map map = maps_[level];
    return map.getUpperBound(key);
  }
  /**
   * Returns the lower bound cardinality with respect to {@link #getEstimate(byte[])} associated
   * with the given key.
   * @param key the given key
   * @return the lower bound cardinality with respect to {@link #getEstimate(byte[])} associated
   * with the given key.
   */
  public double getLowerBound(final byte[] key) {
    if (key == null) { return Double.NaN; }
    checkMethodKeySize(key);
    final double est = maps_[0].getEstimate(key);
    //a non-negative base-map value is an exact count, so it is its own bound
    if (est >= 0.0) { return est; }
    //key has been promoted
    final int level = -(int)est;
    final Map map = maps_[level];
    return map.getLowerBound(key);
  }
  /**
   * Returns the number of active, unique keys across all internal maps
   * @return the number of active, unique keys across all internal maps
   */
  public int getActiveEntries() {
    //every key, promoted or not, retains its entry in the base map
    return maps_[0].getCurrentCountEntries();
  }
  /**
   * Returns total bytes used by all internal maps
   * @return total bytes used by all internal maps
   */
  public long getMemoryUsageBytes() {
    long total = 0;
    for (int i = 0; i < maps_.length; i++) {
      if (maps_[i] != null) {
        total += maps_[i].getMemoryUsageBytes();
      }
    }
    return total;
  }
  /**
   * Returns total bytes used for key storage
   * @return total bytes used for key storage
   */
  public long getKeyMemoryUsageBytes() {
    long total = 0;
    for (int i = 0; i < maps_.length; i++) {
      if (maps_[i] != null) {
        total += (long) (maps_[i].getActiveEntries()) * keySizeBytes_;
      }
    }
    return total;
  }
  /**
   * Returns the average memory storage per key that is dedicated to sketching the unique counts.
   * @return the average memory storage per key that is dedicated to sketching the unique counts.
   */
  public double getAverageSketchMemoryPerKey() {
    return (double) (getMemoryUsageBytes() - getKeyMemoryUsageBytes()) / getActiveEntries();
  }
  /**
   * Returns the number of active internal maps so far.
   * Only the base map is initialized in the constructor, so this method would return 1.
   * As more keys are promoted up to higher level maps, the return value would grow until the
   * last level HLL map is allocated.
   * @return the number of active levels so far
   */
  int getActiveMaps() {
    int levels = 0;
    final int iMapsLen = maps_.length;
    for (int i = 0; i < iMapsLen; i++) {
      if (maps_[i] != null) { levels++; }
    }
    return levels;
  }
  /**
   * Returns the base map
   * @return the base map
   */
  Map getBaseMap() {
    return maps_[0];
  }
  /**
   * Returns the top-level HllMap. It may be null.
   * @return the top-level HllMap.
   */
  Map getHllMap() {
    return maps_[maps_.length - 1];
  }
  /**
   * Returns a string with a human-readable summary of the UniqueCountMap and all the internal maps
   * @return human-readable summary
   */
  @Override
  public String toString() {
    final long totKeys = getActiveEntries();
    final long totMem = getMemoryUsageBytes();
    final long keyMem = getKeyMemoryUsageBytes();
    final double avgValMemPerKey = getAverageSketchMemoryPerKey();
    final String ksb = Map.fmtLong(keySizeBytes_);
    final String alvls =  Map.fmtLong(getActiveMaps());
    final String tKeys = Map.fmtLong(totKeys);
    final String tMem = Map.fmtLong(totMem);
    final String kMem = Map.fmtLong(keyMem);
    final String avgValMem = Map.fmtDouble(avgValMemPerKey);
    final StringBuilder sb = new StringBuilder();
    final String thisSimpleName = this.getClass().getSimpleName();
    sb.append("## ").append(thisSimpleName).append(" SUMMARY: ").append(LS);
    sb.append("   Key Size Bytes            : ").append(ksb).append(LS);
    sb.append("   Active Map Levels         : ").append(alvls).append(LS);
    sb.append("   Total keys                : ").append(tKeys).append(LS);
    sb.append("   Total Memory Bytes        : ").append(tMem).append(LS);
    sb.append("   Total Key Memory Bytes    : ").append(kMem).append(LS);
    sb.append("   Avg Sketch Memory Bytes/Key: ").append(avgValMem).append(LS);
    sb.append(LS);
    for (int i = 0; i < maps_.length; i++) {
      final Map cMap = maps_[i];
      if (cMap != null) {
        sb.append(cMap.toString());
        sb.append(LS);
      }
    }
    sb.append("## ").append("END UNIQUE COUNT MAP SUMMARY");
    sb.append(LS);
    return sb.toString();
  }
  //records in the base map which level currently owns the key
  private void setLevelInBaseMap(final int index, final int level) {
    ((SingleCouponMap) maps_[0]).setLevel(index, level);
  }
  /**
   * Moves a key one level up: replays its accumulated coupons into the next-level map,
   * deletes it from the old map, carries the running estimate over (so the HIP estimator
   * stays continuous), applies the new coupon, and records the new level in the base map.
   */
  private double promote(final byte[] key, final short coupon, final Map fromMap, final int fromIndex,
      final int fromLevel, final int baseMapIndex, final double estimate) {
    final Map newMap = getMapForLevel(fromLevel + 1);
    final int newMapIndex = newMap.findOrInsertKey(key);
    //replay all coupons accumulated so far into the higher-level map
    final CouponsIterator it = fromMap.getCouponsIterator(fromIndex);
    while (it.next()) {
      final double est = newMap.update(newMapIndex, it.getValue());
      assert est > 0;
    }
    fromMap.deleteKey(fromIndex);
    newMap.updateEstimate(newMapIndex, estimate);
    final double newEstimate = newMap.update(newMapIndex, coupon);
    setLevelInBaseMap(baseMapIndex, fromLevel + 1);
    assert newEstimate > 0; // this must be positive since we have just promoted
    return newEstimate;
  }
  //lazily creates the map for the given level: traverse maps first, then coupon hash
  //maps, then the final HLL map; coupon capacity per key doubles with each level
  private Map getMapForLevel(final int level) {
    if (maps_[level] == null) {
      final int newLevelCapacity = 1 << level;
      if (level <= NUM_TRAVERSE_MAPS) {
        maps_[level] = CouponTraverseMap.getInstance(keySizeBytes_, newLevelCapacity);
      } else if (level < (maps_.length - 1)) {
        maps_[level] = CouponHashMap.getInstance(keySizeBytes_, newLevelCapacity);
      } else {
        maps_[level] = HllMap.getInstance(keySizeBytes_, HLL_K);
      }
    }
    return maps_[level];
  }
  //constructor precondition: keys must carry at least 4 bytes of entropy
  private static final void checkConstructorKeySize(final int keySizeBytes) {
    if (keySizeBytes < 4) {
      throw new SketchesArgumentException("KeySizeBytes must be >= 4: " + keySizeBytes);
    }
  }
  //every key passed to the public API must match the configured key size exactly
  private final void checkMethodKeySize(final byte[] key) {
    if (key.length != keySizeBytes_) {
      throw new SketchesArgumentException("Key size must be " + keySizeBytes_ + " bytes.");
    }
  }
}
| 2,617 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/hllmap/CouponHashMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.hllmap;
import static org.apache.datasketches.common.Util.checkIfIntPowerOf2;
import static org.apache.datasketches.common.Util.invPow2;
import java.util.Arrays;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.hash.MurmurHash3;
/**
* Implements a key-value map where the value is a hash map of coupons.
*
* <p>The outer map is implemented as a prime-sized, Open Address, Double Hash, with deletes, so
* this table can grow and shrink. Each entry row has a 1-byte count where 255 is a marker for
* "dirty" and zero is empty.
*
* <p>The inner hash tables are implemented with linear probing or OASH and a load factor of 0.75.
*
* @author Lee Rhodes
* @author Alexander Saydakov
* @author Kevin Lang
*/
final class CouponHashMap extends Map {
  private static final double INNER_LOAD_FACTOR = 0.75; //load factor of the per-key coupon table
  //curCount value of 255 marks a deleted entry; 0 marks an empty entry
  private static final byte DELETED_KEY_MARKER = (byte) 255;
  private static final int BYTE_MASK = 0XFF;
  private static final int COUPON_K = 1024; //each coupon describes one bin of a k=1024 HLL
  private static final double RSE = 0.408 / Math.sqrt(1024); //relative standard error
  private final int maxCouponsPerKey_;
  private final int capacityCouponsPerKey_; //promotion threshold: maxCoupons * load factor
  private final int entrySizeBytes_;
  private int tableEntries_; //prime size of the outer table
  private int capacityEntries_; //grow trigger for the outer table
  private int numActiveKeys_;
  private int numDeletedKeys_;
  //Arrays
  private byte[] keysArr_;
  private short[] couponsArr_; //tableEntries_ * maxCouponsPerKey_ slots
  private byte[] curCountsArr_; //also acts as a stateArr: 0 empty, 255 deleted
  private float[] invPow2SumArr_; //HIP register qt per entry
  private float[] hipEstAccumArr_; //HIP estimate accumulator per entry
  private CouponHashMap(final int keySizeBytes, final int maxCouponsPerKey) {
    super(keySizeBytes);
    maxCouponsPerKey_ = maxCouponsPerKey;
    capacityCouponsPerKey_ = (int)(maxCouponsPerKey * INNER_LOAD_FACTOR);
    //key + coupon slots + 1-byte count + float qt + float hip accumulator
    entrySizeBytes_ = keySizeBytes + (maxCouponsPerKey * Short.BYTES) + 1 + 4 + 4;
  }
  //factory; maxCouponsPerKey must be a power of 2 in [16, 256]
  static CouponHashMap getInstance(final int keySizeBytes, final int maxCouponsPerKey) {
    checkMaxCouponsPerKey(maxCouponsPerKey);
    final int tableEntries = COUPON_MAP_MIN_NUM_ENTRIES;
    final CouponHashMap map = new CouponHashMap(keySizeBytes, maxCouponsPerKey);
    map.tableEntries_ = tableEntries;
    map.capacityEntries_ = (int)(tableEntries * COUPON_MAP_GROW_TRIGGER_FACTOR);
    map.numActiveKeys_ = 0;
    map.numDeletedKeys_ = 0;
    map.keysArr_ = new byte[tableEntries * keySizeBytes];
    map.couponsArr_ = new short[tableEntries * maxCouponsPerKey];
    map.curCountsArr_ = new byte[tableEntries];
    map.invPow2SumArr_ = new float[tableEntries];
    map.hipEstAccumArr_ = new float[tableEntries];
    return map;
  }
  @Override
  double update(final byte[] key, final short coupon) {
    final int entryIndex = findOrInsertKey(key);
    return update(entryIndex, coupon); //negative when time to promote
  }
  /**
   * Inserts the coupon into the entry's inner hash table (linear probing) and updates
   * the HIP registers. Returns the current estimate; a NEGATIVE estimate signals that
   * the entry is at capacity and the key must be promoted.
   */
  @Override
  double update(final int entryIndex, final short coupon) {
    final int couponMapArrEntryIndex = entryIndex * maxCouponsPerKey_;
    int innerCouponIndex = (coupon & 0xFFFF) % maxCouponsPerKey_;
    //linear probe within this entry's coupon sub-array; cannot loop forever because
    //the capacity check below keeps the sub-array under INNER_LOAD_FACTOR full
    while (couponsArr_[couponMapArrEntryIndex + innerCouponIndex] != 0) {
      if (couponsArr_[couponMapArrEntryIndex + innerCouponIndex] == coupon) {
        return hipEstAccumArr_[entryIndex]; //duplicate, returns the estimate
      }
      innerCouponIndex = (innerCouponIndex + 1) % maxCouponsPerKey_; //linear search
    }
    //count is stored unsigned in a byte, hence the mask
    if (((curCountsArr_[entryIndex] + 1) & BYTE_MASK) > capacityCouponsPerKey_) {
      //returns the negative estimate, as signal to promote
      return -hipEstAccumArr_[entryIndex];
    }
    couponsArr_[couponMapArrEntryIndex + innerCouponIndex] = coupon; //insert
    curCountsArr_[entryIndex]++;
    //hip += k/qt; qt -= 1/2^(val);
    hipEstAccumArr_[entryIndex] += COUPON_K / invPow2SumArr_[entryIndex];
    invPow2SumArr_[entryIndex] -= invPow2(coupon16Value(coupon));
    return hipEstAccumArr_[entryIndex]; //returns the estimate
  }
  /** Returns the current estimate for the key, or 0 if the key is not present. */
  @Override
  double getEstimate(final byte[] key) {
    final int index = findKey(key);
    if (index < 0) { return 0; }
    return hipEstAccumArr_[index];
  }
  /** Returns the estimate plus one relative standard error. */
  @Override
  double getUpperBound(final byte[] key) {
    return getEstimate(key) * (1 + RSE);
  }
  /** Returns the estimate minus one relative standard error. */
  @Override
  double getLowerBound(final byte[] key) {
    return getEstimate(key) * (1 - RSE);
  }
  /** Overwrites the HIP estimate accumulator; used by key promotion. */
  @Override
  void updateEstimate(final int entryIndex, final double estimate) {
    if (entryIndex < 0) {
      throw new SketchesArgumentException("Key not found.");
    }
    hipEstAccumArr_[entryIndex] = (float) estimate;
  }
  /**
   * Returns entryIndex if the given key is found. If not found, returns one's complement index
   * of an empty slot for insertion, which may be over a deleted key.
   * @param key the given key
   * @return the entryIndex
   */
  @Override
  int findKey(final byte[] key) {
    final long[] hash = MurmurHash3.hash(key, SEED);
    int entryIndex = getIndex(hash[0], tableEntries_);
    int firstDeletedIndex = -1; //prefer reusing the first deleted slot seen on the probe path
    final int loopIndex = entryIndex;
    do {
      if (curCountsArr_[entryIndex] == 0) {
        return firstDeletedIndex == -1 ? ~entryIndex : ~firstDeletedIndex; // found empty or deleted
      }
      if (curCountsArr_[entryIndex] == DELETED_KEY_MARKER) {
        if (firstDeletedIndex == -1) {
          firstDeletedIndex = entryIndex;
        }
      } else if (Map.arraysEqual(keysArr_, entryIndex * keySizeBytes_, key, 0, keySizeBytes_)) {
        return entryIndex; // found key
      }
      entryIndex = (entryIndex + getStride(hash[1], tableEntries_)) % tableEntries_;
    } while (entryIndex != loopIndex);
    throw new SketchesArgumentException("Key not found and no empty slots!");
  }
  /**
   * Returns the entry index of the given key, inserting a fresh row (qt = k, hip = 0)
   * if absent. May reuse a deleted slot or trigger a resize.
   */
  @Override
  int findOrInsertKey(final byte[] key) {
    int entryIndex = findKey(key);
    if (entryIndex < 0) { //key not found
      entryIndex = ~entryIndex;
      if (curCountsArr_[entryIndex] == DELETED_KEY_MARKER) { // reusing slot from a deleted key
        Arrays.fill(couponsArr_, entryIndex * maxCouponsPerKey_,
            (entryIndex + 1) * maxCouponsPerKey_, (short) 0);
        curCountsArr_[entryIndex] = 0;
        numDeletedKeys_--;
      }
      if ((numActiveKeys_ + numDeletedKeys_) >= capacityEntries_) {
        resize();
        //the key is not yet in the table, so findKey must return a one's-complement
        //empty-slot index; flipping it yields the insertion index
        entryIndex = ~findKey(key);
        assert entryIndex >= 0;
      }
      //insert new key
      System.arraycopy(key, 0, keysArr_, entryIndex * keySizeBytes_, keySizeBytes_);
      //initialize HIP: qt <- k; hip <- 0;
      invPow2SumArr_[entryIndex] = COUPON_K;
      hipEstAccumArr_[entryIndex] = 0;
      numActiveKeys_++;
    }
    return entryIndex;
  }
  /**
   * Marks the entry deleted; shrinks the table when active keys fall below the
   * shrink-trigger fraction of the table size.
   */
  @Override
  void deleteKey(final int entryIndex) {
    curCountsArr_[entryIndex] = DELETED_KEY_MARKER;
    numActiveKeys_--;
    numDeletedKeys_++;
    if ((numActiveKeys_ > COUPON_MAP_MIN_NUM_ENTRIES)
        && (numActiveKeys_ < (tableEntries_ * COUPON_MAP_SHRINK_TRIGGER_FACTOR))) {
      resize();
    }
  }
  /** Iterates the coupon sub-array of the given entry; used during promotion. */
  @Override
  CouponsIterator getCouponsIterator(final int entryIndex) {
    return new CouponsIterator(couponsArr_, entryIndex * maxCouponsPerKey_, maxCouponsPerKey_);
  }
  @Override
  double getEntrySizeBytes() {
    return entrySizeBytes_;
  }
  @Override
  int getTableEntries() {
    return tableEntries_;
  }
  @Override
  int getCapacityEntries() {
    return capacityEntries_;
  }
  /** Returns occupied slots: active plus deleted-but-not-yet-reclaimed. */
  @Override
  int getCurrentCountEntries() {
    return numActiveKeys_ + numDeletedKeys_;
  }
  @Override
  long getMemoryUsageBytes() {
    final long arrays = keysArr_.length
        + ((long) couponsArr_.length * Short.BYTES)
        + curCountsArr_.length
        + ((long) invPow2SumArr_.length * Float.BYTES)
        + ((long) hipEstAccumArr_.length * Float.BYTES);
    //NOTE(review): looks like an allowance for the scalar int fields — confirm
    final long other = 4 * 5;
    return arrays + other;
  }
  @Override
  int getActiveEntries() {
    return numActiveKeys_;
  }
  @Override
  int getDeletedEntries() {
    return numDeletedKeys_;
  }
  @Override
  int getMaxCouponsPerEntry() {
    return maxCouponsPerKey_;
  }
  @Override
  int getCapacityCouponsPerEntry() {
    return capacityCouponsPerKey_;
  }
  //precondition: power of 2 in [16, 256]
  private static final void checkMaxCouponsPerKey(final int maxCouponsPerKey) {
    checkIfIntPowerOf2(maxCouponsPerKey, "maxCouponsPerKey");
    final int cpk = maxCouponsPerKey;
    if ((cpk < 16) || (cpk > 256)) {
      throw new SketchesArgumentException(
          "Required: 16 <= maxCouponsPerKey <= 256 : " + maxCouponsPerKey);
    }
  }
  /**
   * Rebuilds the table at a prime size targeting the configured fill factor and
   * rehashes only the active (non-deleted) entries, purging deleted slots.
   * Used for both growing and shrinking.
   */
  private void resize() {
    final byte[] oldKeysArr = keysArr_;
    final short[] oldCouponMapArr = couponsArr_;
    final byte[] oldCurCountsArr = curCountsArr_;
    final float[] oldInvPow2SumArr = invPow2SumArr_;
    final float[] oldHipEstAccumArr = hipEstAccumArr_;
    final int oldNumEntries = tableEntries_;
    tableEntries_ = Math.max(
        nextPrime((int) (numActiveKeys_ / COUPON_MAP_TARGET_FILL_FACTOR)),
        COUPON_MAP_MIN_NUM_ENTRIES
    );
    capacityEntries_ = (int)(tableEntries_ * COUPON_MAP_GROW_TRIGGER_FACTOR);
    keysArr_ = new byte[tableEntries_ * keySizeBytes_];
    couponsArr_ = new short[tableEntries_ * maxCouponsPerKey_];
    curCountsArr_ = new byte[tableEntries_];
    invPow2SumArr_ = new float[tableEntries_];
    hipEstAccumArr_ = new float[tableEntries_];
    numActiveKeys_ = 0;
    numDeletedKeys_ = 0;
    for (int i = 0; i < oldNumEntries; i++) {
      if ((oldCurCountsArr[i] != 0) && (oldCurCountsArr[i] != DELETED_KEY_MARKER)) {
        //extract an old valid key
        final byte[] key =
            Arrays.copyOfRange(oldKeysArr, i * keySizeBytes_, (i * keySizeBytes_) + keySizeBytes_);
        //insert the key and get its index
        final int index = insertKey(key);
        //copy the coupons array into that index
        System.arraycopy(oldCouponMapArr, i * maxCouponsPerKey_, couponsArr_,
            index * maxCouponsPerKey_, maxCouponsPerKey_);
        //transfer the count
        curCountsArr_[index] = oldCurCountsArr[i];
        //transfer the HIP registers
        invPow2SumArr_[index] = oldInvPow2SumArr[i];
        hipEstAccumArr_[index] = oldHipEstAccumArr[i];
      }
    }
  }
  // for internal use by resize, no resize check and no deleted key check here
  // no changes to HIP
  private int insertKey(final byte[] key) {
    final long[] hash = MurmurHash3.hash(key, SEED);
    int entryIndex = getIndex(hash[0], tableEntries_);
    final int loopIndex = entryIndex;
    do {
      if (curCountsArr_[entryIndex] == 0) {
        System.arraycopy(key, 0, keysArr_, entryIndex * keySizeBytes_, keySizeBytes_);
        numActiveKeys_++;
        return entryIndex;
      }
      entryIndex = (entryIndex + getStride(hash[1], tableEntries_)) % tableEntries_;
    } while (entryIndex != loopIndex);
    throw new SketchesArgumentException("Key not found and no empty slots!");
  }
}
| 2,618 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/hllmap/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* The hllmap package contains a space efficient HLL mapping sketch of keys to approximate unique
* count of identifiers. For example, counting the number of unique users (identifiers) per IP
* address.
*
* <p>In cases where the number of keys is very large, having an individual HLL sketch per key may
* not be practical. If the distribution of values per key is highly skewed where the vast
* majority of keys have only a few values then this mapping sketch will make sense as it will be
* far more space efficient than dedicating individual HLL sketches per key.
*
* <p>From our own testing, sketching 100 million IPv4 addresses with such a
* highly skewed distribution of identifiers per IP uses only 1.4GB of memory. This translates to
* an average of about 10 bytes per IP allocated to the equivalent of a full k=1024 HLL sketch
* and provides an RSE of less than 2.5%. Your results will vary depending on the actual
* distribution of identifiers per key.
*
* @see org.apache.datasketches.hllmap.UniqueCountMap
*/
package org.apache.datasketches.hllmap;
| 2,619 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/frequencies/ItemsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.frequencies;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.common.Util.checkBounds;
import static org.apache.datasketches.common.Util.exactLog2OfInt;
import static org.apache.datasketches.common.Util.isIntPowerOf2;
import static org.apache.datasketches.frequencies.PreambleUtil.EMPTY_FLAG_MASK;
import static org.apache.datasketches.frequencies.PreambleUtil.SER_VER;
import static org.apache.datasketches.frequencies.PreambleUtil.extractActiveItems;
import static org.apache.datasketches.frequencies.PreambleUtil.extractFamilyID;
import static org.apache.datasketches.frequencies.PreambleUtil.extractFlags;
import static org.apache.datasketches.frequencies.PreambleUtil.extractLgCurMapSize;
import static org.apache.datasketches.frequencies.PreambleUtil.extractLgMaxMapSize;
import static org.apache.datasketches.frequencies.PreambleUtil.extractPreLongs;
import static org.apache.datasketches.frequencies.PreambleUtil.extractSerVer;
import static org.apache.datasketches.frequencies.PreambleUtil.insertActiveItems;
import static org.apache.datasketches.frequencies.PreambleUtil.insertFamilyID;
import static org.apache.datasketches.frequencies.PreambleUtil.insertFlags;
import static org.apache.datasketches.frequencies.PreambleUtil.insertLgCurMapSize;
import static org.apache.datasketches.frequencies.PreambleUtil.insertLgMaxMapSize;
import static org.apache.datasketches.frequencies.PreambleUtil.insertPreLongs;
import static org.apache.datasketches.frequencies.PreambleUtil.insertSerVer;
import static org.apache.datasketches.frequencies.Util.LG_MIN_MAP_SIZE;
import static org.apache.datasketches.frequencies.Util.SAMPLE_SIZE;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Objects;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SketchesStateException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* <p>This sketch is useful for tracking approximate frequencies of items of type <i><T></i>
* with optional associated counts (<i><T></i> item, <i>long</i> count) that are members of a
* multiset of such items. The true frequency of an item is defined to be the sum of associated
* counts.</p>
*
* <p>This implementation provides the following capabilities:</p>
* <ul>
* <li>Estimate the frequency of an item.</li>
* <li>Return upper and lower bounds of any item, such that the true frequency is always
* between the upper and lower bounds.</li>
* <li>Return a global maximum error that holds for all items in the stream.</li>
* <li>Return an array of frequent items that qualify either a NO_FALSE_POSITIVES or a
* NO_FALSE_NEGATIVES error type.</li>
* <li>Merge itself with another sketch object created from this class.</li>
* <li>Serialize/Deserialize to/from a byte array.</li>
* </ul>
*
* <p><b>Space Usage</b></p>
*
* <p>The sketch is initialized with a <i>maxMapSize</i> that specifies the maximum physical
* length of the internal hash map of the form (<i><T></i> item, <i>long</i> count).
* The <i>maxMapSize</i> must be a power of 2.</p>
*
* <p>The hash map starts at a very small size (8 entries), and grows as needed up to the
* specified <i>maxMapSize</i>.</p>
*
* <p>Excluding external space required for the item objects, the internal memory space usage of
* this sketch is 18 * <i>mapSize</i> bytes (assuming 8 bytes for each Java reference), plus a small
* constant number of additional bytes. The internal memory space usage of this sketch will never
* exceed 18 * <i>maxMapSize</i> bytes, plus a small constant number of additional bytes.</p>
*
* <p><b>Maximum Capacity of the Sketch</b></p>
*
* <p>The LOAD_FACTOR for the hash map is internally set at 75%,
* which means at any time the map capacity of (item, count) pairs is <i>mapCap</i> = 0.75 *
* <i>mapSize</i>.
* The maximum capacity of (item, count) pairs of the sketch is <i>maxMapCap</i> = 0.75 *
* <i>maxMapSize</i>.</p>
*
* <p><b>Updating the sketch with (item, count) pairs</b></p>
*
* <p>If the item is found in the hash map, the mapped count field (the "counter") is
* incremented by the incoming count, otherwise, a new counter "(item, count) pair" is
* created. If the number of tracked counters reaches the maximum capacity of the hash map
* the sketch decrements all of the counters (by an approximately computed median), and
* removes any non-positive counters.</p>
*
* <p><b>Accuracy</b></p>
*
* <p>If fewer than 0.75 * <i>maxMapSize</i> different items are inserted into the sketch the
* estimated frequencies returned by the sketch will be exact.</p>
*
* <p>The logic of the frequent items sketch is such that the stored counts and true counts are
* never too different.
* More specifically, for any <i>item</i>, the sketch can return an estimate of the
* true frequency of <i>item</i>, along with upper and lower bounds on the frequency
* (that hold deterministically).</p>
*
* <p>For this implementation and for a specific active <i>item</i>, it is guaranteed that
* the true frequency will be between the Upper Bound (UB) and the Lower Bound (LB) computed for
* that <i>item</i>. Specifically, <i>(UB - LB) ≤ W * epsilon</i>, where <i>W</i> denotes the
* sum of all item counts, and <i>epsilon = 3.5/M</i>, where <i>M</i> is the <i>maxMapSize</i>.</p>
*
* <p>This is a worst case guarantee that applies to arbitrary inputs.<sup>1</sup>
* For inputs typically seen in practice <i>(UB-LB)</i> is usually much smaller.
* </p>
*
* <p><b>Background</b></p>
*
* <p>This code implements a variant of what is commonly known as the "Misra-Gries
* algorithm". Variants of it were discovered and rediscovered and redesigned several times
* over the years:</p>
* <ul><li>"Finding repeated elements", Misra, Gries, 1982</li>
* <li>"Frequency estimation of Internet packet streams with limited space" Demaine,
* Lopez-Ortiz, Munro, 2002</li>
* <li>"A simple algorithm for finding frequent elements in streams and bags" Karp, Shenker,
* Papadimitriou, 2003</li>
* <li>"Efficient Computation of Frequent and Top-k Elements in Data Streams" Metwally,
* Agrawal, Abbadi, 2006</li>
* </ul>
*
* <sup>1</sup> For speed we do employ some randomization that introduces a small probability that
* our proof of the worst-case bound might not apply to a given run. However, we have ensured
* that this probability is extremely small. For example, if the stream causes one table purge
* (rebuild), our proof of the worst case bound applies with probability at least 1 - 1E-14.
* If the stream causes 1E9 purges, our proof applies with probability at least 1 - 1E-5.
*
* @param <T> The type of item to be tracked by this sketch
*
* @author Justin Thaler
* @author Alexander Saydakov
*/
public class ItemsSketch<T> {
/**
* Log2 Maximum length of the arrays internal to the hash map supported by the data
* structure.
*/
private int lgMaxMapSize;
/**
* The current number of counters supported by the hash map.
*/
private int curMapCap; //the threshold to purge
/**
* Tracks the total of decremented counts.
*/
private long offset;
/**
* The sum of all frequencies of the stream so far.
*/
private long streamWeight = 0;
/**
* The maximum number of samples used to compute approximate median of counters when doing
* decrement
*/
private int sampleSize;
/**
* Hash map mapping stored items to approximate counts
*/
private ReversePurgeItemHashMap<T> hashMap;
/**
 * Construct this sketch with the parameter maxMapSize and the default initialMapSize (8).
 *
 * @param maxMapSize Determines the physical size of the internal hash map managed by this
 * sketch and must be a power of 2. The maximum capacity of this internal hash map is
 * 0.75 times maxMapSize. Both the ultimate accuracy and size of this sketch are
 * functions of maxMapSize.
 */
public ItemsSketch(final int maxMapSize) {
  //exactLog2OfInt also validates that maxMapSize is a power of 2 and throws otherwise
  this(exactLog2OfInt(maxMapSize, "maxMapSize"), LG_MIN_MAP_SIZE);
}
/**
 * Construct this sketch with parameter lgMaxMapSize and lgCurMapSize. This internal
 * constructor is used when deserializing the sketch.
 *
 * @param lgMaxMapSize Log2 of the physical size of the internal hash map managed by this
 * sketch. The maximum capacity of this internal hash map is 0.75 times 2^lgMaxMapSize.
 * Both the ultimate accuracy and size of this sketch are functions of lgMaxMapSize.
 *
 * @param lgCurMapSize Log2 of the starting (current) physical size of the internal hash
 * map managed by this sketch.
 */
ItemsSketch(final int lgMaxMapSize, final int lgCurMapSize) {
  //set initial size of hash map; both log sizes are clamped up to LG_MIN_MAP_SIZE (8 slots)
  this.lgMaxMapSize = Math.max(lgMaxMapSize, LG_MIN_MAP_SIZE);
  final int lgCurMapSz = Math.max(lgCurMapSize, LG_MIN_MAP_SIZE);
  hashMap = new ReversePurgeItemHashMap<>(1 << lgCurMapSz);
  this.curMapCap = hashMap.getCapacity(); //the grow/purge threshold for the current map
  //NOTE(review): maxMapCap is computed from the raw lgMaxMapSize parameter, not the
  //clamped field above — confirm intended if a caller could pass a value below the minimum.
  final int maxMapCap =
      (int) ((1 << lgMaxMapSize) * ReversePurgeItemHashMap.getLoadFactor());
  offset = 0;
  //bounded number of counters sampled to estimate the median during a purge
  sampleSize = Math.min(SAMPLE_SIZE, maxMapCap);
}
/**
 * Returns a sketch instance of this class from the given srcMem,
 * which must be a Memory representation of this sketch class.
 *
 * @param <T> The type of item that this sketch will track
 * @param srcMem a Memory representation of a sketch of this class.
 * <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
 * @param serDe an instance of ArrayOfItemsSerDe
 * @return a sketch instance of this class.
 */
public static <T> ItemsSketch<T> getInstance(final Memory srcMem,
    final ArrayOfItemsSerDe<T> serDe) {
  Objects.requireNonNull(srcMem, "srcMem must not be null.");
  Objects.requireNonNull(serDe, "serDe must not be null.");
  final long pre0 = PreambleUtil.checkPreambleSize(srcMem); //make sure preamble will fit
  final int maxPreLongs = Family.FREQUENCY.getMaxPreLongs();
  //unpack the fields of the first preamble long
  final int preLongs = extractPreLongs(pre0);         //Byte 0
  final int serVer = extractSerVer(pre0);             //Byte 1
  final int familyID = extractFamilyID(pre0);         //Byte 2
  final int lgMaxMapSize = extractLgMaxMapSize(pre0); //Byte 3
  final int lgCurMapSize = extractLgCurMapSize(pre0); //Byte 4
  final boolean empty = (extractFlags(pre0) & EMPTY_FLAG_MASK) != 0; //Byte 5
  // Checks
  final boolean preLongsEq1 = (preLongs == 1); //Byte 0; an empty image has exactly 1 preLong
  final boolean preLongsEqMax = (preLongs == maxPreLongs);
  if (!preLongsEq1 && !preLongsEqMax) {
    throw new SketchesArgumentException(
        "Possible Corruption: PreLongs must be 1 or " + maxPreLongs + ": " + preLongs);
  }
  if (serVer != SER_VER) { //Byte 1
    throw new SketchesArgumentException(
        "Possible Corruption: Ser Ver must be " + SER_VER + ": " + serVer);
  }
  final int actFamID = Family.FREQUENCY.getID(); //Byte 2
  if (familyID != actFamID) {
    throw new SketchesArgumentException(
        "Possible Corruption: FamilyID must be " + actFamID + ": " + familyID);
  }
  if (empty ^ preLongsEq1) { //Byte 5 and Byte 0; empty flag and 1-preLong must agree
    throw new SketchesArgumentException(
        "Possible Corruption: (PreLongs == 1) ^ Empty == True.");
  }
  if (empty) {
    return new ItemsSketch<>(lgMaxMapSize, LG_MIN_MAP_SIZE);
  }
  //get full preamble
  final long[] preArr = new long[preLongs];
  srcMem.getLongArray(0, preArr, 0, preLongs);
  final ItemsSketch<T> fis = new ItemsSketch<>(lgMaxMapSize, lgCurMapSize);
  fis.streamWeight = 0; //update after; the update loop below would otherwise double-count
  fis.offset = preArr[3];
  final int preBytes = preLongs << 3;
  final int activeItems = extractActiveItems(preArr[1]);
  //Get countArray
  final long[] countArray = new long[activeItems];
  final int reqBytes = preBytes + activeItems * Long.BYTES; //count Arr only
  checkBounds(0, reqBytes, srcMem.getCapacity()); //check Memory capacity
  srcMem.getLongArray(preBytes, countArray, 0, activeItems);
  //Get itemArray; items are serialized immediately after the count array
  final int itemsOffset = preBytes + (Long.BYTES * activeItems);
  final T[] itemArray = serDe.deserializeFromMemory(
      srcMem.region(itemsOffset, srcMem.getCapacity() - itemsOffset), 0, activeItems);
  //update the sketch; this rebuilds the internal hash map from the (item, count) pairs
  for (int i = 0; i < activeItems; i++) {
    fis.update(itemArray[i], countArray[i]);
  }
  fis.streamWeight = preArr[2]; //override streamWeight due to updating
  return fis;
}
/**
 * Returns the estimated <i>a priori</i> error given the maxMapSize for the sketch and the
 * estimatedTotalStreamWeight.
 * @param maxMapSize the planned map size to be used when constructing this sketch.
 * @param estimatedTotalStreamWeight the estimated total stream weight.
 * @return the estimated <i>a priori</i> error.
 */
public static double getAprioriError(final int maxMapSize, final long estimatedTotalStreamWeight) {
  //the a priori error is simply epsilon scaled by the total stream weight
  final double epsilon = getEpsilon(maxMapSize);
  return estimatedTotalStreamWeight * epsilon;
}
/**
 * Returns the current number of counters the sketch is configured to support.
 *
 * @return the current number of counters the sketch is configured to support.
 */
public int getCurrentMapCapacity() {
  //capacity = load factor (0.75) * current physical map size; also the grow/purge threshold
  return this.curMapCap;
}
/**
 * Returns epsilon used to compute <i>a priori</i> error.
 * This is just the value <i>3.5 / maxMapSize</i>.
 * @param maxMapSize the planned map size to be used when constructing this sketch.
 * @return epsilon used to compute <i>a priori</i> error.
 */
public static double getEpsilon(final int maxMapSize) {
  if (isIntPowerOf2(maxMapSize)) {
    return 3.5 / maxMapSize;
  }
  throw new SketchesArgumentException("maxMapSize is not a power of 2.");
}
/**
 * Gets the estimate of the frequency of the given item.
 * Note: The true frequency of an item would be the sum of the counts as a result of the
 * two update functions.
 *
 * @param item the given item
 * @return the estimate of the frequency of the given item
 */
public long getEstimate(final T item) {
  //A tracked item reports its raw counter plus the global offset; an untracked item reports 0.
  final long rawCount = hashMap.get(item);
  if (rawCount <= 0) {
    return 0;
  }
  return rawCount + offset;
}
/**
 * Gets the guaranteed lower bound frequency of the given item, which can never be
 * negative.
 *
 * @param item the given item.
 * @return the guaranteed lower bound frequency of the given item. That is, a number which
 * is guaranteed to be no larger than the real frequency.
 */
public long getLowerBound(final T item) {
  //LB = itemCount or 0; the raw counter (after purges) never exceeds the true frequency
  return hashMap.get(item);
}
/**
 * Returns an array of Rows that include frequent items, estimates, upper and lower bounds
 * given a threshold and an ErrorCondition. If the threshold is lower than getMaximumError(),
 * then getMaximumError() will be used instead.
 *
 * <p>The method first examines all active items in the sketch (items that have a counter).
 *
 * <p>If <i>ErrorType = NO_FALSE_NEGATIVES</i>, this will include an item in the result
 * list if getUpperBound(item) > threshold.
 * There will be no false negatives, i.e., no Type II error.
 * There may be items in the set with true frequencies less than the threshold
 * (false positives).</p>
 *
 * <p>If <i>ErrorType = NO_FALSE_POSITIVES</i>, this will include an item in the result
 * list if getLowerBound(item) > threshold.
 * There will be no false positives, i.e., no Type I error.
 * There may be items omitted from the set with true frequencies greater than the
 * threshold (false negatives).</p>
 *
 * @param threshold to include items in the result list
 * @param errorType determines whether no false positives or no false negatives are
 * desired.
 * @return an array of frequent items
 */
public Row<T>[] getFrequentItems(final long threshold, final ErrorType errorType) {
  //a threshold below the maximum error cannot be honored, so raise it to that floor
  final long effectiveThreshold = Math.max(threshold, getMaximumError());
  return sortItems(effectiveThreshold, errorType);
}
/**
 * Returns an array of Rows that include frequent items, estimates, upper and lower bounds
 * given an ErrorCondition and the default threshold.
 * This is the same as getFrequentItems(getMaximumError(), errorType)
 *
 * @param errorType determines whether no false positives or no false negatives are
 * desired.
 * @return an array of frequent items
 */
public Row<T>[] getFrequentItems(final ErrorType errorType) {
  //uses the maximum error (the global offset) as the default threshold
  return sortItems(getMaximumError(), errorType);
}
/**
 * @return An upper bound on the maximum error of getEstimate(item) for any item.
 * This is equivalent to the maximum distance between the upper bound and the lower bound
 * for any item.
 */
public long getMaximumError() {
  //offset accumulates every purge decrement, which bounds the error of any estimate
  return offset;
}
/**
 * Returns the maximum number of counters the sketch is configured to support.
 *
 * @return the maximum number of counters the sketch is configured to support.
 */
public int getMaximumMapCapacity() {
  //Consistency: use the load factor of ReversePurgeItemHashMap, the map implementation
  //actually backing this sketch (the constructor uses the same), rather than
  //ReversePurgeLongHashMap, which belongs to the long-specialized LongsSketch.
  return (int) ((1 << lgMaxMapSize) * ReversePurgeItemHashMap.getLoadFactor());
}
/**
 * @return the number of active items in the sketch.
 */
public int getNumActiveItems() {
  //an active item is one that currently holds a counter in the hash map
  return hashMap.getNumActive();
}
/**
 * Returns the sum of the frequencies in the stream seen so far by the sketch
 *
 * @return the sum of the frequencies in the stream seen so far by the sketch
 */
public long getStreamLength() {
  //streamWeight is the running total of all counts passed to update()
  return this.streamWeight;
}
/**
 * Gets the guaranteed upper bound frequency of the given item.
 *
 * @param item the given item
 * @return the guaranteed upper bound frequency of the given item. That is, a number which
 * is guaranteed to be no smaller than the real frequency.
 */
public long getUpperBound(final T item) {
  // UB = itemCount + offset; the true count can never exceed the counter plus all decrements
  return hashMap.get(item) + offset;
}
/**
 * Returns true if this sketch is empty
 *
 * @return true if this sketch is empty
 */
public boolean isEmpty() {
  //empty means no item currently holds a counter
  return getNumActiveItems() == 0;
}
/**
 * This function merges the other sketch into this one.
 * The other sketch may be of a different size.
 *
 * @param other sketch of this class
 * @return a sketch whose estimates are within the guarantees of the
 * largest error tolerance of the two merged sketches.
 */
public ItemsSketch<T> merge(final ItemsSketch<T> other) {
  if (other == null) { return this; }
  if (other.isEmpty()) { return this; }
  final long streamLen = this.streamWeight + other.streamWeight; //capture before merge
  final ReversePurgeItemHashMap.Iterator<T> iter = other.hashMap.iterator();
  while (iter.next()) { //this may add to offset during rebuilds
    this.update(iter.getKey(), iter.getValue());
  }
  this.offset += other.offset; //combine the global error offsets
  //update() above also folded other's counts into streamWeight, so restore the
  //exact total captured before the merge
  this.streamWeight = streamLen; //corrected streamWeight
  return this;
}
/**
 * Resets this sketch to a virgin state.
 */
public void reset() {
  offset = 0;
  streamWeight = 0;
  //discard all counters and shrink the map back to its minimum size (8 slots)
  hashMap = new ReversePurgeItemHashMap<>(1 << LG_MIN_MAP_SIZE);
  curMapCap = hashMap.getCapacity();
}
//Serialization
/**
 * Returns a byte array representation of this sketch
 * @param serDe an instance of ArrayOfItemsSerDe
 * @return a byte array representation of this sketch
 */
public byte[] toByteArray(final ArrayOfItemsSerDe<T> serDe) {
  final int preLongs;
  final int outBytes;
  final boolean empty = isEmpty();
  final int activeItems = getNumActiveItems();
  byte[] bytes = null; //serialized items; only needed when not empty
  if (empty) {
    preLongs = 1;
    outBytes = 8;
  } else {
    preLongs = Family.FREQUENCY.getMaxPreLongs();
    bytes = serDe.serializeToByteArray(hashMap.getActiveKeys());
    //layout: preamble longs, then one long count per active item, then the serialized items
    outBytes = ((preLongs + activeItems) << 3) + bytes.length;
  }
  final byte[] outArr = new byte[outBytes];
  final WritableMemory mem = WritableMemory.writableWrap(outArr);
  // build first preLong empty or not
  long pre0 = 0L;
  pre0 = insertPreLongs(preLongs, pre0); //Byte 0
  pre0 = insertSerVer(SER_VER, pre0); //Byte 1
  pre0 = insertFamilyID(Family.FREQUENCY.getID(), pre0); //Byte 2
  pre0 = insertLgMaxMapSize(lgMaxMapSize, pre0); //Byte 3
  pre0 = insertLgCurMapSize(hashMap.getLgLength(), pre0); //Byte 4
  pre0 = empty ? insertFlags(EMPTY_FLAG_MASK, pre0) : insertFlags(0, pre0); //Byte 5
  if (empty) {
    mem.putLong(0, pre0);
  } else {
    final long pre = 0;
    final long[] preArr = new long[preLongs];
    preArr[0] = pre0;
    preArr[1] = insertActiveItems(activeItems, pre);
    preArr[2] = this.streamWeight;
    preArr[3] = this.offset;
    mem.putLongArray(0, preArr, 0, preLongs);
    final int preBytes = preLongs << 3;
    //count array first, then the item bytes immediately after
    mem.putLongArray(preBytes, hashMap.getActiveValues(), 0, activeItems);
    mem.putByteArray(preBytes + (this.getNumActiveItems() << 3), bytes, 0, bytes.length);
  }
  return outArr;
}
/**
 * Returns a human readable summary of this sketch.
 * @return a human readable summary of this sketch.
 */
@Override
public String toString() {
  final StringBuilder sb = new StringBuilder();
  sb.append("FrequentItemsSketch<T>:").append(LS);
  sb.append("  Stream Length    : " + streamWeight).append(LS);
  sb.append("  Max Error Offset : " + offset).append(LS);
  sb.append(hashMap.toString());
  return sb.toString();
}
/**
 * Returns a human readable string of the preamble of a byte array image of a ItemsSketch.
 * @param byteArr the given byte array
 * @return a human readable string of the preamble of a byte array image of a ItemsSketch.
 */
public static String toString(final byte[] byteArr) {
  //read-only wrap; the byte array is not copied
  return toString(Memory.wrap(byteArr));
}
/**
 * Returns a human readable string of the preamble of a Memory image of a ItemsSketch.
 * @param mem the given Memory object
 * @return a human readable string of the preamble of a Memory image of a ItemsSketch.
 */
public static String toString(final Memory mem) {
  return PreambleUtil.preambleToString(mem);
}
/**
 * Update this sketch with an item and a frequency count of one.
 * @param item for which the frequency should be increased.
 */
public void update(final T item) {
  //delegates to the (item, count) update with a count of 1
  update(item, 1);
}
/**
 * Update this sketch with an item and a positive frequency count.
 * @param item for which the frequency should be increased. The sketch uses
 * hashCode() and equals() methods of the type T.
 * @param count the amount by which the frequency of the item should be increased.
 * A count of zero is a no-op, and a negative count will throw an exception.
 */
public void update(final T item, final long count) {
  if ((item == null) || (count == 0)) {
    return;
  }
  if (count < 0) {
    throw new SketchesArgumentException("Count may not be negative");
  }
  this.streamWeight += count;
  hashMap.adjustOrPutValue(item, count);
  if (getNumActiveItems() > curMapCap) { //over the threshold, we need to do something
    if (hashMap.getLgLength() < lgMaxMapSize) { //below tgt size, we can grow
      hashMap.resize(2 * hashMap.getLength());
      curMapCap = hashMap.getCapacity();
    } else { //At tgt size, must purge
      //purge decrements all counters by ~the sampled median and removes non-positive ones;
      //the decrement amount is folded into offset to preserve the error bounds
      offset += hashMap.purge(sampleSize);
      if (getNumActiveItems() > getMaximumMapCapacity()) {
        throw new SketchesStateException("Purge did not reduce active items.");
      }
    }
  }
}
/**
 * Row class that defines the return values from a getFrequentItems query.
 * @param <T> type of item
 */
public static class Row<T> implements Comparable<Row<T>> {
  final T item;    //the tracked item
  final long est;  //estimated frequency
  final long ub;   //upper bound on the true frequency
  final long lb;   //lower bound on the true frequency
  private static final String FMT = "  %12d%12d%12d %s";
  private static final String HFMT = "  %12s%12s%12s %s";
  Row(final T item, final long estimate, final long ub, final long lb) {
    this.item = item;
    this.est = estimate;
    this.ub = ub;
    this.lb = lb;
  }
  /**
   * @return item of type T
   */
  public T getItem() { return item; }
  /**
   * @return the estimate
   */
  public long getEstimate() { return est; }
  /**
   * @return the upper bound
   */
  public long getUpperBound() { return ub; }
  /**
   * @return the lower bound
   */
  public long getLowerBound() { return lb; }
  /**
   * @return the descriptive row header
   */
  public static String getRowHeader() {
    return String.format(HFMT,"Est", "UB", "LB", "Item");
  }
  @Override
  public String toString() {
    return String.format(FMT, est, ub, lb, item.toString());
  }
  /**
   * This compareTo is strictly limited to the Row.getEstimate() value and does not imply any
   * ordering whatsoever to the other elements of the row: item and upper and lower bounds.
   * Defined this way, this compareTo will be consistent with hashCode() and equals(Object).
   * @param that the other row to compare to.
   * @return a negative integer, zero, or a positive integer as this.getEstimate() is less than,
   * equal to, or greater than that.getEstimate().
   */
  @Override
  public int compareTo(final Row<T> that) {
    //Long.compare returns exactly -1, 0, or 1, matching the previous ternary chain
    return Long.compare(this.est, that.est);
  }
  /**
   * This hashCode is computed only from the Row.getEstimate() value.
   * Defined this way, this hashCode will be consistent with equals(Object):<br>
   * If (x.equals(y)) implies: x.hashCode() == y.hashCode().<br>
   * If (!x.equals(y)) does NOT imply: x.hashCode() != y.hashCode().
   * @return the hashCode computed from getEstimate().
   */
  @Override
  public int hashCode() {
    //Long.hashCode(est) == (int) (est ^ (est >>> 32)), so this equals the previous
    //manual fold of 31 * 1 + (int) (est ^ (est >>> 32))
    return 31 + Long.hashCode(est);
  }
  /**
   * This equals is computed only from the Row.getEstimate() value and does not imply equality
   * of the other items within the row: item and upper and lower bounds.
   * Defined this way, this equals will be consistent with compareTo(Row).
   * @param obj the other row to determine equality with.
   * @return true if this.getEstimate() equals ((Row&lt;T&gt;)obj).getEstimate().
   */
  @SuppressWarnings("unchecked")
  @Override
  public boolean equals(final Object obj) {
    if (this == obj) { return true; }
    if ( !(obj instanceof Row)) { return false; } //instanceof also rejects null
    final Row<T> that = (Row<T>) obj;
    return est == that.est;
  }
} //End of class Row<T>
/**
 * Collects all active items whose relevant bound meets the threshold into Rows,
 * sorted in descending order of estimate.
 * @param threshold the minimum bound value for inclusion
 * @param errorType selects which bound is compared against the threshold
 * @return the qualifying rows, sorted by descending estimate
 */
Row<T>[] sortItems(final long threshold, final ErrorType errorType) {
  //The previous two loop bodies were identical except for which bound was tested:
  //NO_FALSE_NEGATIVES admits an item when its upper bound clears the threshold,
  //NO_FALSE_POSITIVES when its lower bound does. Unified here to remove the duplication.
  final boolean useUpperBound = (errorType == ErrorType.NO_FALSE_NEGATIVES);
  final ArrayList<Row<T>> rowList = new ArrayList<>();
  final ReversePurgeItemHashMap.Iterator<T> iter = hashMap.iterator();
  while (iter.next()) {
    final long est = getEstimate(iter.getKey());
    final long ub = getUpperBound(iter.getKey());
    final long lb = getLowerBound(iter.getKey());
    if ((useUpperBound ? ub : lb) >= threshold) {
      rowList.add(new Row<>(iter.getKey(), est, ub, lb));
    }
  }
  //descending order; reverseOrder() compares exactly as the previous anonymous
  //comparator did (r2.compareTo(r1)), and List.sort is stable either way
  rowList.sort(Comparator.reverseOrder());
  @SuppressWarnings("unchecked")
  final Row<T>[] rowsArr =
      rowList.toArray((Row<T>[]) Array.newInstance(Row.class, rowList.size()));
  return rowsArr;
}
}
| 2,620 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/frequencies/Util.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.frequencies;
/**
 * Constants and hashing support shared by the frequencies sketches.
 */
final class Util {
  private Util() {} //static-only utility; not instantiable
  /**
   * Log2 of the initial physical size of the hash maps used by the frequencies
   * sketches (i.e., a starting size of 8 slots). The value is somewhat arbitrary.
   */
  static final int LG_MIN_MAP_SIZE = 3;
  /**
   * The number of randomly selected counters sampled when estimating the median
   * during a purge. Large enough that the empirical median of the sample is a
   * constant-factor approximation of the true median with high probability.
   */
  static final int SAMPLE_SIZE = 1024;
  /**
   * Mixes the given key into an index for the hash table. This is the finalization
   * step of Austin Appleby's MurmurHash3 algorithm, also used by the Trove for Java
   * libraries.
   * @param key to be hashed
   * @return the mixed hash of the key
   */
  static long hash(final long key) {
    long h = key;
    h ^= h >>> 33;
    h *= 0xff51afd7ed558ccdL;
    h ^= h >>> 33;
    h *= 0xc4ceb9fe1a85ec53L;
    h ^= h >>> 33;
    return h;
  }
}
| 2,621 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/frequencies/LongsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.frequencies;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.common.Util.checkBounds;
import static org.apache.datasketches.common.Util.exactLog2OfInt;
import static org.apache.datasketches.common.Util.isIntPowerOf2;
import static org.apache.datasketches.frequencies.PreambleUtil.EMPTY_FLAG_MASK;
import static org.apache.datasketches.frequencies.PreambleUtil.SER_VER;
import static org.apache.datasketches.frequencies.PreambleUtil.extractActiveItems;
import static org.apache.datasketches.frequencies.PreambleUtil.extractFamilyID;
import static org.apache.datasketches.frequencies.PreambleUtil.extractFlags;
import static org.apache.datasketches.frequencies.PreambleUtil.extractLgCurMapSize;
import static org.apache.datasketches.frequencies.PreambleUtil.extractLgMaxMapSize;
import static org.apache.datasketches.frequencies.PreambleUtil.extractPreLongs;
import static org.apache.datasketches.frequencies.PreambleUtil.extractSerVer;
import static org.apache.datasketches.frequencies.PreambleUtil.insertActiveItems;
import static org.apache.datasketches.frequencies.PreambleUtil.insertFamilyID;
import static org.apache.datasketches.frequencies.PreambleUtil.insertFlags;
import static org.apache.datasketches.frequencies.PreambleUtil.insertLgCurMapSize;
import static org.apache.datasketches.frequencies.PreambleUtil.insertLgMaxMapSize;
import static org.apache.datasketches.frequencies.PreambleUtil.insertPreLongs;
import static org.apache.datasketches.frequencies.PreambleUtil.insertSerVer;
import static org.apache.datasketches.frequencies.Util.LG_MIN_MAP_SIZE;
import static org.apache.datasketches.frequencies.Util.SAMPLE_SIZE;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Objects;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SketchesStateException;
import org.apache.datasketches.common.SuppressFBWarnings;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* <p>This sketch is useful for tracking approximate frequencies of <i>long</i> items with optional
* associated counts (<i>long</i> item, <i>long</i> count) that are members of a multiset of
* such items. The true frequency of an item is defined to be the sum of associated counts.</p>
*
* <p>This implementation provides the following capabilities:</p>
* <ul>
* <li>Estimate the frequency of an item.</li>
* <li>Return upper and lower bounds of any item, such that the true frequency is always
* between the upper and lower bounds.</li>
* <li>Return a global maximum error that holds for all items in the stream.</li>
* <li>Return an array of frequent items that qualify either a NO_FALSE_POSITIVES or a
* NO_FALSE_NEGATIVES error type.</li>
* <li>Merge itself with another sketch object created from this class.</li>
* <li>Serialize/Deserialize to/from a String or byte array.</li>
* </ul>
*
* <p><b>Space Usage</b></p>
*
* <p>The sketch is initialized with a <i>maxMapSize</i> that specifies the maximum physical
* length of the internal hash map of the form (<i>long</i> item, <i>long</i> count).
* The <i>maxMapSize</i> must be a power of 2.</p>
*
* <p>The hash map starts at a very small size (8 entries), and grows as needed up to the
* specified <i>maxMapSize</i>.</p>
*
* <p>At any moment the internal memory space usage of this sketch is 18 * <i>mapSize</i> bytes,
* plus a small constant number of additional bytes. The maximum internal memory space usage of
* this sketch will never exceed 18 * <i>maxMapSize</i> bytes, plus a small constant number of
* additional bytes.</p>
*
* <p><b>Maximum Capacity of the Sketch</b></p>
*
* <p>The LOAD_FACTOR for the hash map is internally set at 75%,
* which means at any time the map capacity of (item, count) pairs is <i>mapCap</i> =
* 0.75 * <i>mapSize</i>.
* The maximum capacity of (item, count) pairs of the sketch is <i>maxMapCap</i> =
* 0.75 * <i>maxMapSize</i>.</p>
*
* <p><b>Updating the sketch with (item, count) pairs</b></p>
*
* <p>If the item is found in the hash map, the mapped count field (the "counter") is
* incremented by the incoming count, otherwise, a new counter "(item, count) pair" is
* created. If the number of tracked counters reaches the maximum capacity of the hash map
* the sketch decrements all of the counters (by an approximately computed median), and
* removes any non-positive counters.</p>
*
* <p><b>Accuracy</b></p>
*
* <p>If fewer than 0.75 * <i>maxMapSize</i> different items are inserted into the sketch the
* estimated frequencies returned by the sketch will be exact.</p>
*
* <p>The logic of the frequent items sketch is such that the stored counts and true counts are
* never too different.
* More specifically, for any <i>item</i>, the sketch can return an estimate of the
* true frequency of <i>item</i>, along with upper and lower bounds on the frequency
* (that hold deterministically).</p>
*
* <p>For this implementation and for a specific active <i>item</i>, it is guaranteed that
* the true frequency will be between the Upper Bound (UB) and the Lower Bound (LB) computed for
* that <i>item</i>. Specifically, <i>(UB- LB) ≤ W * epsilon</i>, where <i>W</i> denotes the
* sum of all item counts, and <i>epsilon = 3.5/M</i>, where <i>M</i> is the <i>maxMapSize</i>.</p>
*
* <p>This is a worst case guarantee that applies to arbitrary inputs.<sup>1</sup>
* For inputs typically seen in practice <i>(UB-LB)</i> is usually much smaller.
* </p>
*
* <p><b>Background</b></p>
*
* <p>This code implements a variant of what is commonly known as the "Misra-Gries
* algorithm". Variants of it were discovered and rediscovered and redesigned several times
* over the years:</p>
* <ul><li>"Finding repeated elements", Misra, Gries, 1982</li>
* <li>"Frequency estimation of Internet packet streams with limited space" Demaine,
* Lopez-Ortiz, Munro, 2002</li>
* <li>"A simple algorithm for finding frequent elements in streams and bags" Karp, Shenker,
* Papadimitriou, 2003</li>
* <li>"Efficient Computation of Frequent and Top-k Elements in Data Streams" Metwally,
* Agrawal, Abbadi, 2006</li>
* </ul>
*
* <sup>1</sup> For speed we do employ some randomization that introduces a small probability that
* our proof of the worst-case bound might not apply to a given run. However, we have ensured
* that this probability is extremely small. For example, if the stream causes one table purge
* (rebuild), our proof of the worst case bound applies with probability at least 1 - 1E-14.
* If the stream causes 1E9 purges, our proof applies with probability at least 1 - 1E-5.
*
* @author Justin Thaler
* @author Lee Rhodes
*/
@SuppressFBWarnings(value = "SIC_INNER_SHOULD_BE_STATIC_ANON", justification = "Harmless, fix later")
public class LongsSketch {
private static final int STR_PREAMBLE_TOKENS = 6;
/**
* Log2 Maximum length of the arrays internal to the hash map supported by the data
* structure.
*/
private int lgMaxMapSize;
/**
* The current number of counters supported by the hash map.
*/
private int curMapCap; //the threshold to purge
/**
* Tracks the total of decremented counts.
*/
private long offset;
/**
* The sum of all frequencies of the stream so far.
*/
private long streamWeight = 0;
/**
* The maximum number of samples used to compute approximate median of counters when doing
* decrement
*/
private int sampleSize;
/**
* Hash map mapping stored items to approximate counts
*/
private ReversePurgeLongHashMap hashMap;
/**
* Construct this sketch with the parameter maxMapSize and the default initialMapSize (8).
*
* @param maxMapSize Determines the physical size of the internal hash map managed by this
* sketch and must be a power of 2. The maximum capacity of this internal hash map is
* 0.75 times * maxMapSize. Both the ultimate accuracy and size of this sketch are a
* function of maxMapSize.
*/
public LongsSketch(final int maxMapSize) {
  //exactLog2OfInt throws if maxMapSize is not a positive power of 2;
  //the starting (current) map size is always the minimum (LG_MIN_MAP_SIZE).
  this(exactLog2OfInt(maxMapSize, "maxMapSize"), LG_MIN_MAP_SIZE);
}
/**
* Construct this sketch with parameter lgMapMapSize and lgCurMapSize. This internal
* constructor is used when deserializing the sketch.
*
* @param lgMaxMapSize Log2 of the physical size of the internal hash map managed by this
* sketch. The maximum capacity of this internal hash map is 0.75 times 2^lgMaxMapSize.
* Both the ultimate accuracy and size of this sketch are a function of lgMaxMapSize.
*
* @param lgCurMapSize Log2 of the starting (current) physical size of the internal hash
* map managed by this sketch.
*/
LongsSketch(final int lgMaxMapSize, final int lgCurMapSize) {
  //Clamp both the max and the starting size to the minimum supported map size.
  this.lgMaxMapSize = Math.max(lgMaxMapSize, LG_MIN_MAP_SIZE);
  final int lgCurMapSz = Math.max(lgCurMapSize, LG_MIN_MAP_SIZE);
  hashMap = new ReversePurgeLongHashMap(1 << lgCurMapSz);
  curMapCap = hashMap.getCapacity();
  //Fix: compute maxMapCap from the clamped field, not the raw parameter.
  //Previously, a parameter below LG_MIN_MAP_SIZE produced a maxMapCap (and thus
  //sampleSize) inconsistent with getMaximumMapCapacity(), which uses the field.
  final int maxMapCap =
      (int) ((1 << this.lgMaxMapSize) * ReversePurgeLongHashMap.getLoadFactor());
  offset = 0;
  sampleSize = Math.min(SAMPLE_SIZE, maxMapCap);
}
/**
* Returns a sketch instance of this class from the given srcMem,
* which must be a Memory representation of this sketch class.
*
* @param srcMem a Memory representation of a sketch of this class.
* <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
* @return a sketch instance of this class.
*/
public static LongsSketch getInstance(final Memory srcMem) {
  Objects.requireNonNull(srcMem, "Source Memory must not be null.");
  final long pre0 = PreambleUtil.checkPreambleSize(srcMem); //check Memory capacity
  final int maxPreLongs = Family.FREQUENCY.getMaxPreLongs();
  //Decode the fields packed into the first preamble long.
  final int preLongs = extractPreLongs(pre0);         //Byte 0
  final int serVer = extractSerVer(pre0);             //Byte 1
  final int familyID = extractFamilyID(pre0);         //Byte 2
  final int lgMaxMapSize = extractLgMaxMapSize(pre0); //Byte 3
  final int lgCurMapSize = extractLgCurMapSize(pre0); //Byte 4
  final boolean empty = (extractFlags(pre0) & EMPTY_FLAG_MASK) != 0; //Byte 5
  // Checks: an empty image has exactly 1 preamble long; otherwise maxPreLongs.
  final boolean preLongsEq1 = (preLongs == 1);        //Byte 0
  final boolean preLongsEqMax = (preLongs == maxPreLongs);
  if (!preLongsEq1 && !preLongsEqMax) {
    throw new SketchesArgumentException(
        "Possible Corruption: PreLongs must be 1 or " + maxPreLongs + ": " + preLongs);
  }
  if (serVer != SER_VER) {                            //Byte 1
    throw new SketchesArgumentException(
        "Possible Corruption: Ser Ver must be " + SER_VER + ": " + serVer);
  }
  final int actFamID = Family.FREQUENCY.getID();      //Byte 2
  if (familyID != actFamID) {
    throw new SketchesArgumentException(
        "Possible Corruption: FamilyID must be " + actFamID + ": " + familyID);
  }
  //The empty flag and the one-preamble-long form must agree.
  if (empty ^ preLongsEq1) {                          //Byte 5 and Byte 0
    throw new SketchesArgumentException(
        "Possible Corruption: (PreLongs == 1) ^ Empty == True.");
  }
  if (empty) {
    return new LongsSketch(lgMaxMapSize, LG_MIN_MAP_SIZE);
  }
  //get full preamble
  final long[] preArr = new long[preLongs];
  srcMem.getLongArray(0, preArr, 0, preLongs);
  final LongsSketch fls = new LongsSketch(lgMaxMapSize, lgCurMapSize);
  fls.streamWeight = 0; //update after
  fls.offset = preArr[3];
  final int preBytes = preLongs << 3;
  final int activeItems = extractActiveItems(preArr[1]);
  //Get countArray. Layout after the preamble: count array, then item array.
  final long[] countArray = new long[activeItems];
  final int reqBytes = preBytes + 2 * activeItems * Long.BYTES; //count Arr + Items Arr
  checkBounds(0, reqBytes, srcMem.getCapacity()); //check Memory capacity
  srcMem.getLongArray(preBytes, countArray, 0, activeItems);
  //Get itemArray
  final int itemsOffset = preBytes + (Long.BYTES * activeItems);
  final long[] itemArray = new long[activeItems];
  srcMem.getLongArray(itemsOffset, itemArray, 0, activeItems);
  //Rebuild by re-updating each (item, count) pair; this inflates streamWeight,
  //which is overwritten with the serialized value below.
  for (int i = 0; i < activeItems; i++) {
    fls.update(itemArray[i], countArray[i]);
  }
  fls.streamWeight = preArr[2]; //override streamWeight due to updating
  return fls;
}
/**
* Returns a sketch instance of this class from the given String,
* which must be a String representation of this sketch class.
*
* @param string a String representation of a sketch of this class.
* @return a sketch instance of this class.
*/
public static LongsSketch getInstance(final String string) {
  Objects.requireNonNull(string, "string must not be null.");
  //Token layout (see serializeToString): serVer, famID, lgMaxMapSize, flags,
  //streamWeight, offset, then the map: numActive, curMapLen, (key, value)*.
  final String[] tokens = string.split(",");
  if (tokens.length < (STR_PREAMBLE_TOKENS + 2)) {
    throw new SketchesArgumentException(
        "String not long enough: " + tokens.length);
  }
  final int serVer = Integer.parseInt(tokens[0]);
  final int famID = Integer.parseInt(tokens[1]);
  final int lgMax = Integer.parseInt(tokens[2]);
  final int flags = Integer.parseInt(tokens[3]);
  final long streamWt = Long.parseLong(tokens[4]);
  final long offset = Long.parseLong(tokens[5]); //error offset
  //should always get at least the next 2 from the map
  final int numActive = Integer.parseInt(tokens[6]);
  final int lgCur = Integer.numberOfTrailingZeros(Integer.parseInt(tokens[7]));
  //checks
  if (serVer != SER_VER) {
    throw new SketchesArgumentException("Possible Corruption: Bad SerVer: " + serVer);
  }
  Family.FREQUENCY.checkFamilyID(famID);
  final boolean empty = flags > 0; //only the EMPTY flag is serialized here
  if (!empty && (numActive == 0)) {
    throw new SketchesArgumentException(
        "Possible Corruption: !Empty && NumActive=0; strLen: " + numActive);
  }
  final int numTokens = tokens.length;
  //Each active item contributes exactly two tokens (key and value).
  if ((2 * numActive) != (numTokens - STR_PREAMBLE_TOKENS - 2)) {
    throw new SketchesArgumentException(
        "Possible Corruption: Incorrect # of tokens: " + numTokens
        + ", numActive: " + numActive);
  }
  final LongsSketch sketch = new LongsSketch(lgMax, lgCur);
  sketch.streamWeight = streamWt;
  sketch.offset = offset;
  sketch.hashMap = deserializeFromStringArray(tokens);
  return sketch;
}
/**
* Returns the estimated <i>a priori</i> error given the maxMapSize for the sketch and the
* estimatedTotalStreamWeight.
* @param maxMapSize the planned map size to be used when constructing this sketch.
* @param estimatedTotalStreamWeight the estimated total stream weight.
* @return the estimated <i>a priori</i> error.
*/
public static double getAprioriError(final int maxMapSize, final long estimatedTotalStreamWeight) {
  //A priori error = epsilon * total stream weight.
  final double eps = getEpsilon(maxMapSize);
  return eps * estimatedTotalStreamWeight;
}
/**
* Returns the current number of counters the sketch is configured to support.
*
* @return the current number of counters the sketch is configured to support.
*/
public int getCurrentMapCapacity() {
  //curMapCap is refreshed from the hash map whenever the map is created or resized.
  return curMapCap;
}
/**
* Returns epsilon used to compute <i>a priori</i> error.
* This is just the value <i>3.5 / maxMapSize</i>.
* @param maxMapSize the planned map size to be used when constructing this sketch.
* @return epsilon used to compute <i>a priori</i> error.
*/
public static double getEpsilon(final int maxMapSize) {
  //epsilon is defined only for power-of-2 map sizes.
  if (isIntPowerOf2(maxMapSize)) {
    return 3.5 / maxMapSize;
  }
  throw new SketchesArgumentException("maxMapSize is not a power of 2.");
}
/**
* Gets the estimate of the frequency of the given item.
* Note: The true frequency of a item would be the sum of the counts as a result of the
* two update functions.
*
* @param item the given item
* @return the estimate of the frequency of the given item
*/
public long getEstimate(final long item) {
  //A tracked item reports its stored counter plus the global error offset;
  //an untracked item reports 0.
  final long rawCount = hashMap.get(item);
  if (rawCount <= 0) { return 0; }
  return rawCount + offset;
}
/**
* Gets the guaranteed lower bound frequency of the given item, which can never be
* negative.
*
* @param item the given item.
* @return the guaranteed lower bound frequency of the given item. That is, a number which
* is guaranteed to be no larger than the real frequency.
*/
public long getLowerBound(final long item) {
  //LB = itemCount or 0; the stored counter never overestimates the true count.
  return hashMap.get(item);
}
/**
* Returns an array of Rows that include frequent items, estimates, upper and lower bounds
* given a threshold and an ErrorCondition. If the threshold is lower than getMaximumError(),
* then getMaximumError() will be used instead.
*
* <p>The method first examines all active items in the sketch (items that have a counter).
*
* <p>If <i>ErrorType = NO_FALSE_NEGATIVES</i>, this will include an item in the result
* list if getUpperBound(item) > threshold.
* There will be no false negatives, i.e., no Type II error.
* There may be items in the set with true frequencies less than the threshold
* (false positives).</p>
*
* <p>If <i>ErrorType = NO_FALSE_POSITIVES</i>, this will include an item in the result
* list if getLowerBound(item) > threshold.
* There will be no false positives, i.e., no Type I error.
* There may be items omitted from the set with true frequencies greater than the
* threshold (false negatives). This is a subset of the NO_FALSE_NEGATIVES case.</p>
*
* @param threshold to include items in the result list
* @param errorType determines whether no false positives or no false negatives are
* desired.
* @return an array of frequent items
*/
public Row[] getFrequentItems(final long threshold, final ErrorType errorType) {
  //Never filter below the current maximum error.
  final long effectiveThreshold = Math.max(threshold, getMaximumError());
  return sortItems(effectiveThreshold, errorType);
}
/**
* Returns an array of Rows that include frequent items, estimates, upper and lower bounds
* given an ErrorCondition and the default threshold.
* This is the same as getFrequentItems(getMaximumError(), errorType)
*
* @param errorType determines whether no false positives or no false negatives are
* desired.
* @return an array of frequent items
*/
public Row[] getFrequentItems(final ErrorType errorType) {
  //Uses the default threshold, which is the current maximum error.
  return sortItems(getMaximumError(), errorType);
}
/**
 * @return An upper bound on the maximum error of getEstimate(item) for any item.
 * This is equivalent to the maximum distance between the upper bound and the lower bound
 * for any item.
 */
public long getMaximumError() {
  //offset accumulates the counts removed by purges, which is the max estimation error.
  return offset;
}
/**
* Returns the maximum number of counters the sketch is configured to support.
*
* @return the maximum number of counters the sketch is configured to support.
*/
public int getMaximumMapCapacity() {
  //Maximum capacity = loadFactor * maximum physical map size.
  final int maxMapSize = 1 << lgMaxMapSize;
  return (int) (maxMapSize * ReversePurgeLongHashMap.getLoadFactor());
}
/**
* @return the number of active items in the sketch.
*/
public int getNumActiveItems() {
  //Delegates to the hash map's active-entry count.
  return hashMap.getNumActive();
}
/**
* Returns the number of bytes required to store this sketch as an array of bytes.
*
* @return the number of bytes required to store this sketch as an array of bytes.
*/
public int getStorageBytes() {
  //Empty sketch: a single preamble long. Otherwise: 4 preamble longs plus
  //two longs (count and item) per active item.
  if (isEmpty()) { return Long.BYTES; }
  return (4 * Long.BYTES) + (2 * Long.BYTES * getNumActiveItems());
}
/**
* Returns the sum of the frequencies (weights or counts) in the stream seen so far by the sketch
*
* @return the sum of the frequencies in the stream seen so far by the sketch
*/
public long getStreamLength() {
  //streamWeight is the running total of all counts passed to update().
  return streamWeight;
}
/**
* Gets the guaranteed upper bound frequency of the given item.
*
* @param item the given item
* @return the guaranteed upper bound frequency of the given item. That is, a number which
* is guaranteed to be no smaller than the real frequency.
*/
public long getUpperBound(final long item) {
  // UB = itemCount + offset; the purge offset bounds how much may have been removed.
  return hashMap.get(item) + offset;
}
/**
* Returns true if this sketch is empty
*
* @return true if this sketch is empty
*/
public boolean isEmpty() {
  //Empty means no active counters in the map.
  return 0 == getNumActiveItems();
}
/**
* This function merges the other sketch into this one.
* The other sketch may be of a different size.
*
* @param other sketch of this class
* @return a sketch whose estimates are within the guarantees of the
* largest error tolerance of the two merged sketches.
*/
public LongsSketch merge(final LongsSketch other) {
  //A null or empty argument leaves this sketch unchanged.
  if ((other == null) || other.isEmpty()) { return this; }
  //Capture the combined weight first: the update() calls below add to
  //streamWeight again, so it is restored afterward.
  final long combinedWeight = streamWeight + other.streamWeight;
  final ReversePurgeLongHashMap.Iterator it = other.hashMap.iterator();
  while (it.next()) { //may trigger purges, which grow this.offset
    update(it.getKey(), it.getValue());
  }
  offset += other.offset;
  streamWeight = combinedWeight; //corrected streamWeight
  return this;
}
/**
* Resets this sketch to a virgin state.
*/
public void reset() {
  //Rebuild the map at its minimum size and clear all accumulated state.
  final int minMapSize = 1 << LG_MIN_MAP_SIZE;
  hashMap = new ReversePurgeLongHashMap(minMapSize);
  curMapCap = hashMap.getCapacity();
  offset = 0L;
  streamWeight = 0L;
}
//Serialization
/**
* Returns a String representation of this sketch
*
* @return a String representation of this sketch
*/
public String serializeToString() {
  final StringBuilder sb = new StringBuilder();
  //start the string with parameters of the sketch:
  //serVer, famID, lgMaxMapSize, flags, streamWeight, offset (comma separated)
  final int serVer = SER_VER;                 //0
  final int famID = Family.FREQUENCY.getID(); //1
  final int lgMaxMapSz = lgMaxMapSize;        //2
  final int flags = (hashMap.getNumActive() == 0) ? EMPTY_FLAG_MASK : 0; //3
  final String fmt = "%d,%d,%d,%d,%d,%d,";
  final String s =
      String.format(fmt, serVer, famID, lgMaxMapSz, flags, streamWeight, offset);
  sb.append(s);
  sb.append(hashMap.serializeToString()); //numActive, curMaplen, key[i], value[i], ...
  // maxMapCap, samplesize are deterministic functions of maxMapSize,
  // so we don't need them in the serialization
  return sb.toString();
}
/**
* Returns a byte array representation of this sketch
* @return a byte array representation of this sketch
*/
public byte[] toByteArray() {
  final int preLongs, outBytes;
  final boolean empty = isEmpty();
  final int activeItems = getNumActiveItems();
  //Empty images are a single preamble long; otherwise the full preamble plus
  //the count array and the item array.
  if (empty) {
    preLongs = 1;
    outBytes = 8;
  } else {
    preLongs = Family.FREQUENCY.getMaxPreLongs(); //4
    outBytes = (preLongs + (2 * activeItems)) << 3; //2 because both keys and values are longs
  }
  final byte[] outArr = new byte[outBytes];
  final WritableMemory mem = WritableMemory.writableWrap(outArr);
  // build first preLong empty or not
  long pre0 = 0L;
  pre0 = insertPreLongs(preLongs, pre0);                  //Byte 0
  pre0 = insertSerVer(SER_VER, pre0);                     //Byte 1
  pre0 = insertFamilyID(Family.FREQUENCY.getID(), pre0);  //Byte 2
  pre0 = insertLgMaxMapSize(lgMaxMapSize, pre0);          //Byte 3
  pre0 = insertLgCurMapSize(hashMap.getLgLength(), pre0); //Byte 4
  pre0 = (empty) ? insertFlags(EMPTY_FLAG_MASK, pre0) : insertFlags(0, pre0); //Byte 5
  if (empty) {
    mem.putLong(0, pre0);
  } else {
    //Remaining preamble longs: activeItems, streamWeight, offset.
    final long pre = 0;
    final long[] preArr = new long[preLongs];
    preArr[0] = pre0;
    preArr[1] = insertActiveItems(activeItems, pre);
    preArr[2] = streamWeight;
    preArr[3] = offset;
    mem.putLongArray(0, preArr, 0, preLongs);
    final int preBytes = preLongs << 3;
    //Values (counts) are written first, then keys (items) — the order
    //getInstance(Memory) expects.
    mem.putLongArray(preBytes, hashMap.getActiveValues(), 0, activeItems);
    mem.putLongArray(preBytes + (activeItems << 3), hashMap.getActiveKeys(), 0,
        activeItems);
  }
  return outArr;
}
/**
 * Returns a human readable summary of this sketch.
 * @return a human readable summary of this sketch.
 */
@Override
public String toString() {
  //LS is the line-separator constant imported from common.Util.
  final StringBuilder sb = new StringBuilder();
  sb.append("FrequentLongsSketch:").append(LS);
  sb.append("  Stream Length    : " + streamWeight).append(LS);
  sb.append("  Max Error Offset : " + offset).append(LS);
  sb.append(hashMap.toString());
  return sb.toString();
}
/**
 * Returns a human readable string of the preamble of a byte array image of a LongsSketch.
 * @param byteArr the given byte array
 * @return a human readable string of the preamble of a byte array image of a LongsSketch.
 */
public static String toString(final byte[] byteArr) {
  return toString(Memory.wrap(byteArr));
}
/**
 * Returns a human readable string of the preamble of a Memory image of a LongsSketch.
 * @param mem the given Memory object
 * @return a human readable string of the preamble of a Memory image of a LongsSketch.
 */
public static String toString(final Memory mem) {
  return PreambleUtil.preambleToString(mem);
}
/**
 * Update this sketch with an item and a frequency count of one.
 * @param item for which the frequency should be increased.
 */
public void update(final long item) {
  update(item, 1);
}
/**
* Update this sketch with a item and a positive frequency count (or weight).
* @param item for which the frequency should be increased. The item can be any long value
* and is only used by the sketch to determine uniqueness.
* @param count the amount by which the frequency of the item should be increased.
* An count of zero is a no-op, and a negative count will throw an exception.
*/
public void update(final long item, final long count) {
  if (count == 0) { return; } //zero count is a no-op
  if (count < 0) {
    throw new SketchesArgumentException("Count may not be negative");
  }
  streamWeight += count;
  hashMap.adjustOrPutValue(item, count);
  if (getNumActiveItems() > curMapCap) { //over the threshold, we need to do something
    if (hashMap.getLgLength() < lgMaxMapSize) { //below tgt size, we can grow
      hashMap.resize(2 * hashMap.getLength());
      curMapCap = hashMap.getCapacity();
    } else { //At tgt size, must purge; the removed median is added to the error offset
      offset += hashMap.purge(sampleSize);
      if (getNumActiveItems() > getMaximumMapCapacity()) {
        throw new SketchesStateException("Purge did not reduce active items.");
      }
    }
  }
}
/**
* Row class that defines the return values from a getFrequentItems query.
*/
public static class Row implements Comparable<Row> {
  final long item;
  final long est;
  final long ub;
  final long lb;
  private static final String fmt = " %20d%20d%20d %d";
  private static final String hfmt = " %20s%20s%20s %s";

  /**
   * Builds one result row of a getFrequentItems query.
   * @param item the item
   * @param estimate the estimated frequency
   * @param ub the upper bound on the true frequency
   * @param lb the lower bound on the true frequency
   */
  Row(final long item, final long estimate, final long ub, final long lb) {
    this.item = item;
    this.est = estimate;
    this.ub = ub;
    this.lb = lb;
  }

  /**
   * @return item of type T
   */
  public long getItem() { return item; }

  /**
   * @return the estimate
   */
  public long getEstimate() { return est; }

  /**
   * @return the upper bound
   */
  public long getUpperBound() { return ub; }

  /**
   * @return return the lower bound
   */
  public long getLowerBound() { return lb; }

  /**
   * @return the descriptive row header
   */
  public static String getRowHeader() {
    return String.format(hfmt, "Est", "UB", "LB", "Item");
  }

  @Override
  public String toString() {
    return String.format(fmt, est, ub, lb, item);
  }

  /**
   * This compareTo is strictly limited to the Row.getEstimate() value and does not imply any
   * ordering whatsoever to the other elements of the row: item and upper and lower bounds.
   * Defined this way, this compareTo will be consistent with hashCode() and equals(Object).
   * @param that the other row to compare to.
   * @return a negative integer, zero, or a positive integer as this.getEstimate() is less than,
   * equal to, or greater than that.getEstimate().
   */
  @Override
  public int compareTo(final Row that) {
    return Long.compare(est, that.est); //same -1/0/1 values as the explicit ternary
  }

  /**
   * This hashCode is computed only from the Row.getEstimate() value.
   * Defined this way, this hashCode will be consistent with equals(Object):<br>
   * If (x.equals(y)) implies: x.hashCode() == y.hashCode().<br>
   * If (!x.equals(y)) does NOT imply: x.hashCode() != y.hashCode().
   * @return the hashCode computed from getEstimate().
   */
  @Override
  public int hashCode() {
    //Long.hashCode(est) == (int)(est ^ (est >>> 32)); 31 * 1 + hash == 31 + hash.
    return 31 + Long.hashCode(est);
  }

  /**
   * This equals is computed only from the Row.getEstimate() value and does not imply equality
   * of the other items within the row: item and upper and lower bounds.
   * Defined this way, this equals will be consistent with compareTo(Row).
   * @param obj the other row to determine equality with.
   * @return true if this.getEstimate() equals ((Row)obj).getEstimate().
   */
  @Override
  public boolean equals(final Object obj) {
    if (this == obj) { return true; }
    if (!(obj instanceof Row)) { return false; } //also rejects null
    return est == ((Row) obj).est;
  }
} // End of class Row
Row[] sortItems(final long threshold, final ErrorType errorType) {
  //NO_FALSE_NEGATIVES filters on the upper bound; NO_FALSE_POSITIVES on the lower bound.
  final boolean useUpperBound = (errorType == ErrorType.NO_FALSE_NEGATIVES);
  final ArrayList<Row> rows = new ArrayList<>();
  final ReversePurgeLongHashMap.Iterator it = hashMap.iterator();
  while (it.next()) {
    final long key = it.getKey();
    final long est = getEstimate(key);
    final long ub = getUpperBound(key);
    final long lb = getLowerBound(key);
    final long bound = useUpperBound ? ub : lb;
    if (bound >= threshold) {
      rows.add(new Row(key, est, ub, lb));
    }
  }
  //Descending order of estimate: reverseOrder() compares (a, b) -> b.compareTo(a),
  //identical to the original explicit comparator.
  rows.sort(Comparator.reverseOrder());
  return rows.toArray(new Row[0]);
}
/**
* Deserializes an array of String tokens into a hash map object of this class.
*
* @param tokens the given array of Strings tokens.
* @return a hash map object of this class
*/
static ReversePurgeLongHashMap deserializeFromStringArray(final String[] tokens) {
  //Skip the sketch-level preamble tokens; the map section starts with
  //numActive and the physical map length, followed by (key, value) pairs.
  final int ignore = STR_PREAMBLE_TOKENS;
  final int numActive = Integer.parseInt(tokens[ignore]);
  final int length = Integer.parseInt(tokens[ignore + 1]);
  final ReversePurgeLongHashMap hashMap = new ReversePurgeLongHashMap(length);
  int j = 2 + ignore;
  for (int i = 0; i < numActive; i++) {
    final long key = Long.parseLong(tokens[j++]);
    final long value = Long.parseLong(tokens[j++]);
    hashMap.adjustOrPutValue(key, value);
  }
  return hashMap;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.frequencies;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.common.Util.exactLog2OfInt;
import static org.apache.datasketches.frequencies.Util.hash;
import java.lang.reflect.Array;
import org.apache.datasketches.thetacommon.QuickSelect;
/**
* Implements a linear-probing based hash map of (key, value) pairs and is distinguished by a
* "reverse" purge operation that removes all keys in the map whose associated values are ≤ 0
* and is performed in reverse, starting at the "back" of the array and moving toward the front.
*
* @param <T> The type of item to be tracked by this sketch
*
* @author Edo Liberty
* @author Justin Thaler
* @author Alexander Saydakov
*/
class ReversePurgeItemHashMap<T> {
//Fraction of the table that may be occupied before it is considered full.
private static final double LOAD_FACTOR = 0.75;
private static final int DRIFT_LIMIT = 1024; //used only in stress testing
//log2 of the current array length (the arrays are always a power of 2 long)
private int lgLength;
//max active entries before the table is full: LOAD_FACTOR * array length
protected int loadThreshold;
protected Object[] keys;    //parallel arrays: item, its counter, and its probe state
protected long[] values;
protected short[] states;   //0 = empty slot; > 0 = active (stores the probe drift)
protected int numActive = 0;
/**
* Constructor will create arrays of length mapSize, which must be a power of two.
* This restriction was made to ensure fast hashing.
* The protected variable this.loadThreshold is then set to the largest value that
* will not overload the hash table.
*
* @param mapSize This determines the number of cells in the arrays underlying the
* HashMap implementation and must be a power of 2.
* The hash table will be expected to store LOAD_FACTOR * mapSize (key, value) pairs.
*/
ReversePurgeItemHashMap(final int mapSize) {
  //exactLog2OfInt also validates that mapSize is a power of 2.
  lgLength = exactLog2OfInt(mapSize, "mapSize");
  this.loadThreshold = (int) (mapSize * LOAD_FACTOR);
  this.keys = new Object[mapSize];
  this.values = new long[mapSize];
  this.states = new short[mapSize];
}
/**
* @param probe location in the hash table array
* @return true if the cell in the array contains an active key
*/
boolean isActive(final int probe) {
  //A positive state marks an occupied slot (the value is the probe drift).
  return states[probe] > 0;
}
/**
* Gets the current value with the given key
* @param key the given key
* @return the positive value the key corresponds to or zero if the key is not found in the
* hash map.
*/
long get(final T key) {
  //A null key is never stored, so it maps to zero.
  if (key == null) { return 0; }
  final int probe = hashProbe(key);
  if (!isActive(probe)) { return 0; } //probe landed on an empty slot: not present
  assert keys[probe].equals(key);
  return values[probe];
}
/**
* Increments the value mapped to the key if the key is present in the map. Otherwise,
* the key is inserted with the adjustAmount.
*
* @param key the key of the value to increment
* @param adjustAmount the amount by which to increment the value
*/
void adjustOrPutValue(final T key, final long adjustAmount) {
  final int mask = keys.length - 1;
  int probe = (int) hash(key.hashCode()) & mask;
  int drift = 1;
  //Linear probing: advance until an empty slot or the matching key is found.
  while ((states[probe] != 0) && !keys[probe].equals(key)) {
    probe = (probe + 1) & mask;
    drift++;
    //only used for theoretical analysis
    assert drift < DRIFT_LIMIT : "drift: " + drift + " >= DRIFT_LIMIT";
  }
  if (states[probe] == 0) {
    //Empty slot: insert a new (key, value) pair.
    assert numActive <= loadThreshold
        : "numActive: " + numActive + " > loadThreshold: " + loadThreshold;
    keys[probe] = key;
    values[probe] = adjustAmount;
    states[probe] = (short) drift; //state records this entry's probe drift
    numActive++;
  } else {
    //Matching key already present: bump its counter.
    assert keys[probe].equals(key);
    values[probe] += adjustAmount;
  }
}
/**
* Processes the map arrays and retains only keys with positive counts.
*/
void keepOnlyPositiveCounts() {
  // Starting from the back, find the first empty cell,
  // which establishes the high end of a cluster.
  int firstProbe = states.length - 1;
  while (states[firstProbe] > 0) {
    firstProbe--;
  }
  // firstProbe keeps track of this point.
  // When we find the next non-empty cell, we know we are at the high end of a cluster
  // Work towards the front; delete any non-positive entries.
  for (int probe = firstProbe; probe-- > 0;) {
    if (states[probe] > 0 && values[probe] <= 0) {
      hashDelete(probe); //does the work of deletion and moving higher items towards the front.
      numActive--;
    }
  }
  //now work on the first cluster that was skipped (the one that wraps around
  //the end of the array, from firstProbe to the last index).
  for (int probe = states.length; probe-- > firstProbe;) {
    if (states[probe] > 0 && values[probe] <= 0) {
      hashDelete(probe);
      numActive--;
    }
  }
}
/**
* @param adjustAmount value by which to shift all values. Only keys corresponding to positive
* values are retained.
*/
void adjustAllValuesBy(final long adjustAmount) {
for (int i = values.length; i-- > 0;) {
values[i] += adjustAmount;
}
}
/**
* @return an array containing the active keys in the hash map.
*/
@SuppressWarnings("unchecked")
T[] getActiveKeys() {
if (numActive == 0) { return null; }
T[] returnedKeys = null;
int j = 0;
for (int i = 0; i < keys.length; i++) {
if (isActive(i)) {
if (returnedKeys == null) {
returnedKeys = (T[]) Array.newInstance(keys[i].getClass(), numActive);
}
returnedKeys[j] = (T) keys[i];
j++;
}
}
assert j == numActive : "j: " + j + " != numActive: " + numActive;
return returnedKeys;
}
/**
* @return an array containing the values corresponding to the active keys in the hash
*/
long[] getActiveValues() {
if (numActive == 0) { return null; }
final long[] returnedValues = new long[numActive];
int j = 0;
for (int i = 0; i < values.length; i++) {
if (isActive(i)) {
returnedValues[j] = values[i];
j++;
}
}
assert j == numActive;
return returnedValues;
}
// assume newSize is power of 2
@SuppressWarnings("unchecked")
void resize(final int newSize) {
final Object[] oldKeys = keys;
final long[] oldValues = values;
final short[] oldStates = states;
keys = new Object[newSize];
values = new long[newSize];
states = new short[newSize];
loadThreshold = (int) (newSize * LOAD_FACTOR);
lgLength = Integer.numberOfTrailingZeros(newSize);
numActive = 0;
for (int i = 0; i < oldKeys.length; i++) {
if (oldStates[i] > 0) {
adjustOrPutValue((T) oldKeys[i], oldValues[i]);
}
}
}
  /**
   * @return length of hash table internal arrays
   */
  int getLength() {
    return keys.length;
  }
  /**
   * @return log_2 of the length of the hash table internal arrays
   */
  int getLgLength() {
    return lgLength;
  }
  /**
   * @return capacity of hash table internal arrays (i.e., max number of keys that can be stored)
   */
  int getCapacity() {
    return loadThreshold;
  }
  /**
   * @return number of populated keys
   */
  int getNumActive() {
    return numActive;
  }
/**
* Returns the hash table as a human readable string.
*/
@Override
public String toString() {
final String fmt = " %12d:%11d%12d %s";
final String hfmt = " %12s:%11s%12s %s";
final StringBuilder sb = new StringBuilder();
sb.append("ReversePurgeItemHashMap").append(LS);
sb.append(String.format(hfmt, "Index","States","Values","Keys")).append(LS);
for (int i = 0; i < keys.length; i++) {
if (states[i] <= 0) { continue; }
sb.append(String.format(fmt, i, states[i], values[i], keys[i].toString()));
sb.append(LS);
}
return sb.toString();
}
  /**
   * @return the load factor of the hash table, i.e., the ratio between the capacity and the array
   * length
   */
  static double getLoadFactor() {
    return LOAD_FACTOR;
  }
/**
* This function is called when a key is processed that is not currently assigned a counter, and
* all the counters are in use. This function estimates the median of the counters in the sketch
* via sampling, decrements all counts by this estimate, throws out all counters that are no
* longer positive, and increments offset accordingly.
* @param sampleSize number of samples
* @return the median value
*/
long purge(final int sampleSize) {
final int limit = Math.min(sampleSize, getNumActive());
int numSamples = 0;
int i = 0;
final long[] samples = new long[limit];
while (numSamples < limit) {
if (isActive(i)) {
samples[numSamples] = values[i];
numSamples++;
}
i++;
}
final long val = QuickSelect.select(samples, 0, numSamples - 1, limit / 2);
adjustAllValuesBy(-1 * val);
keepOnlyPositiveCounts();
return val;
}
  /**
   * Deletes the entry at deleteProbe and repairs the probe cluster: entries further along the
   * cluster whose stored probe distance (state) exceeds their distance from the vacated slot are
   * moved back into it, so no tombstone markers are needed and lookups stay correct.
   * @param deleteProbe index of the entry to delete
   */
  private void hashDelete(int deleteProbe) {
    // Looks ahead in the table to search for another
    // item to move to this location
    // if none are found, the status is changed
    states[deleteProbe] = 0; //mark as empty
    int drift = 1;
    final int arrayMask = keys.length - 1;
    int probe = deleteProbe + drift & arrayMask; //map length must be a power of 2
    // advance until you find a free location replacing locations as needed
    while (states[probe] != 0) {
      if (states[probe] > drift) {
        // move current element
        keys[deleteProbe] = keys[probe];
        values[deleteProbe] = values[probe];
        states[deleteProbe] = (short) (states[probe] - drift);
        // marking this location as deleted
        states[probe] = 0;
        drift = 0; //restart the distance measurement from the newly vacated slot
        deleteProbe = probe;
      }
      probe = probe + 1 & arrayMask;
      drift++;
      //only used for theoretical analysis
      assert drift < DRIFT_LIMIT : "drift: " + drift + " >= DRIFT_LIMIT";
    }
  }
private int hashProbe(final T key) {
final int arrayMask = keys.length - 1;
int probe = (int) hash(key.hashCode()) & arrayMask;
while (states[probe] > 0 && !keys[probe].equals(key)) {
probe = probe + 1 & arrayMask;
}
return probe;
}
  /**
   * @return an iterator over the active (key, value) entries of this map
   */
  Iterator<T> iterator() {
    return new Iterator<>(keys, values, states, numActive);
  }
  // This iterator uses strides based on golden ratio to avoid clustering during merge
  static class Iterator<T> {
    private static final double GOLDEN_RATIO_RECIPROCAL = (Math.sqrt(5) - 1) / 2;
    private final Object[] keys_;   //references to the map's internal arrays (not copies)
    private final long[] values_;
    private final short[] states_;
    private final int numActive_;
    private final int stride_;      //odd, so it is coprime with the power-of-2 table length
    private final int mask_;
    private int i_;                 //current index into the arrays
    private int count_;             //number of active entries returned so far
    Iterator(final Object[] keys, final long[] values, final short[] states, final int numActive) {
      keys_ = keys;
      values_ = values;
      states_ = states;
      numActive_ = numActive;
      //the | 1 forces an odd stride so every slot is visited exactly once per full cycle
      stride_ = (int) (keys.length * GOLDEN_RATIO_RECIPROCAL) | 1;
      mask_ = keys.length - 1;
      i_ = -stride_; //so the first next() lands on index 0
      count_ = 0;
    }
    /**
     * Advances to the next active entry.
     * @return true if another active entry was found, false when all have been returned
     */
    boolean next() {
      i_ = i_ + stride_ & mask_;
      while (count_ < numActive_) {
        if (states_[i_] > 0) {
          count_++;
          return true;
        }
        i_ = i_ + stride_ & mask_;
      }
      return false;
    }
    /** @return the key at the current position; only valid after next() returned true */
    @SuppressWarnings("unchecked")
    T getKey() {
      return (T) keys_[i_];
    }
    /** @return the value at the current position; only valid after next() returned true */
    long getValue() {
      return values_[i_];
    }
  }
}
| 2,623 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/frequencies/ErrorType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.frequencies;
/**
 * Specifies one of two types of error regions of the statistical classification Confusion Matrix
 * that can be excluded from a returned sample of Frequent Items.
 */
public enum ErrorType {
  /**
   * No <i>Type I</i> error samples will be included in the sample set,
   * which means all <i>Truly Negative</i> samples will be excluded from the sample set.
   * However, there may be <i>Type II</i> error samples (<i>False Negatives</i>)
   * that should have been included that were not.
   * This is a subset of the NO_FALSE_NEGATIVES ErrorType.
   */
  NO_FALSE_POSITIVES,
  /**
   * No <i>Type II</i> error samples will be excluded from the sample set,
   * which means all <i>Truly Positive</i> samples will be included in the sample set.
   * However, there may be <i>Type I</i> error samples (<i>False Positives</i>)
   * that were included that should not have been.
   * This is a superset of the NO_FALSE_POSITIVES ErrorType.
   */
  NO_FALSE_NEGATIVES
}
| 2,624 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/frequencies/ReversePurgeLongHashMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.frequencies;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.common.Util.exactLog2OfInt;
import static org.apache.datasketches.common.Util.INVERSE_GOLDEN;
import static org.apache.datasketches.frequencies.Util.hash;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.thetacommon.QuickSelect;
/**
 * Implements a linear-probing based hash map of (key, value) pairs and is distinguished by a
 * "reverse" purge operation that removes all keys in the map whose associated values are ≤ 0
 * and is performed in reverse, starting at the "back" of the array and moving toward the front.
 *
 * @author Edo Liberty
 * @author Justin Thaler
 * @author Lee Rhodes
 */
class ReversePurgeLongHashMap {
  private static final double LOAD_FACTOR = 0.75;
  private static final int DRIFT_LIMIT = 1024; //used only in stress testing
  private int lgLength;      //log_2 of the length of the internal arrays
  private int loadThreshold; //largest number of active entries that will not overload the table
  private long[] keys;
  private long[] values;
  private short[] states;    //0 == empty slot; otherwise the 1-based probe distance from the home slot
  private int numActive = 0;
  /**
   * Constructor will create arrays of length mapSize, which must be a power of two.
   * This restriction was made to ensure fast hashing.
   * The protected variable this.loadThreshold is then set to the largest value that
   * will not overload the hash table.
   *
   * @param mapSize This determines the number of cells in the arrays underlying the
   * HashMap implementation and must be a power of 2.
   * The hash table will be expected to store LOAD_FACTOR * mapSize (key, value) pairs.
   */
  ReversePurgeLongHashMap(final int mapSize) {
    lgLength = exactLog2OfInt(mapSize, "mapSize");
    loadThreshold = (int) (mapSize * LOAD_FACTOR);
    keys = new long[mapSize];
    values = new long[mapSize];
    states = new short[mapSize];
  }
  /**
   * Returns an instance of this class from the given String,
   * which must be a String representation of this class.
   *
   * @param string a String representation of this class.
   * @return an instance of this class.
   * @throws SketchesArgumentException if the string does not contain enough tokens
   */
  static ReversePurgeLongHashMap getInstance(final String string) {
    final String[] tokens = string.split(",");
    if (tokens.length < 2) {
      throw new SketchesArgumentException(
          "String not long enough to specify length and capacity.");
    }
    final int numActive = Integer.parseInt(tokens[0]);
    final int length = Integer.parseInt(tokens[1]);
    //Fail fast with the library's exception type instead of an ArrayIndexOutOfBoundsException
    //when the string claims more (key, value) pairs than it actually carries.
    if (tokens.length < 2 + (2 * numActive)) {
      throw new SketchesArgumentException(
          "String not long enough for the specified number of entries: " + numActive);
    }
    final ReversePurgeLongHashMap table = new ReversePurgeLongHashMap(length);
    int j = 2;
    for (int i = 0; i < numActive; i++) {
      final long key = Long.parseLong(tokens[j++]);
      final long value = Long.parseLong(tokens[j++]);
      table.adjustOrPutValue(key, value);
    }
    return table;
  }
  //Serialization
  /**
   * Returns a String representation of this hash map.
   *
   * @return a String representation of this hash map.
   */
  String serializeToString() {
    final StringBuilder sb = new StringBuilder();
    //format: "numActive,length," followed by "key,value," per occupied slot (order not preserved)
    sb.append(String.format("%d,%d,", numActive, keys.length));
    for (int i = 0; i < keys.length; i++) {
      if (states[i] != 0) {
        sb.append(String.format("%d,%d,", keys[i], values[i]));
      }
    }
    return sb.toString();
  }
  /**
   * @param probe location in the hash table array
   * @return true if the cell in the array contains an active key
   */
  boolean isActive(final int probe) {
    return (states[probe] > 0);
  }
  /**
   * Gets the current value with the given key
   * @param key the given key
   * @return the positive value the key corresponds to or zero if the key is not found in the
   * hash map.
   */
  long get(final long key) {
    final int probe = hashProbe(key);
    if (states[probe] > 0) {
      assert (keys[probe] == key);
      return values[probe];
    }
    return 0;
  }
  /**
   * Increments the value mapped to the key if the key is present in the map. Otherwise,
   * the key is inserted with the adjustAmount as its initial value.
   *
   * @param key the key of the value to increment
   * @param adjustAmount the amount by which to increment the value
   */
  void adjustOrPutValue(final long key, final long adjustAmount) {
    final int arrayMask = keys.length - 1;
    int probe = (int) hash(key) & arrayMask;
    int drift = 1;
    while ((states[probe] != 0) && (keys[probe] != key)) {
      probe = (probe + 1) & arrayMask;
      drift++;
      //only used for theoretical analysis
      assert (drift < DRIFT_LIMIT) : "drift: " + drift + " >= DRIFT_LIMIT";
    }
    //found either an empty slot or the key
    if (states[probe] == 0) { //found empty slot
      // adding the key and value to the table
      assert (numActive <= loadThreshold)
          : "numActive: " + numActive + " > loadThreshold : " + loadThreshold;
      keys[probe] = key;
      values[probe] = adjustAmount;
      states[probe] = (short) drift; //how far off we are
      numActive++;
    } else { //found the key, adjust the value
      assert (keys[probe] == key);
      values[probe] += adjustAmount;
    }
  }
  /**
   * Processes the map arrays and retains only keys with positive counts.
   * Works backwards one probe-cluster at a time so that hashDelete's forward scan never
   * crosses a cell that has not yet been examined.
   */
  void keepOnlyPositiveCounts() {
    // Starting from the back, find the first empty cell, which marks a boundary between clusters.
    int firstProbe = keys.length - 1;
    while (states[firstProbe] > 0) {
      firstProbe--;
    }
    //Work towards the front; delete any non-positive entries.
    for (int probe = firstProbe; probe-- > 0; ) {
      // When we find the next non-empty cell, we know we are at the high end of a cluster,
      // which is tracked by firstProbe.
      if ((states[probe] > 0) && (values[probe] <= 0)) {
        hashDelete(probe); //does the work of deletion and moving higher items towards the front.
        numActive--;
      }
    }
    //now work on the first cluster that was skipped: indices length-1 down to firstProbe.
    for (int probe = keys.length; probe-- > firstProbe;) {
      if ((states[probe] > 0) && (values[probe] <= 0)) {
        hashDelete(probe);
        numActive--;
      }
    }
  }
  /**
   * Shifts every slot of the values array (active or not) by the given amount.
   * @param adjustAmount value by which to shift all values. Only keys corresponding to positive
   * values are retained.
   */
  void adjustAllValuesBy(final long adjustAmount) {
    for (int i = keys.length; i-- > 0; ) {
      values[i] += adjustAmount;
    }
  }
  /**
   * @return an array containing the active keys in the hash map, or null if the map is empty.
   */
  long[] getActiveKeys() {
    if (numActive == 0) { return null; }
    final long[] returnedKeys = new long[numActive];
    int j = 0;
    for (int i = 0; i < keys.length; i++) {
      if (isActive(i)) {
        returnedKeys[j] = keys[i];
        j++;
      }
    }
    assert (j == numActive) : "j: " + j + " != numActive: " + numActive;
    return returnedKeys;
  }
  /**
   * @return an array containing the values corresponding to the active keys in the hash map,
   * or null if the map is empty.
   */
  long[] getActiveValues() {
    if (numActive == 0) { return null; }
    final long[] returnedValues = new long[numActive];
    int j = 0;
    for (int i = 0; i < values.length; i++) {
      if (isActive(i)) {
        returnedValues[j] = values[i];
        j++;
      }
    }
    assert (j == numActive);
    return returnedValues;
  }
  /**
   * Rebuilds the table into arrays of the given size, re-inserting every active entry.
   * Assumes newSize is a power of 2.
   * @param newSize the new array length; must be a power of 2
   */
  void resize(final int newSize) {
    final long[] oldKeys = keys;
    final long[] oldValues = values;
    final short[] oldStates = states;
    keys = new long[newSize];
    values = new long[newSize];
    states = new short[newSize];
    loadThreshold = (int) (newSize * LOAD_FACTOR);
    lgLength = Integer.numberOfTrailingZeros(newSize);
    numActive = 0;
    for (int i = 0; i < oldKeys.length; i++) {
      if (oldStates[i] > 0) {
        adjustOrPutValue(oldKeys[i], oldValues[i]);
      }
    }
  }
  /**
   * @return length of hash table internal arrays
   */
  int getLength() {
    return keys.length;
  }
  /**
   * @return log_2 of the length of the hash table internal arrays
   */
  int getLgLength() {
    return lgLength;
  }
  /**
   * @return capacity of hash table internal arrays (i.e., max number of keys that can be stored)
   */
  int getCapacity() {
    return loadThreshold;
  }
  /**
   * @return number of populated keys
   */
  int getNumActive() {
    return numActive;
  }
  /**
   * Returns the hash table as a human readable string.
   */
  @Override
  public String toString() {
    final String fmt = " %12d:%11d%20d %d";
    final String hfmt = " %12s:%11s%20s %s";
    final StringBuilder sb = new StringBuilder();
    sb.append("ReversePurgeLongHashMap:").append(LS);
    sb.append(String.format(hfmt, "Index","States","Values","Keys")).append(LS);
    for (int i = 0; i < keys.length; i++) {
      if (states[i] <= 0) { continue; }
      sb.append(String.format(fmt, i, states[i], values[i], keys[i])).append(LS);
    }
    return sb.toString();
  }
  /**
   * @return the load factor of the hash table, i.e., the ratio between the capacity and the array
   * length
   */
  static double getLoadFactor() {
    return LOAD_FACTOR;
  }
  /**
   * This function is called when a key is processed that is not currently assigned a counter, and
   * all the counters are in use. This function estimates the median of the counters in the sketch
   * via sampling, decrements all counts by this estimate, throws out all counters that are no
   * longer positive, and increments offset accordingly.
   * @param sampleSize number of samples
   * @return the median value
   */
  long purge(final int sampleSize) {
    final int limit = Math.min(sampleSize, getNumActive());
    int numSamples = 0;
    int i = 0;
    //collect the first 'limit' active values scanning from the front of the table
    final long[] samples = new long[limit];
    while (numSamples < limit) {
      if (isActive(i)) {
        samples[numSamples] = values[i];
        numSamples++;
      }
      i++;
    }
    final long val = QuickSelect.select(samples, 0, numSamples - 1, limit / 2);
    adjustAllValuesBy(-1 * val);
    keepOnlyPositiveCounts();
    return val;
  }
  /**
   * Deletes the entry at deleteProbe and repairs the probe cluster: entries further along the
   * cluster whose stored probe distance (state) exceeds their distance from the vacated slot are
   * moved back into it, so no tombstone markers are needed and lookups stay correct.
   * @param deleteProbe index of the entry to delete
   */
  private void hashDelete(int deleteProbe) {
    // Looks ahead in the table to search for another item to move to this location.
    // If none are found, the status is changed
    states[deleteProbe] = 0; //mark as empty
    int drift = 1;
    final int arrayMask = keys.length - 1;
    int probe = (deleteProbe + drift) & arrayMask; //map length must be a power of 2
    // advance until you find a free location replacing locations as needed
    while (states[probe] != 0) {
      if (states[probe] > drift) {
        // move current element
        keys[deleteProbe] = keys[probe];
        values[deleteProbe] = values[probe];
        states[deleteProbe] = (short) (states[probe] - drift);
        // marking the current probe location as deleted
        states[probe] = 0;
        drift = 0; //restart the distance measurement from the newly vacated slot
        deleteProbe = probe;
      }
      probe = (probe + 1) & arrayMask;
      drift++;
      //only used for theoretical analysis
      assert (drift < DRIFT_LIMIT) : "drift: " + drift + " >= DRIFT_LIMIT";
    }
  }
  /**
   * Follows the key's linear probe sequence from its home slot.
   * @param key the key to locate
   * @return the index holding the key, or the first non-active slot on its probe path
   */
  private int hashProbe(final long key) {
    final int arrayMask = keys.length - 1;
    int probe = (int) hash(key) & arrayMask;
    while ((states[probe] > 0) && (keys[probe] != key)) {
      probe = (probe + 1) & arrayMask;
    }
    return probe;
  }
  /**
   * @return an iterator over the active (key, value) entries of this map
   */
  Iterator iterator() {
    return new Iterator(keys, values, states, numActive);
  }
  // This iterator uses strides based on golden ratio to avoid clustering during merge
  static class Iterator {
    private final long[] keys_;   //references to the map's internal arrays (not copies)
    private final long[] values_;
    private final short[] states_;
    private final int numActive_;
    private final int stride_;    //odd, so it is coprime with the power-of-2 table length
    private final int mask_;
    private int i_;               //current index into the arrays
    private int count_;           //number of active entries returned so far
    Iterator(final long[] keys, final long[] values, final short[] states, final int numActive) {
      keys_ = keys;
      values_ = values;
      states_ = states;
      numActive_ = numActive;
      stride_ = (int) (keys.length * INVERSE_GOLDEN) | 1;
      mask_ = keys.length - 1;
      i_ = -stride_; //so the first next() lands on index 0
      count_ = 0;
    }
    boolean next() {
      i_ = (i_ + stride_) & mask_;
      while (count_ < numActive_) {
        if (states_[i_] > 0) {
          count_++;
          return true;
        }
        i_ = (i_ + stride_) & mask_;
      }
      return false;
    }
    long getKey() {
      return keys_[i_];
    }
    long getValue() {
      return values_[i_];
    }
  }
}
| 2,625 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/frequencies/PreambleUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.frequencies;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.common.Util.zeroPad;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
// @formatter:off
/**
 * This class defines the preamble data structure and provides basic utilities for some of the key
 * fields.
 * <p>
 * The intent of the design of this class was to isolate the detailed knowledge of the bit and byte
 * layout of the serialized form of the sketches derived from the Sketch class into one place. This
 * allows the possibility of the introduction of different serialization schemes with minimal impact
 * on the rest of the library.
 * </p>
 *
 * <p>
 * MAP: Low significance bytes of this <i>long</i> data structure are on the right. However, the
 * multi-byte integers (<i>int</i> and <i>long</i>) are stored in native byte order. The <i>byte</i>
 * values are treated as unsigned.
 * </p>
 *
 * <p>
 * An empty FrequentItems only requires 8 bytes. All others require 32 bytes of preamble.
 * </p>
 *
 * <pre>
 * * Long || Start Byte Adr:
 * Adr:
 * || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
 * 0 ||------ unused -----|-Flags--|-LgCur--| LgMax | FamID | SerVer | PreambleLongs |
 * || 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 |
 * 1 ||------------(unused)-----------------|--------ActiveItems------------------------|
 * || 23 | 22 | 21 | 20 | 19 | 18 | 17 | 16 |
 * 2 ||-----------------------------------streamLength----------------------------------|
 * || 31 | 30 | 29 | 28 | 27 | 26 | 25 | 24 |
 * 3 ||---------------------------------offset------------------------------------------|
 * || 39 | 38 | 37 | 36 | 35 | 34 | 33 | 32 |
 * 4 ||----------start of values buffer, followed by keys buffer------------------------|
 * </pre>
 *
 * @author Lee Rhodes
 */
final class PreambleUtil {
  private PreambleUtil() {}
  // ###### DO NOT MESS WITH THIS FROM HERE ...
  // Preamble byte Addresses
  static final int PREAMBLE_LONGS_BYTE = 0; // either 1 or 4
  static final int SER_VER_BYTE = 1;
  static final int FAMILY_BYTE = 2;
  static final int LG_MAX_MAP_SIZE_BYTE = 3;
  static final int LG_CUR_MAP_SIZE_BYTE = 4;
  static final int FLAGS_BYTE = 5;
  static final int SER_DE_ID_SHORT = 6; // to 7
  static final int ACTIVE_ITEMS_INT = 8; // to 11 : 0 to 4 in pre1
  static final int STREAMLENGTH_LONG = 16; // to 23 : pre2
  static final int OFFSET_LONG = 24; // to 31 : pre3
  // flag bit masks
  // due to a mistake different bits were used in C++ and Java to indicate empty sketch
  // therefore both are set and checked for compatibility with historical binary format
  static final int EMPTY_FLAG_MASK = 5; //binary 101: bits 0 and 2
  // Specific values for this implementation
  static final int SER_VER = 1;
  /**
   * Returns a human readable string summary of the preamble state of the given Memory.
   * Note: other than making sure that the given Memory size is large
   * enough for just the preamble, this does not do much value checking of the contents of the
   * preamble as this is primarily a tool for debugging the preamble visually.
   *
   * @param srcMem the given Memory.
   * @return the summary preamble string.
   */
  public static String preambleToString(final Memory srcMem) {
    final long pre0 = checkPreambleSize(srcMem); //make sure we can get the assumed preamble
    final int preLongs = extractPreLongs(pre0); //byte 0
    final int serVer = extractSerVer(pre0); //byte 1
    final Family family = Family.idToFamily(extractFamilyID(pre0)); //byte 2
    final int lgMaxMapSize = extractLgMaxMapSize(pre0); //byte 3
    final int lgCurMapSize = extractLgCurMapSize(pre0); //byte 4
    final int flags = extractFlags(pre0); //byte 5
    final String flagsStr = zeroPad(Integer.toBinaryString(flags), 8) + ", " + (flags);
    final boolean empty = (flags & EMPTY_FLAG_MASK) > 0; //true if either historical empty bit is set
    final int maxMapSize = 1 << lgMaxMapSize;
    final int curMapSize = 1 << lgCurMapSize;
    final int maxPreLongs = Family.FREQUENCY.getMaxPreLongs();
    //Assumed if preLongs == 1
    int activeItems = 0;
    long streamLength = 0;
    long offset = 0;
    //Assumed if preLongs == maxPreLongs
    if (preLongs == maxPreLongs) {
      //get full preamble
      final long[] preArr = new long[preLongs];
      srcMem.getLongArray(0, preArr, 0, preLongs);
      activeItems = extractActiveItems(preArr[1]);
      streamLength = preArr[2];
      offset = preArr[3];
    }
    final StringBuilder sb = new StringBuilder();
    sb.append(LS)
      .append("### FREQUENCY SKETCH PREAMBLE SUMMARY:").append(LS)
      .append("Byte 0: Preamble Longs : ").append(preLongs).append(LS)
      .append("Byte 1: Serialization Version: ").append(serVer).append(LS)
      .append("Byte 2: Family : ").append(family.toString()).append(LS)
      .append("Byte 3: MaxMapSize : ").append(maxMapSize).append(LS)
      .append("Byte 4: CurMapSize : ").append(curMapSize).append(LS)
      .append("Byte 5: Flags Field : ").append(flagsStr).append(LS)
      .append(" EMPTY : ").append(empty).append(LS);
    if (preLongs == 1) {
      sb.append(" --ABSENT, ASSUMED:").append(LS);
    } else { //preLongs == maxPreLongs
      sb.append("Bytes 8-11 : ActiveItems : ").append(activeItems).append(LS);
      sb.append("Bytes 16-23: StreamLength : ").append(streamLength).append(LS)
        .append("Bytes 24-31: Offset : ").append(offset).append(LS);
    }
    sb.append( "Preamble Bytes : ").append(preLongs * 8).append(LS);
    sb.append( "TOTAL Sketch Bytes : ").append((preLongs + (activeItems * 2)) << 3)
      .append(LS)
      .append("### END FREQUENCY SKETCH PREAMBLE SUMMARY").append(LS);
    return sb.toString();
  }
  // @formatter:on
  //Each extractXxx pulls one preamble field out of the given raw preamble long;
  //each insertXxx writes one field into it, preserving all other bits.
  static int extractPreLongs(final long pre0) { //Byte 0
    final long mask = 0X3FL; //Lower 6 bits
    return (int) (pre0 & mask);
  }
  static int extractSerVer(final long pre0) { //Byte 1
    final int shift = SER_VER_BYTE << 3;
    final long mask = 0XFFL;
    return (int) ((pre0 >>> shift) & mask);
  }
  static int extractFamilyID(final long pre0) { //Byte 2
    final int shift = FAMILY_BYTE << 3;
    final long mask = 0XFFL;
    return (int) ((pre0 >>> shift) & mask);
  }
  static int extractLgMaxMapSize(final long pre0) { //Byte 3
    final int shift = LG_MAX_MAP_SIZE_BYTE << 3;
    final long mask = 0XFFL;
    return (int) ((pre0 >>> shift) & mask);
  }
  static int extractLgCurMapSize(final long pre0) { //Byte 4
    final int shift = LG_CUR_MAP_SIZE_BYTE << 3;
    final long mask = 0XFFL;
    return (int) ((pre0 >>> shift) & mask);
  }
  static int extractFlags(final long pre0) { //Byte 5
    final int shift = FLAGS_BYTE << 3;
    final long mask = 0XFFL;
    return (int) ((pre0 >>> shift) & mask);
  }
  static int extractActiveItems(final long pre1) { //Bytes 8 to 11
    final long mask = 0XFFFFFFFFL;
    return (int) (pre1 & mask) ;
  }
  static long insertPreLongs(final int preLongs, final long pre0) { //Byte 0
    final long mask = 0X3FL; //Lower 6 bits
    return (preLongs & mask) | (~mask & pre0);
  }
  static long insertSerVer(final int serVer, final long pre0) { //Byte 1
    final int shift = SER_VER_BYTE << 3;
    final long mask = 0XFFL;
    return ((serVer & mask) << shift) | (~(mask << shift) & pre0);
  }
  static long insertFamilyID(final int familyID, final long pre0) { //Byte 2
    final int shift = FAMILY_BYTE << 3;
    final long mask = 0XFFL;
    return ((familyID & mask) << shift) | (~(mask << shift) & pre0);
  }
  static long insertLgMaxMapSize(final int lgMaxMapSize, final long pre0) { //Byte 3
    final int shift = LG_MAX_MAP_SIZE_BYTE << 3;
    final long mask = 0XFFL;
    return ((lgMaxMapSize & mask) << shift) | (~(mask << shift) & pre0);
  }
  static long insertLgCurMapSize(final int lgCurMapSize, final long pre0) { //Byte 4
    final int shift = LG_CUR_MAP_SIZE_BYTE << 3;
    final long mask = 0XFFL;
    return ((lgCurMapSize & mask) << shift) | (~(mask << shift) & pre0);
  }
  static long insertFlags(final int flags, final long pre0) { //Byte 5
    final int shift = FLAGS_BYTE << 3;
    final long mask = 0XFFL;
    return ((flags & mask) << shift) | (~(mask << shift) & pre0);
  }
  static long insertActiveItems(final int activeItems, final long pre1) { //Bytes 8 to 11
    final long mask = 0XFFFFFFFFL;
    return (activeItems & mask) | (~mask & pre1);
  }
  /**
   * Checks Memory for capacity to hold the preamble and returns the first 8 bytes.
   * @param mem the given Memory
   * @return the first 8 bytes of preamble as a long.
   */
  static long checkPreambleSize(final Memory mem) {
    final long cap = mem.getCapacity();
    if (cap < 8) { throwNotBigEnough(cap, 8); }
    final long pre0 = mem.getLong(0);
    final int preLongs = (int) (pre0 & 0X3FL); //lower 6 bits
    final int required = Math.max(preLongs << 3, 8);
    if (cap < required) { throwNotBigEnough(cap, required); }
    return pre0;
  }
  private static void throwNotBigEnough(final long cap, final int required) {
    throw new SketchesArgumentException(
        "Possible Corruption: "
        + "Size of byte array or Memory not large enough for Preamble: Size: " + cap
        + ", Required: " + required);
  }
}
| 2,626 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/frequencies/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package is dedicated to streaming algorithms that enable estimation of the
* frequency of occurrence of items in a weighted multiset stream of items.
* If the frequency distribution of items is sufficiently skewed, these algorithms are very
* useful in identifying the "Heavy Hitters" that occurred most frequently in the stream.
* The accuracy of the estimation of the frequency of an item has well understood error
* bounds that can be returned by the sketch.
*
* <p>These algorithms are sometimes referred to as "TopN" algorithms.</p>
*/
package org.apache.datasketches.frequencies;
| 2,627 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/fdt/Group.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.fdt;
/**
* Defines a Group from a Frequent Distinct Tuple query. This class is called internally during
* post processing and is not intended to be called by the user.
* @author Lee Rhodes
*/
public class Group implements Comparable<Group> {
  private int count = 0;        //number of retained rows associated with this group
  private double est = 0;       //estimate of the original population associated with this group
  private double ub = 0;        //upper bound of the estimate
  private double lb = 0;        //lower bound of the estimate
  private double fraction = 0;  //fraction of all retained rows of the sketch in this group
  private double rse = 0;       //estimated Relative Standard Error for this group
  private String priKey = null; //the primary key for this group
  //row and header formats used by toString() and getHeader(), respectively
  private final static String fmt =
      "%,12d" + "%,15.2f" + "%,15.2f" + "%,15.2f" + "%12.6f" + "%12.6f" + " %s";
  private final static String hfmt =
      "%12s" + "%15s" + "%15s" + "%15s" + "%12s" + "%12s" + " %s";
  /**
   * Construct an empty Group
   */
  public Group() { }
/**
* Specifies the parameters to be listed as columns
* @param priKey the primary key of the FDT sketch
* @param count the number of retained rows associated with this group
* @param estimate the estimate of the original population associated with this group
* @param ub the upper bound of the estimate
* @param lb the lower bound of the estimate
* @param fraction the fraction of all retained rows of the sketch associated with this group
* @param rse the estimated Relative Standard Error for this group.
* @return return this
*/
public Group init(final String priKey, final int count, final double estimate, final double ub,
final double lb, final double fraction, final double rse) {
this.count = count;
est = estimate;
this.ub = ub;
this.lb = lb;
this.fraction = fraction;
this.rse = rse;
this.priKey = priKey;
return this;
}
/**
* @return priKey of type T
*/
public String getPrimaryKey() { return priKey; }
/**
* @return the count
*/
public int getCount() { return count; }
/**
* @return the estimate
*/
public double getEstimate() { return est; }
/**
* @return the upper bound
*/
public double getUpperBound() { return ub; }
/**
* @return the lower bound
*/
public double getLowerBound() { return lb; }
/**
* @return the fraction for this group
*/
public double getFraction() { return fraction; }
/**
* @return the RSE
*/
public double getRse() { return rse; }
/**
* @return the descriptive header
*/
public String getHeader() {
return String.format(hfmt,"Count", "Est", "UB", "LB", "Fraction", "RSE", "PriKey");
}
@Override
public String toString() {
return String.format(fmt, count, est, ub, lb, fraction, rse, priKey);
}
/**
* @param that The Group to compare to
*/
@Override
public int compareTo(final Group that) {
return that.count - count; //decreasing
}
@Override
public boolean equals(final Object that) {
if (this == that) { return true; }
if (!(that instanceof Group)) { return false; }
return ((Group)that).count == count;
}
@Override
public int hashCode() {
return Integer.MAX_VALUE - count; //MAX_VALUE is a Double Mersenne Prime = 2^31 - 1 = M_M_5
}
}
| 2,628 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/fdt/FdtSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.fdt;
import java.util.List;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.thetacommon.ThetaUtil;
import org.apache.datasketches.tuple.strings.ArrayOfStringsSketch;
/**
 * A Frequent Distinct Tuples sketch.
 *
 * <p>Suppose the data is a stream of pairs {IP address, User ID} and the goal is to identify
 * the IP addresses that have the most distinct User IDs, or conversely, the User IDs that have
 * the most distinct IP addresses. This is a common challenge in the analysis of big data and
 * the FDT sketch helps solve this problem using probabilistic techniques.</p>
 *
 * <p>More generally, given a multiset of tuples with dimensions <i>{d1,d2, d3, ..., dN}</i>
 * and a primary subset of dimensions <i>M &lt; N</i>, the task is to identify the combinations
 * of the <i>M</i> subset dimensions that have the most frequent number of distinct combinations
 * of the <i>N-M</i> non-primary dimensions.</p>
 *
 * <p>Please refer to the web page
 * <a href="https://datasketches.apache.org/docs/Frequency/FrequentDistinctTuplesSketch.html">
 * https://datasketches.apache.org/docs/Frequency/FrequentDistinctTuplesSketch.html</a> for a
 * more complete discussion about this sketch.</p>
 *
 * @author Lee Rhodes
 */
public class FdtSketch extends ArrayOfStringsSketch {

  /**
   * Creates a new Frequent Distinct Tuples sketch from the given
   * log-base2 of the required nominal entries.
   * @param lgK Log-base2 of required nominal entries.
   */
  public FdtSketch(final int lgK) {
    super(lgK);
  }

  /**
   * Used by deserialization.
   * @param mem the image of a FdtSketch
   * @deprecated As of 3.0.0, heapifying an UpdatableSketch is deprecated.
   * This capability will be removed in a future release.
   * Heapifying a CompactSketch is not deprecated.
   */
  @Deprecated
  FdtSketch(final Memory mem) {
    super(mem);
  }

  /**
   * Creates a new Frequent Distinct Tuples sketch with a size derived from the given
   * threshold and rse.
   * @param threshold the fraction, between zero and 1.0, of the total distinct stream length
   * that defines a "Frequent" (or heavy) item.
   * @param rse the maximum Relative Standard Error for the estimate of the distinct population
   * of a reported tuple (selected with a primary key) at the threshold.
   */
  public FdtSketch(final double threshold, final double rse) {
    super(computeLgK(threshold, rse));
  }

  /**
   * Copy Constructor
   * @param sketch the sketch to copy
   */
  public FdtSketch(final FdtSketch sketch) {
    super(sketch);
  }

  /**
   * @return a deep copy of this sketch
   */
  @Override
  public FdtSketch copy() {
    return new FdtSketch(this);
  }

  /**
   * Updates the sketch with the given string array tuple, which serves as both key and value.
   * @param tuple the given string array tuple.
   */
  public void update(final String[] tuple) {
    super.update(tuple, tuple);
  }

  /**
   * Returns an ordered List of Groups of the most frequent distinct population of subset tuples
   * represented by the count of entries of each group.
   * @param priKeyIndices these indices define the dimensions used for the Primary Keys.
   * @param limit the maximum number of groups to return. If this value is &le; 0, all
   * groups will be returned.
   * @param numStdDev the number of standard deviations for the upper and lower error bounds,
   * this value is an integer and must be one of 1, 2, or 3.
   * <a href="{@docRoot}/resources/dictionary.html#numStdDev">See Number of Standard Deviations</a>
   * @param sep the separator character
   * @return an ordered List of Groups of the most frequent distinct population of subset tuples
   * represented by the count of entries of each group.
   */
  public List<Group> getResult(final int[] priKeyIndices, final int limit, final int numStdDev,
      final char sep) {
    final PostProcessor proc = new PostProcessor(this, new Group(), sep);
    return proc.getGroupList(priKeyIndices, numStdDev, limit);
  }

  /**
   * Returns the PostProcessor that enables multiple queries against the sketch results.
   * This assumes the default Group and the default separator character '|'.
   * @return the PostProcessor
   */
  public PostProcessor getPostProcessor() {
    return getPostProcessor(new Group(), '|');
  }

  /**
   * Returns the PostProcessor that enables multiple queries against the sketch results.
   * @param group the Group class to use during post processing.
   * @param sep the separator character.
   * @return the PostProcessor
   */
  public PostProcessor getPostProcessor(final Group group, final char sep) {
    return new PostProcessor(this, group, sep);
  }

  // Restricted

  /**
   * Computes LgK given the threshold and RSE.
   * @param threshold the fraction, between zero and 1.0, of the total stream length that defines
   * a "Frequent" (or heavy) tuple.
   * @param rse the maximum Relative Standard Error for the estimate of the distinct population
   * of a reported tuple (selected with a primary key) at the threshold.
   * @return LgK
   */
  static int computeLgK(final double threshold, final double rse) {
    final double requiredEntries = Math.ceil(1.0 / (threshold * rse * rse));
    final int lgK = (int) Math.ceil(Math.log(requiredEntries) / Math.log(2));
    if (lgK > ThetaUtil.MAX_LG_NOM_LONGS) {
      throw new SketchesArgumentException("Requested Sketch (LgK = " + lgK + " > 2^26), "
          + "either increase the threshold, the rse or both.");
    }
    return lgK;
  }
}
| 2,629 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/fdt/PostProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.fdt;
import static org.apache.datasketches.common.Util.ceilingIntPowerOf2;
import static org.apache.datasketches.thetacommon.HashOperations.hashSearchOrInsert;
import static org.apache.datasketches.tuple.Util.stringHash;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import org.apache.datasketches.tuple.TupleSketchIterator;
import org.apache.datasketches.tuple.strings.ArrayOfStringsSummary;
/**
 * This processes the contents of a FDT sketch to extract the
 * primary keys with the most frequent unique combinations of the non-primary dimensions.
 * The source sketch is not modified.
 *
 * @author Lee Rhodes
 */
public class PostProcessor {
  private final FdtSketch sketch; //deep copy of the source sketch; queries never touch the original
  private final char sep;         //separator used when composing primary-key strings
  private int groupCount;         //number of distinct primary keys found by the last populateMap()
  @SuppressWarnings("unused")
  private Group group; //uninitialized
  //NOTE(review): 'group' is assigned in the constructor but never read; populateList()
  //always allocates new Group() instances. Confirm whether it was meant to act as a prototype.

  //simple hash-map: open addressing over three parallel arrays sharing slot indices
  private boolean mapValid;     //true once populateMap() has run
  private final int mapArrSize; //power of 2, sized for a 0.75 load factor
  private final long[] hashArr; //hash of the primary key per slot; 0 marks an empty slot
  private final String[] priKeyArr; //primary-key string per slot
  private final int[] counterArr;   //count of retained rows sharing the primary key per slot

  /**
   * Construct with a populated FdtSketch
   * @param sketch the given sketch to query.
   * @param group the Group
   * @param sep the separator character
   */
  public PostProcessor(final FdtSketch sketch, final Group group, final char sep) {
    Objects.requireNonNull(sketch, "sketch must be non-null");
    Objects.requireNonNull(group, "group must be non-null");
    this.sketch = sketch.copy(); //defensive copy so the source sketch stays untouched
    this.sep = sep;
    final int numEntries = sketch.getRetainedEntries();
    mapArrSize = ceilingIntPowerOf2((int)(numEntries / 0.75));
    hashArr = new long[mapArrSize];
    priKeyArr = new String[mapArrSize];
    counterArr = new int[mapArrSize];
    mapValid = false; //map is built lazily on the first query
    this.group = group;
  }

  /**
   * Returns the number of groups in the final sketch.
   * @return the number of groups in the final sketch.
   */
  public int getGroupCount() {
    return groupCount;
  }

  /**
   * Return the most frequent Groups associated with Primary Keys based on the size of the groups.
   * @param priKeyIndices the indices of the primary dimensions
   * @param numStdDev the number of standard deviations for the error bounds, this value is an
   * integer and must be one of 1, 2, or 3.
   * <a href="{@docRoot}/resources/dictionary.html#numStdDev">See Number of Standard Deviations</a>
   * @param limit the maximum number of rows to return. If &le; 0, all rows will be returned.
   * @return the most frequent Groups associated with Primary Keys based on the size of the groups.
   */
  public List<Group> getGroupList(final int[] priKeyIndices, final int numStdDev,
      final int limit) {
    //allows subsequent queries with different priKeyIndices without rebuilding the map
    //NOTE(review): the cached map was built from the priKeyIndices of the FIRST query;
    //a later call with DIFFERENT indices silently reuses that grouping — verify this is intended.
    if (!mapValid) { populateMap(priKeyIndices); }
    return populateList(numStdDev, limit);
  }

  /**
   * Scan each entry in the sketch. Count the number of duplicate occurrences of each
   * primary key in a hash map.
   * @param priKeyIndices identifies the primary key indices
   */
  private void populateMap(final int[] priKeyIndices) {
    final TupleSketchIterator<ArrayOfStringsSummary> it = sketch.iterator();
    //reset all slots so a rebuild starts from a clean map
    Arrays.fill(hashArr, 0L);
    Arrays.fill(priKeyArr, null);
    Arrays.fill(counterArr, 0);
    groupCount = 0;
    final int lgMapArrSize = Integer.numberOfTrailingZeros(mapArrSize); //mapArrSize is a power of 2
    while (it.next()) {
      //getSummary() is not a copy, but getValue() is
      final String[] arr = it.getSummary().getValue();
      final String priKey = getPrimaryKey(arr, priKeyIndices, sep);
      final long hash = stringHash(priKey);
      final int index = hashSearchOrInsert(hashArr, lgMapArrSize, hash);
      if (index < 0) { //was empty, hash inserted
        final int idx = -(index + 1); //actual index
        counterArr[idx] = 1;
        groupCount++;
        priKeyArr[idx] = priKey;
      } else { //found, duplicate
        counterArr[index]++; //increment
      }
    }
    mapValid = true;
  }

  /**
   * Create the list of groups along with the error statistics
   * @param numStdDev number of standard deviations
   * @param limit the maximum size of the list to return
   * @return the list of groups along with the error statistics
   */
  private List<Group> populateList(final int numStdDev, final int limit) {
    final List<Group> list = new ArrayList<>();
    for (int i = 0; i < mapArrSize; i++) {
      if (hashArr[i] != 0) { //occupied slot
        final String priKey = priKeyArr[i];
        final int count = counterArr[i];
        final double est = sketch.getEstimate(count);
        final double ub = sketch.getUpperBound(numStdDev, count);
        final double lb = sketch.getLowerBound(numStdDev, count);
        //'thresh' is this group's fraction of all retained rows; passed to init() as 'fraction'
        final double thresh = (double) count / sketch.getRetainedEntries();
        //RSE derived from the 1-std-dev upper bound relative to the estimate
        final double rse = (sketch.getUpperBound(1, count) / est) - 1.0;
        final Group gp = new Group(); //see NOTE(review) on the 'group' field above
        gp.init(priKey, count, est, ub, lb, thresh, rse);
        list.add(gp);
      }
    }
    list.sort(null); //Comparable implemented in Group: decreasing count order
    final int totLen = list.size();
    final List<Group> returnList;
    if ((limit > 0) && (limit < totLen)) {
      returnList = list.subList(0, limit); //view, not a copy, of the top 'limit' groups
    } else {
      returnList = list;
    }
    return returnList;
  }

  /**
   * Extract simple string Primary Key defined by the <i>priKeyIndices</i> from the given tuple.
   * @param tuple the given tuple containing the Primary Key
   * @param priKeyIndices the indices indicating the ordering and selection of dimensions defining
   * the Primary Key
   * @param sep the separator character
   * @return a simple string Primary Key defined by the <i>priKeyIndices</i> from the given tuple.
   */
  //also used by test
  private static String getPrimaryKey(final String[] tuple, final int[] priKeyIndices,
      final char sep) {
    assert priKeyIndices.length < tuple.length; //at least one non-primary dimension must remain
    final StringBuilder sb = new StringBuilder();
    final int keys = priKeyIndices.length;
    for (int i = 0; i < keys; i++) {
      final int idx = priKeyIndices[i];
      sb.append(tuple[idx]);
      if ((i + 1) < keys) { sb.append(sep); } //separator between, but not after, key parts
    }
    return sb.toString();
  }
}
| 2,630 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/fdt/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Frequent Distinct Tuples Sketch
*/
package org.apache.datasketches.fdt;
| 2,631 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static java.lang.Math.abs;
import static java.lang.Math.ceil;
import static java.lang.Math.exp;
import static java.lang.Math.log;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static java.lang.Math.pow;
import static java.lang.Math.round;
import static org.apache.datasketches.common.Family.KLL;
import static org.apache.datasketches.common.Util.floorPowerOf2;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.EMPTY_BIT_MASK;
import static org.apache.datasketches.kll.KllPreambleUtil.LEVEL_ZERO_SORTED_BIT_MASK;
import static org.apache.datasketches.kll.KllPreambleUtil.SINGLE_ITEM_BIT_MASK;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_EMPTY;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_FULL;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.DOUBLES_SKETCH;
import static org.apache.datasketches.kll.KllSketch.SketchType.FLOATS_SKETCH;
import static org.apache.datasketches.kll.KllSketch.SketchType.ITEMS_SKETCH;
import static org.apache.datasketches.quantilescommon.QuantilesAPI.UNSUPPORTED_MSG;
import java.nio.ByteOrder;
import java.util.Arrays;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.Util;
import org.apache.datasketches.kll.KllSketch.SketchStructure;
import org.apache.datasketches.kll.KllSketch.SketchType;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
/**
* This class provides some useful sketch analysis tools that are used internally.
*
* @author Lee Rhodes
*/
@SuppressWarnings("unchecked")
final class KllHelper {
//Summary of a sketch growth computation; populated by getGrowthSchemeForGivenN().
static class GrowthStats {
  SketchType sketchType; //DOUBLES_SKETCH or FLOATS_SKETCH (ITEMS_SKETCH is rejected)
  int k;                 //configured accuracy parameter
  int m;                 //configured minimum level width
  long givenN;           //the requested stream length
  long maxN;             //max stream length accommodated at numLevels
  int numLevels;         //number of levels required to accommodate givenN
  int maxItems;          //total item capacity at numLevels
  int compactBytes;      //serialized size in compact form
  int updatableBytes;    //serialized size in updatable form (compactBytes + Integer.BYTES)
}
//Per-computation summary of level capacity; see getLevelCapacityItems() and
//getFinalSketchStatsAtNumLevels().
static class LevelStats {
  long n;        //weighted item count: numItems << level (or a cumulative sum thereof)
  public int numLevels;
  int numItems;  //item capacity
  LevelStats(final long n, final int numLevels, final int numItems) {
    this.n = n;
    this.numLevels = numLevels;
    this.numItems = numItems;
  }
}
static final double EPS_DELTA_THRESHOLD = 1E-6; //rounding tolerance used by getKFromEpsilon()
static final double MIN_EPS = 4.7634E-5; //lowest achievable eps; floor applied in getKFromEpsilon()
//The four coefficients below were fitted empirically; see getNormalizedRankError().
static final double PMF_COEF = 2.446;
static final double PMF_EXP = 0.9433;
static final double CDF_COEF = 2.296;
static final double CDF_EXP = 0.9723;

/**
 * This is the exact powers of 3 from 3^0 to 3^30 where the exponent is the index
 */
//NOTE(review): this lookup table could be declared final; no reassignment is visible in this
//part of the file — confirm none exists elsewhere before adding it.
private static long[] powersOfThree =
    new long[] {1, 3, 9, 27, 81, 243, 729, 2187, 6561, 19683, 59049, 177147, 531441,
    1594323, 4782969, 14348907, 43046721, 129140163, 387420489, 1162261467,
    3486784401L, 10460353203L, 31381059609L, 94143178827L, 282429536481L,
    847288609443L, 2541865828329L, 7625597484987L, 22876792454961L, 68630377364883L,
    205891132094649L};
/**
 * Checks the validity of the given k.
 * @param k must be &ge; m and &le; KllSketch.MAX_K.
 * @param m the minimum level width; forms the lower bound for k.
 * @throws SketchesArgumentException if k is out of range.
 */
static void checkK(final int k, final int m) {
  if (k < m || k > KllSketch.MAX_K) {
    throw new SketchesArgumentException(
        "K must be >= " + m + " and <= " + KllSketch.MAX_K + ": " + k);
  }
}
/**
 * Checks the validity of the given m (the minimum level width).
 * @param m must be even and within [KllSketch.MIN_M, KllSketch.MAX_M].
 * @throws SketchesArgumentException if m is out of range or odd.
 */
static void checkM(final int m) {
  final boolean isOdd = (m & 1) == 1;
  if ((m < KllSketch.MIN_M) || (m > KllSketch.MAX_M) || isOdd) {
    throw new SketchesArgumentException(
        "M must be >= 2, <= 8 and even: " + m);
  }
}
/**
 * Returns the approximate maximum number of items that this sketch can handle
 * @param k The sizing / accuracy parameter of the sketch in items.
 * Note: this method actually works for k items up to k = 2^29 and 61 levels,
 * however only k items up to (2^16 - 1) are currently used by the sketch.
 * @param m the size of the smallest level in items. Default is 8.
 * @param numLevels the upper bound number of levels based on <i>n</i> items.
 * @return the total item capacity of the sketch.
 */
static int computeTotalItemCapacity(final int k, final int m, final int numLevels) {
  //Accumulate in a long to avoid intermediate overflow before the final narrowing cast.
  long capacity = 0;
  for (int lvl = numLevels; --lvl >= 0; ) { //summation order is irrelevant
    capacity += levelCapacity(k, numLevels, lvl, m);
  }
  return (int) capacity;
}
/**
 * Convert the individual weights into cumulative weights, in place.
 * An array of {1,1,1,1} becomes {1,2,3,4}.
 * @param array of actual weights from the sketch, where first element is not zero.
 * @return total weight
 */
public static long convertToCumulative(final long[] array) {
  long runningTotal = 0;
  for (int i = 0; i < array.length; i++) {
    runningTotal += array[i];
    array[i] = runningTotal; //overwrite each weight with the prefix sum so far
  }
  return runningTotal;
}
/**
 * Returns the number of items currently held in the given level.
 * @param level the zero-based level of interest
 * @param numLevels the current number of levels in the sketch
 * @param levels the levels offset array; levels[i] is the start offset of level i
 * @return the current item count of the level, or 0 if the level does not exist
 */
static int currentLevelSizeItems(final int level, final int numLevels, final int[] levels) {
  return (level >= numLevels) ? 0 : (levels[level + 1] - levels[level]);
}
/**
 * Given k, m, and numLevels, this computes and optionally prints the structure of the sketch when the given
 * number of levels are completely filled.
 * @param k the given user configured sketch parameter
 * @param m the given user configured sketch parameter
 * @param numLevels the given number of levels of the sketch
 * @param printSketchStructure if true will print the details of the sketch structure at the given numLevels.
 * @return LevelStats with the final summary of the sketch's cumulative N,
 * and cumulative items at the given numLevels.
 */
static LevelStats getFinalSketchStatsAtNumLevels(
    final int k,
    final int m,
    final int numLevels,
    final boolean printSketchStructure) {
  int cumItems = 0; //running total of item capacity over the levels
  long cumN = 0;    //running total of the weighted item count (N) over the levels
  if (printSketchStructure) {
    println("SKETCH STRUCTURE:");
    println("Given K : " + k);
    println("Given M : " + m);
    println("Given NumLevels: " + numLevels);
    printf("%6s %8s %12s %18s %18s\n", "Level", "Items", "CumItems", "N at Level", "CumN");
  }
  for (int level = 0; level < numLevels; level++) {
    final LevelStats lvlStats = getLevelCapacityItems(k, m, numLevels, level);
    cumItems += lvlStats.numItems;
    cumN += lvlStats.n;
    if (printSketchStructure) {
      printf("%6d %,8d %,12d %,18d %,18d\n", level, lvlStats.numItems, cumItems, lvlStats.n, cumN);
    }
  }
  return new LevelStats(cumN, numLevels, cumItems);
}
/**
 * This method is for direct Double and Float sketches only.
 * Given k, m, n, and the sketch type, this computes (and optionally prints) the growth scheme for a sketch as it
 * grows large enough to accommodate a stream length of n items.
 * @param k the given user configured sketch parameter
 * @param m the given user configured sketch parameter
 * @param n the desired stream length
 * @param sketchType the given sketch type: either DOUBLES_SKETCH or FLOATS_SKETCH.
 * @param printGrowthScheme if true the entire growth scheme of the sketch will be printed.
 * @return GrowthStats with the final numItems of the growth scheme
 */
static GrowthStats getGrowthSchemeForGivenN(
    final int k,
    final int m,
    final long n,
    final SketchType sketchType,
    final boolean printGrowthScheme) {
  if (sketchType == ITEMS_SKETCH) { throw new SketchesArgumentException(UNSUPPORTED_MSG); }
  LevelStats lvlStats;
  final GrowthStats gStats = new GrowthStats();
  gStats.numLevels = 0;
  gStats.k = k;
  gStats.m = m;
  gStats.givenN = n;
  gStats.sketchType = sketchType;
  if (printGrowthScheme) {
    println("GROWTH SCHEME:");
    println("Given SketchType: " + gStats.sketchType.toString());
    println("Given K : " + gStats.k);
    println("Given M : " + gStats.m);
    println("Given N : " + gStats.givenN);
    printf("%10s %10s %20s %13s %15s\n", "NumLevels", "MaxItems", "MaxN", "CompactBytes", "UpdatableBytes");
  }
  final int typeBytes = sketchType.getBytes();
  //add levels one at a time until the capacity (in stream length) covers the requested n
  do {
    gStats.numLevels++;
    lvlStats = getFinalSketchStatsAtNumLevels(gStats.k, gStats.m, gStats.numLevels, false);
    gStats.maxItems = lvlStats.numItems;
    gStats.maxN = lvlStats.n;
    //compact layout: preamble + levels array + min/max items + retained items
    gStats.compactBytes =
        gStats.maxItems * typeBytes + gStats.numLevels * Integer.BYTES + 2 * typeBytes + DATA_START_ADR;
    gStats.updatableBytes = gStats.compactBytes + Integer.BYTES;
    if (printGrowthScheme) {
      printf("%10d %,10d %,20d %,13d %,15d\n",
          gStats.numLevels, gStats.maxItems, gStats.maxN, gStats.compactBytes, gStats.updatableBytes);
    }
  } while (lvlStats.n < n);
  return gStats;
}
// constants were derived as the best fit to 99 percentile empirically measured max error in
// thousands of trials
/**
 * Returns the smallest k yielding a normalized rank error no greater than the given epsilon.
 * This is the inverse of getNormalizedRankError(k, pmf).
 * @param epsilon the desired normalized rank error
 * @param pmf if true, invert the "double-sided" PMF error model; otherwise the CDF model
 * @return the k value, clamped to [KllSketch.MIN_M, KllSketch.MAX_K]
 */
static int getKFromEpsilon(final double epsilon, final boolean pmf) {
  //Ensure that eps is >= than the lowest possible eps given MAX_K and pmf=false.
  final double eps = max(epsilon, MIN_EPS);
  //solve eps = COEF / k^EXP for k, i.e. k = (COEF / eps)^(1/EXP)
  final double kdbl = pmf
      ? exp(log(PMF_COEF / eps) / PMF_EXP)
      : exp(log(CDF_COEF / eps) / CDF_EXP);
  final double krnd = round(kdbl);
  final double del = abs(krnd - kdbl);
  //snap to the nearest integer when within tolerance, otherwise round up (conservative)
  final int k = (int) (del < EPS_DELTA_THRESHOLD ? krnd : ceil(kdbl));
  return max(KllSketch.MIN_M, min(KllSketch.MAX_K, k));
}
/**
 * Given k, m, numLevels, this computes the item capacity of a single level.
 * @param k the given user sketch configuration parameter
 * @param m the given user sketch configuration parameter
 * @param numLevels the given number of levels of the sketch
 * @param level the specific level to compute its item capacity
 * @return LevelStats with the computed N and items for the given level.
 */
static LevelStats getLevelCapacityItems(
    final int k,
    final int m,
    final int numLevels,
    final int level) {
  final int itemsAtLevel = KllHelper.levelCapacity(k, numLevels, level, m);
  final long weightedN = (long) itemsAtLevel << level; //each item at this level has weight 2^level
  return new LevelStats(weightedN, numLevels, itemsAtLevel);
}
/**
 * Gets the normalized rank error given k and pmf.
 * Static method version of the <i>getNormalizedRankError(boolean)</i>.
 * @param k the configuration parameter
 * @param pmf if true, returns the "double-sided" normalized rank error for the getPMF() function.
 * Otherwise, it is the "single-sided" normalized rank error for all the other queries.
 * @return if pmf is true, the normalized rank error for the getPMF() function.
 * Otherwise, it is the "single-sided" normalized rank error for all the other queries.
 * @see KllHeapDoublesSketch
 */
// constants were derived as the best fit to 99 percentile empirically measured max error in
// thousands of trials
static double getNormalizedRankError(final int k, final boolean pmf) {
  //power-law model: error = COEF / k^EXP
  return pmf
      ? PMF_COEF / pow(k, PMF_EXP)
      : CDF_COEF / pow(k, CDF_EXP);
}
//Returns the number of retained items in all levels above level 0.
//levels[1] is the start of level 1; levels[numLevels] is the exclusive end of the top level.
static int getNumRetainedAboveLevelZero(final int numLevels, final int[] levels) {
  return levels[numLevels] - levels[1];
}
/**
 * Returns the item capacity of a specific level.
 * @param k the accuracy parameter of the sketch. Because of the Java limits on array sizes,
 * the theoretical maximum k is 2^29. However, this implementation of the KLL sketch
 * limits k to 2^16 -1.
 * @param numLevels the number of current levels in the sketch. Maximum is 61.
 * @param level the zero-based index of a level. This varies from 0 to 60.
 * @param m the minimum level width. Default is 8.
 * @return the capacity of a specific level
 */
static int levelCapacity(final int k, final int numLevels, final int level, final int m) {
  assert (k <= (1 << 29));
  assert (numLevels >= 1) && (numLevels <= 61);
  assert (level >= 0) && (level < numLevels);
  //depth counts down from the top level; capacity shrinks with depth via intCapAux
  //(intCapAux is a private helper defined later in this class), floored at m.
  final int depth = numLevels - level - 1;
  return (int) Math.max(m, intCapAux(k, depth));
}
/**
 * This method is for direct Double and Float sketches only and does the following:
 * <ul>
 * <li>Determines if the required sketch bytes will fit in the current Memory.
 * If so, it will stretch the positioning of the arrays to fit. Otherwise:
 * <li>Allocates a new WritableMemory of the required size</li>
 * <li>Copies over the preamble as is (20 bytes)</li>
 * <li>The caller is responsible for filling the remainder and updating the preamble.</li>
 * </ul>
 *
 * @param sketch The current sketch that needs to be expanded.
 * @param newLevelsArrLen the element length of the new Levels array.
 * @param newItemsArrLen the element length of the new Items array.
 * @return the new expanded memory with preamble, or null if the sketch is not direct.
 */
static WritableMemory memorySpaceMgmt(
    final KllSketch sketch,
    final int newLevelsArrLen,
    final int newItemsArrLen) {
  final KllSketch.SketchType sketchType = sketch.sketchType;
  if (sketchType == ITEMS_SKETCH) { throw new SketchesArgumentException(UNSUPPORTED_MSG); }
  final WritableMemory wmem = sketch.getWritableMemory();
  if (wmem == null) { return null; } //heap-based sketch: nothing to manage
  final WritableMemory oldWmem = wmem;
  final int typeBytes = sketchType.getBytes();
  //required layout: preamble + levels array + min/max items + items array
  final int requiredSketchBytes = DATA_START_ADR
      + newLevelsArrLen * Integer.BYTES
      + 2 * typeBytes
      + newItemsArrLen * typeBytes;
  final WritableMemory newWmem;
  if (requiredSketchBytes > oldWmem.getCapacity()) { //Acquire new WritableMemory
    newWmem = sketch.getMemoryRequestServer().request(oldWmem, requiredSketchBytes);
    oldWmem.copyTo(0, newWmem, 0, DATA_START_ADR); //copy preamble (first 20 bytes)
  }
  else { //Expand or contract in current memory
    newWmem = oldWmem;
  }
  assert requiredSketchBytes <= newWmem.getCapacity();
  return newWmem;
}
//Renders the items array of a generic items sketch as human-readable text, one line per item,
//grouped by level. Parallels outputDoublesData() and outputFloatsData() below.
private static <T> String outputItemsData(final int numLevels, final int[] levelsArr, final Object[] itemsArr,
    final ArrayOfItemsSerDe<T> serDe) {
  final StringBuilder sb = new StringBuilder();
  sb.append("### KLL items data {index, item}:").append(Util.LS);
  //the region below levels[0] holds unused/garbage slots; print it separately
  if (levelsArr[0] > 0) {
    sb.append(" Empty/Garbage:" + Util.LS);
    for (int i = 0; i < levelsArr[0]; i++) {
      sb.append("   ").append(i + ", ").append(serDe.toString((T)itemsArr[i])).append(Util.LS);
    }
  }
  int level = 0;
  while (level < numLevels) {
    final int fromIndex = levelsArr[level];
    final int toIndex = levelsArr[level + 1]; // exclusive
    if (fromIndex < toIndex) { //print the level header only for non-empty levels
      sb.append(" level[").append(level).append("]: offset: " + levelsArr[level] + " wt: " + (1 << level));
      sb.append(Util.LS);
    }
    for (int i = fromIndex; i < toIndex; i++) {
      sb.append("   ").append(i + ", ").append(serDe.toString((T)itemsArr[i])).append(Util.LS);
    }
    level++;
  }
  sb.append(" level[" + level + "]: offset: " + levelsArr[level] + " (Exclusive)");
  sb.append(Util.LS);
  sb.append("### End items data").append(Util.LS);
  return sb.toString();
}
//Doubles variant of outputItemsData(): renders the items array of a doubles sketch as text.
private static String outputDoublesData(final int numLevels, final int[] levelsArr, final double[] doubleItemsArr) {
  final StringBuilder sb = new StringBuilder();
  sb.append("### KLL items data {index, item}:").append(Util.LS);
  //the region below levels[0] holds unused/garbage slots; print it separately
  if (levelsArr[0] > 0) {
    sb.append(" Empty/Garbage:" + Util.LS);
    for (int i = 0; i < levelsArr[0]; i++) {
      sb.append("   ").append(i + ", ").append(doubleItemsArr[i]).append(Util.LS);
    }
  }
  int level = 0;
  while (level < numLevels) {
    final int fromIndex = levelsArr[level];
    final int toIndex = levelsArr[level + 1]; // exclusive
    if (fromIndex < toIndex) { //print the level header only for non-empty levels
      sb.append(" level[").append(level).append("]: offset: " + levelsArr[level] + " wt: " + (1 << level));
      sb.append(Util.LS);
    }
    for (int i = fromIndex; i < toIndex; i++) {
      sb.append("   ").append(i + ", ").append(doubleItemsArr[i]).append(Util.LS);
    }
    level++;
  }
  sb.append(" level[" + level + "]: offset: " + levelsArr[level] + " (Exclusive)");
  sb.append(Util.LS);
  sb.append("### End items data").append(Util.LS);
  return sb.toString();
}
/**
 * Renders the floats items array as {index, item} pairs grouped by level. For debugging.
 * @param numLevels the current number of levels
 * @param levelsArr the levels array; its last entry is the exclusive end of the data
 * @param floatsItemsArr the full backing items array, including free space below levelsArr[0]
 * @return the multi-line text rendering of the items data
 */
private static String outputFloatsData(final int numLevels, final int[] levelsArr, final float[] floatsItemsArr) {
  final StringBuilder out = new StringBuilder();
  out.append("### KLL items data {index, item}:").append(Util.LS);
  if (levelsArr[0] > 0) { //free (garbage) slots below the lowest level
    out.append(" Empty/Garbage:" + Util.LS);
    for (int idx = 0; idx < levelsArr[0]; idx++) {
      out.append(" ").append(idx + ", ").append(floatsItemsArr[idx]).append(Util.LS);
    }
  }
  for (int lvl = 0; lvl < numLevels; lvl++) {
    final int lo = levelsArr[lvl];
    final int hi = levelsArr[lvl + 1]; // exclusive
    if (lo < hi) { //header only for non-empty levels
      out.append(" level[").append(lvl).append("]: offset: " + levelsArr[lvl] + " wt: " + (1 << lvl));
      out.append(Util.LS);
    }
    for (int idx = lo; idx < hi; idx++) {
      out.append(" ").append(idx + ", ").append(floatsItemsArr[idx]).append(Util.LS);
    }
  }
  //the top levels entry is the exclusive end of the data
  out.append(" level[" + numLevels + "]: offset: " + levelsArr[numLevels] + " (Exclusive)");
  out.append(Util.LS);
  out.append("### End items data").append(Util.LS);
  return out.toString();
}
/**
 * Renders the levels array: one line per level with its offset, nominal capacity and
 * actual size. For debugging.
 * @param k the configured k of the sketch
 * @param m the configured minimum level size
 * @param numLevels the current number of levels
 * @param levelsArr the levels array; its last entry is the exclusive end of the data
 * @return the multi-line text rendering of the levels array
 */
static String outputLevels(final int k, final int m, final int numLevels, final int[] levelsArr) {
  final StringBuilder out = new StringBuilder();
  out.append("### KLL levels array:").append(Util.LS)
      .append(" level, offset: nominal capacity, actual size").append(Util.LS);
  int lvl = 0;
  while (lvl < numLevels) {
    out.append(" ").append(lvl).append(", ").append(levelsArr[lvl]).append(": ")
        .append(KllHelper.levelCapacity(k, numLevels, lvl, m))
        .append(", ").append(KllHelper.currentLevelSizeItems(lvl, numLevels, levelsArr)).append(Util.LS);
    lvl++;
  }
  //the top levels entry is the exclusive end of the data
  out.append(" ").append(lvl).append(", ").append(levelsArr[lvl]).append(": (Exclusive)")
      .append(Util.LS);
  out.append("### End levels array").append(Util.LS);
  return out.toString();
}
/**
 * Sums the weighted counts of all levels: each item retained at level i carries
 * weight 2^i. Used to verify that the retained items account for exactly n items.
 * @param num_levels the current number of levels
 * @param levels the levels array (length at least num_levels + 1)
 * @return the total weight of all retained items
 */
static long sumTheSampleWeights(final int num_levels, final int[] levels) {
  long grandTotal = 0;
  for (int lvl = 0; lvl < num_levels; lvl++) {
    final long levelWeight = 1L << lvl; //weight doubles at each level
    grandTotal += levelWeight * (levels[lvl + 1] - levels[lvl]);
  }
  return grandTotal;
}
/**
 * Serializes the given sketch to a byte array in little-endian order.
 * The target structure is derived from the source state: UPDATABLE when requested
 * (and supported), otherwise COMPACT_EMPTY, COMPACT_SINGLE or COMPACT_FULL based on n.
 * @param srcSk the source sketch to serialize
 * @param updatable if true, request the updatable (non-compact) format;
 * ignored for ITEMS_SKETCH, which is always serialized compact.
 * @return the serialized byte array
 */
static byte[] toByteArray(final KllSketch srcSk, final boolean updatable) {
//ITEMS_SKETCH byte array is never updatable
final boolean myUpdatable = srcSk.sketchType == ITEMS_SKETCH ? false : updatable;
final long srcN = srcSk.getN();
//choose the serialized structure from n when a compact form is being written
final SketchStructure tgtStructure;
if (myUpdatable) { tgtStructure = UPDATABLE; }
else if (srcN == 0) { tgtStructure = COMPACT_EMPTY; }
else if (srcN == 1) { tgtStructure = COMPACT_SINGLE; }
else { tgtStructure = COMPACT_FULL; }
final int totalBytes = srcSk.currentSerializedSizeBytes(myUpdatable);
final byte[] bytesOut = new byte[totalBytes];
final WritableBuffer wbuf = WritableMemory.writableWrap(bytesOut).asWritableBuffer(ByteOrder.LITTLE_ENDIAN);
//ints 0,1
final byte preInts = (byte)tgtStructure.getPreInts();
final byte serVer = (byte)tgtStructure.getSerVer();
final byte famId = (byte)(KLL.getID());
//flags byte: empty, level-zero-sorted, and single-item bits
final byte flags = (byte) ((srcSk.isEmpty() ? EMPTY_BIT_MASK : 0)
| (srcSk.isLevelZeroSorted() ? LEVEL_ZERO_SORTED_BIT_MASK : 0)
| (srcSk.getN() == 1 ? SINGLE_ITEM_BIT_MASK : 0));
final short k = (short) srcSk.getK();
final byte m = (byte) srcSk.getM();
//load first 8 bytes
wbuf.putByte(preInts);
wbuf.putByte(serVer);
wbuf.putByte(famId);
wbuf.putByte(flags);
wbuf.putShort(k);
wbuf.putByte(m);
wbuf.incrementPosition(1); //skip one unused byte to complete the 8-byte preamble
if (tgtStructure == COMPACT_EMPTY) {
return bytesOut; //preamble only
}
if (tgtStructure == COMPACT_SINGLE) {
//the single serialized item immediately follows the 8-byte preamble
final byte[] siByteArr = srcSk.getSingleItemByteArr();
final int len = siByteArr.length;
wbuf.putByteArray(siByteArr, 0, len);
wbuf.incrementPosition(-len); //NOTE(review): rewinds the buffer position after the write; position is never read again — confirm intent
return bytesOut;
}
// Tgt is either COMPACT_FULL or UPDATABLE
//ints 2,3
final long n = srcSk.getN();
//ints 4
final short minK = (short) srcSk.getMinK();
final byte numLevels = (byte) srcSk.getNumLevels();
//end of full preamble
final int[] lvlsArr = srcSk.getLevelsArray(tgtStructure);
final byte[] minMaxByteArr = srcSk.getMinMaxByteArr();
//COMPACT_FULL stores only retained items; UPDATABLE stores the whole items array
final byte[] itemsByteArr = tgtStructure == COMPACT_FULL
? srcSk.getRetainedItemsByteArr()
: srcSk.getTotalItemsByteArr();
wbuf.putLong(n);
wbuf.putShort(minK);
wbuf.putByte(numLevels);
wbuf.incrementPosition(1); //skip one unused byte
wbuf.putIntArray(lvlsArr, 0, lvlsArr.length);
wbuf.putByteArray(minMaxByteArr, 0, minMaxByteArr.length);
wbuf.putByteArray(itemsByteArr, 0, itemsByteArr.length);
return bytesOut;
}
/**
 * Builds the human-readable summary of the given sketch, optionally followed by the
 * levels array rendering and the raw items data rendering.
 * @param <T> the item class (used only when the sketch is an ITEMS_SKETCH)
 * @param sketch the sketch to render
 * @param withLevels if true, append the levels array rendering
 * @param withData if true, append the items data rendering
 * @param serDe used only to render generic items as Strings
 * @return the multi-line summary string
 */
static <T> String toStringImpl(final KllSketch sketch, final boolean withLevels, final boolean withData,
final ArrayOfItemsSerDe<T> serDe) {
final SketchType sketchType = sketch.sketchType;
final boolean hasMemory = sketch.hasMemory();
final int k = sketch.getK();
final int m = sketch.getM();
final long n = sketch.getN();
final int numLevels = sketch.getNumLevels();
final int[] fullLevelsArr = sketch.getLevelsArray(UPDATABLE);
//final int[] levelsArr = sketch.getLevelsArray(sketch.sketchStructure);
final String epsPct = String.format("%.3f%%", sketch.getNormalizedRankError(false) * 100);
final String epsPMFPct = String.format("%.3f%%", sketch.getNormalizedRankError(true) * 100);
final boolean compact = sketch.isCompactMemoryFormat();
final StringBuilder sb = new StringBuilder();
//compose a class-like display name, e.g. "KllDirectCompactDoublesSketch"
final String directStr = hasMemory ? "Direct" : "";
final String compactStr = compact ? "Compact" : "";
final String readOnlyStr = sketch.isReadOnly() ? "true" + ("(" + (compact ? "Format" : "Memory") + ")") : "false";
final String skTypeStr = sketchType.getName();
final String className = "Kll" + directStr + compactStr + skTypeStr;
sb.append(Util.LS).append("### ").append(className).append(" Summary:").append(Util.LS);
sb.append(" K : ").append(k).append(Util.LS);
sb.append(" Dynamic min K : ").append(sketch.getMinK()).append(Util.LS);
sb.append(" M : ").append(m).append(Util.LS);
sb.append(" N : ").append(n).append(Util.LS);
sb.append(" Epsilon : ").append(epsPct).append(Util.LS);
sb.append(" Epsilon PMF : ").append(epsPMFPct).append(Util.LS);
sb.append(" Empty : ").append(sketch.isEmpty()).append(Util.LS);
sb.append(" Estimation Mode : ").append(sketch.isEstimationMode()).append(Util.LS);
sb.append(" Levels : ").append(numLevels).append(Util.LS);
sb.append(" Level 0 Sorted : ").append(sketch.isLevelZeroSorted()).append(Util.LS);
sb.append(" Capacity Items : ").append(fullLevelsArr[numLevels]).append(Util.LS);
sb.append(" Retained Items : ").append(sketch.getNumRetained()).append(Util.LS);
sb.append(" Empty/Garbage Items : ").append(sketch.levelsArr[0]).append(Util.LS);
sb.append(" ReadOnly : ").append(readOnlyStr).append(Util.LS);
//ITEMS_SKETCH has no updatable serialized form, so skip that size line
if (sketchType != ITEMS_SKETCH) {
sb.append(" Updatable Storage Bytes: ").append(sketch.currentSerializedSizeBytes(true))
.append(Util.LS);
}
sb.append(" Compact Storage Bytes : ").append(sketch.currentSerializedSizeBytes(false))
.append(Util.LS);
//min/max items are rendered per concrete sketch type
if (sketchType == DOUBLES_SKETCH) {
final KllDoublesSketch dblSk = (KllDoublesSketch) sketch;
sb.append(" Min Item : ").append(dblSk.isEmpty() ? Double.NaN : dblSk.getMinItem())
.append(Util.LS);
sb.append(" Max Item : ").append(dblSk.isEmpty() ? Double.NaN : dblSk.getMaxItem())
.append(Util.LS);
}
else if (sketchType == FLOATS_SKETCH) {
final KllFloatsSketch fltSk = (KllFloatsSketch) sketch;
sb.append(" Min Item : ").append(fltSk.isEmpty() ? Float.NaN : fltSk.getMinItem())
.append(Util.LS);
sb.append(" Max Item : ").append(fltSk.isEmpty() ? Float.NaN : fltSk.getMaxItem())
.append(Util.LS);
}
else { //sketchType == ITEMS_SKETCH
final KllItemsSketch<T> itmSk = (KllItemsSketch<T>) sketch;
sb.append(" Min Item : ").append(itmSk.isEmpty() ? "null" : serDe.toString(itmSk.getMinItem()))
.append(Util.LS);
sb.append(" Max Item : ").append(itmSk.isEmpty() ? "null" : serDe.toString(itmSk.getMaxItem()))
.append(Util.LS);
}
sb.append("### End sketch summary").append(Util.LS);
if (withLevels) {
sb.append(outputLevels(k, m, numLevels, fullLevelsArr));
}
if (withData) {
//dispatch to the type-specific items renderer
if (sketchType == DOUBLES_SKETCH) {
final KllDoublesSketch dblSk = (KllDoublesSketch) sketch;
final double[] myDoubleItemsArr = dblSk.getDoubleItemsArray();
sb.append(outputDoublesData(numLevels, fullLevelsArr, myDoubleItemsArr));
} else if (sketchType == FLOATS_SKETCH) {
final KllFloatsSketch fltSk = (KllFloatsSketch) sketch;
final float[] myFloatItemsArr = fltSk.getFloatItemsArray();
sb.append(outputFloatsData(numLevels, fullLevelsArr, myFloatItemsArr));
}
else { //sketchType == KllItemsSketch
final KllItemsSketch<T> itmSk = (KllItemsSketch<T>) sketch;
final T[] myItemsArr = itmSk.getTotalItemsArray();
sb.append(outputItemsData(numLevels, fullLevelsArr, myItemsArr, serDe));
}
}
return sb.toString();
}
/**
 * Returns a very conservative upper bound of the number of levels based on <i>n</i>.
 * <p>Note: numberOfTrailingZeros(floorPowerOf2(n)) equals floor(log_2(n)),
 * so this returns one more than the original comment claimed.</p>
 * @param n the length of the stream
 * @return 1 + floor( log_2(n) )
 */
static int ubOnNumLevels(final long n) {
return 1 + Long.numberOfTrailingZeros(floorPowerOf2(n));
}
/**
 * This grows the levels arr by 1 (if needed) and increases the capacity of the items array
 * at the bottom. Only numLevels, the levels array and the items array are affected.
 * This assumes sketch is writable and UPDATABLE.
 * <p>The new space is created at the BOTTOM of the items array: all existing data and all
 * level boundaries are shifted upward by the capacity of the new level.</p>
 * @param sketch the current sketch
 */
static void addEmptyTopLevelToCompletelyFullSketch(final KllSketch sketch) {
final SketchType sketchType = sketch.sketchType;
final int[] myCurLevelsArr = sketch.getLevelsArray(sketch.sketchStructure);
final int myCurNumLevels = sketch.getNumLevels();
final int myCurTotalItemsCapacity = myCurLevelsArr[myCurNumLevels];
final int myNewNumLevels;
final int[] myNewLevelsArr;
final int myNewTotalItemsCapacity;
//type-specific state captured up front; only one trio of these is used
double[] myCurDoubleItemsArr = null;
double[] myNewDoubleItemsArr = null;
double minDouble = Double.NaN;
double maxDouble = Double.NaN;
float[] myCurFloatItemsArr = null;
float[] myNewFloatItemsArr = null;
float minFloat = Float.NaN;
float maxFloat = Float.NaN;
Object[] myCurItemsArr = null;
Object[] myNewItemsArr = null;
Object minItem = null;
Object maxItem = null;
if (sketchType == DOUBLES_SKETCH) {
final KllDoublesSketch dblSk = (KllDoublesSketch) sketch;
myCurDoubleItemsArr = dblSk.getDoubleItemsArray();
minDouble = dblSk.getMinItem();
maxDouble = dblSk.getMaxItem();
//assert we are following a certain growth scheme
assert myCurDoubleItemsArr.length == myCurTotalItemsCapacity;
}
else if (sketchType == FLOATS_SKETCH) {
final KllFloatsSketch fltSk = (KllFloatsSketch) sketch;
myCurFloatItemsArr = fltSk.getFloatItemsArray();
minFloat = fltSk.getMinItem();
maxFloat = fltSk.getMaxItem();
//assert we are following a certain growth scheme
assert myCurFloatItemsArr.length == myCurTotalItemsCapacity;
}
else { //sketchType == ITEMS_SKETCH
final KllItemsSketch<?> itmSk = (KllItemsSketch<?>) sketch;
myCurItemsArr = itmSk.getTotalItemsArray();
minItem = itmSk.getMinItem();
maxItem = itmSk.getMaxItem();
}
assert myCurLevelsArr[0] == 0; //definition of full is part of the growth scheme
//capacity of the new bottom level; this is how much everything shifts upward
final int deltaItemsCap = levelCapacity(sketch.getK(), myCurNumLevels + 1, 0, sketch.getM());
myNewTotalItemsCapacity = myCurTotalItemsCapacity + deltaItemsCap;
// Check if growing the levels arr if required.
// Note that merging MIGHT over-grow levels_, in which case we might not have to grow it
final boolean growLevelsArr = myCurLevelsArr.length < myCurNumLevels + 2;
// GROW LEVELS ARRAY
if (growLevelsArr) {
//grow levels arr by one and copy the old data to the new array, extra space at the top.
myNewLevelsArr = Arrays.copyOf(myCurLevelsArr, myCurNumLevels + 2);
assert myNewLevelsArr.length == myCurLevelsArr.length + 1;
myNewNumLevels = myCurNumLevels + 1;
sketch.incNumLevels(); //increment for off-heap
} else {
myNewLevelsArr = myCurLevelsArr;
myNewNumLevels = myCurNumLevels;
}
// This loop updates all level indices EXCLUDING the "extra" index at the top
for (int level = 0; level <= myNewNumLevels - 1; level++) {
myNewLevelsArr[level] += deltaItemsCap;
}
myNewLevelsArr[myNewNumLevels] = myNewTotalItemsCapacity; // initialize the new "extra" index at the top
// GROW items ARRAY
if (sketchType == DOUBLES_SKETCH) {
myNewDoubleItemsArr = new double[myNewTotalItemsCapacity];
// copy and shift the current data into the new array
System.arraycopy(myCurDoubleItemsArr, 0, myNewDoubleItemsArr, deltaItemsCap, myCurTotalItemsCapacity);
}
else if (sketchType == FLOATS_SKETCH) {
myNewFloatItemsArr = new float[myNewTotalItemsCapacity];
// copy and shift the current items data into the new array
System.arraycopy(myCurFloatItemsArr, 0, myNewFloatItemsArr, deltaItemsCap, myCurTotalItemsCapacity);
}
else { //sketchType == ITEMS_SKETCH
myNewItemsArr = new Object[myNewTotalItemsCapacity];
// copy and shift the current items data into the new array
System.arraycopy(myCurItemsArr, 0, myNewItemsArr, deltaItemsCap, myCurTotalItemsCapacity);
}
//MEMORY SPACE MANAGEMENT
//if the sketch is backed by Memory, resize the backing store first
if (sketch.getWritableMemory() != null) {
final WritableMemory wmem = memorySpaceMgmt(sketch, myNewLevelsArr.length, myNewTotalItemsCapacity);
sketch.setWritableMemory(wmem);
}
//update our sketch with new expanded spaces
sketch.setNumLevels(myNewNumLevels); //for off-heap only
sketch.setLevelsArray(myNewLevelsArr); //the KllSketch copy
if (sketchType == DOUBLES_SKETCH) {
final KllDoublesSketch dblSk = (KllDoublesSketch) sketch;
dblSk.setMinItem(minDouble);
dblSk.setMaxItem(maxDouble);
dblSk.setDoubleItemsArray(myNewDoubleItemsArr);
}
else if (sketchType == FLOATS_SKETCH) {
final KllFloatsSketch fltSk = (KllFloatsSketch) sketch;
fltSk.setMinItem(minFloat);
fltSk.setMaxItem(maxFloat);
fltSk.setFloatItemsArray(myNewFloatItemsArr);
}
else { //sketchType == ITEMS_SKETCH
final KllItemsSketch<?> itmSk = (KllItemsSketch<?>) sketch;
itmSk.setMinItem(minItem);
itmSk.setMaxItem(maxItem);
itmSk.setItemsArray(myNewItemsArr);
}
}
/**
 * Finds the lowest level (starting at level 0) whose population has reached or exceeded
 * its nominal capacity. This is only called when the sketch is full, so such a level is
 * guaranteed to exist; the assertion documents that invariant.
 * @param k configured size of sketch. Range [m, 2^16]
 * @param m minimum level size. Default is 8.
 * @param numLevels one-based number of current levels
 * @param levels the current levels array
 * @return the zero-based index of the level to compact
 */
static int findLevelToCompact(final int k, final int m, final int numLevels, final int[] levels) {
  for (int lvl = 0; ; lvl++) {
    assert lvl < numLevels; //a compactable level must exist below numLevels
    final int population = levels[lvl + 1] - levels[lvl];
    final int nominalCap = KllHelper.levelCapacity(k, numLevels, lvl, m);
    if (population >= nominalCap) { return lvl; }
  }
}
/**
 * Computes the actual item capacity of a given level given its depth index.
 * For depths greater than 30 the computation is folded into two halves so the
 * intermediate products stay within the range of a long (accurate up to depth 60).
 * Without folding, the internal calculations would exceed the capacity of a long.
 * @param k the configured k of the sketch
 * @param depth the zero-based index of the level being computed.
 * @return the actual capacity of a given level given its depth index.
 */
private static long intCapAux(final int k, final int depth) {
  if (depth > 30) {
    //fold: capacity(k, half + rest) == capacity(capacity(k, half), rest)
    final int half = depth / 2;
    final int rest = depth - half;
    return intCapAuxAux(intCapAuxAux(k, half), rest);
  }
  return intCapAuxAux(k, depth);
}
/**
 * Performs the integer-based computation of an individual (possibly folded) level
 * capacity: round( k * (2/3)^depth ) computed entirely in integer arithmetic.
 * @param k the configured k of the sketch (or an already-folded intermediate capacity)
 * @param depth the zero-based index of the level being computed.
 * @return the actual capacity of a given level given its depth index.
 */
private static long intCapAuxAux(final long k, final int depth) {
  final long doubled = k << 1; // pre-multiply by 2 so the final halving rounds to nearest
  final long quotient = (doubled << depth) / powersOfThree[depth];
  final long rounded = (quotient + 1L) >>> 1; // add 1 and divide by 2
  assert (rounded <= k);
  return rounded;
}
/**
 * Debug printf: intentionally disabled (no-op) in production code.
 * Re-enable the commented body for local debugging only.
 * @param fmt format
 * @param args arguments
 */
private static void printf(final String fmt, final Object ... args) {
//System.out.printf(fmt, args); //Disable
}
/**
 * Debug println: intentionally disabled (no-op) in production code.
 * Re-enable the commented body for local debugging only.
 * @param o object to print
 */
private static void println(final Object o) {
//System.out.println(o.toString()); //Disable
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.apache.datasketches.common.Util.isEven;
import static org.apache.datasketches.common.Util.isOdd;
import static org.apache.datasketches.kll.KllHelper.findLevelToCompact;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Random;
import org.apache.datasketches.common.Util;
/**
* Static methods to support KllItemsSketch
* @author Kevin Lang
* @author Lee Rhodes
*/
@SuppressWarnings("unchecked")
final class KllItemsHelper<T> {
/**
 * The following code is only valid in the special case of exactly reaching capacity while updating.
 * It cannot be used while merging, while reducing k, or anything else.
 * <p>Compacts the lowest over-full level: half of its items (chosen at a random parity)
 * are promoted into the level above, freeing space at the bottom for level zero.</p>
 * @param itmSk the current KllItemsSketch
 */
private static <T> void compressWhileUpdatingSketch(final KllItemsSketch<T> itmSk) {
final int level =
findLevelToCompact(itmSk.getK(), itmSk.getM(), itmSk.getNumLevels(), itmSk.levelsArr);
if (level == itmSk.getNumLevels() - 1) {
//The level to compact is the top level, thus we need to add a level.
//Be aware that this operation grows the items array,
//shifts the items data and the level boundaries of the data,
//and grows the levels array and increments numLevels_.
KllHelper.addEmptyTopLevelToCompletelyFullSketch(itmSk);
}
//after this point, the levelsArray will not be expanded, only modified.
final int[] myLevelsArr = itmSk.levelsArr;
final int rawBeg = myLevelsArr[level];
final int rawEnd = myLevelsArr[level + 1];
// +2 is OK because we already added a new top level if necessary
final int popAbove = myLevelsArr[level + 2] - rawEnd;
final int rawPop = rawEnd - rawBeg;
final boolean oddPop = isOdd(rawPop);
//an odd population leaves one item behind; only an even count can be halved
final int adjBeg = oddPop ? rawBeg + 1 : rawBeg;
final int adjPop = oddPop ? rawPop - 1 : rawPop;
final int halfAdjPop = adjPop / 2;
//the following is specific to generic Items
final Object[] myItemsArr = itmSk.getTotalItemsArray();
if (level == 0) { // level zero might not be sorted, so we must sort it if we wish to compact it
Arrays.sort((T[])myItemsArr, adjBeg, adjBeg + adjPop, itmSk.comparator);
}
//if the level above is empty, halve up into it; otherwise halve down and merge up
if (popAbove == 0) {
KllItemsHelper.randomlyHalveUpItems(myItemsArr, adjBeg, adjPop, KllSketch.random);
} else {
KllItemsHelper.randomlyHalveDownItems(myItemsArr, adjBeg, adjPop, KllSketch.random);
KllItemsHelper.mergeSortedItemsArrays(
myItemsArr, adjBeg, halfAdjPop,
myItemsArr, rawEnd, popAbove,
myItemsArr, adjBeg + halfAdjPop, itmSk.comparator);
}
int newIndex = myLevelsArr[level + 1] - halfAdjPop; // adjust boundaries of the level above
itmSk.setLevelsArrayAt(level + 1, newIndex);
if (oddPop) {
itmSk.setLevelsArrayAt(level, myLevelsArr[level + 1] - 1); // the current level now contains one item
myItemsArr[myLevelsArr[level]] = myItemsArr[rawBeg]; // namely this leftover guy
} else {
itmSk.setLevelsArrayAt(level, myLevelsArr[level + 1]); // the current level is now empty
}
// verify that we freed up halfAdjPop array slots just below the current level
assert myLevelsArr[level] == rawBeg + halfAdjPop;
// finally, we need to shift up the data in the levels below
// so that the freed-up space can be used by level zero
if (level > 0) {
final int amount = rawBeg - myLevelsArr[0];
System.arraycopy(myItemsArr, myLevelsArr[0], myItemsArr, myLevelsArr[0] + halfAdjPop, amount);
}
for (int lvl = 0; lvl < level; lvl++) {
newIndex = myLevelsArr[lvl] + halfAdjPop; //adjust boundary
itmSk.setLevelsArrayAt(lvl, newIndex);
}
itmSk.setItemsArray(myItemsArr);
}
//assumes readOnly = false, and UPDATABLE, called from KllItemSketch::merge
/**
 * Merges the other sketch into mySketch. Level-zero items from the other sketch are
 * fed through the normal update path; higher levels are merged in a work buffer and
 * then compressed back down to a valid configuration. Finally n, minK, the levels
 * array, the items array and min/max items of mySketch are updated.
 * @param <T> the item class
 * @param mySketch the target sketch; must be writable and UPDATABLE
 * @param otherItmSk the source sketch; not modified
 * @param comp the comparator for T
 */
static <T> void mergeItemImpl(final KllItemsSketch<T> mySketch,
final KllItemsSketch<T> otherItmSk, final Comparator<? super T> comp) {
if (otherItmSk.isEmpty()) { return; }
//capture my key mutable fields before doing any merging
final boolean myEmpty = mySketch.isEmpty();
final Object myMin = myEmpty ? null : mySketch.getMinItem();
final Object myMax = myEmpty ? null : mySketch.getMaxItem();
final int myMinK = mySketch.getMinK();
final long finalN = mySketch.getN() + otherItmSk.getN();
//buffers that are referenced multiple times
final int otherNumLevels = otherItmSk.getNumLevels();
final int[] otherLevelsArr = otherItmSk.levelsArr;
final Object[] otherItemsArr;
//MERGE: update this sketch with level0 items from the other sketch
if (otherItmSk.isCompactSingleItem()) {
updateItem(mySketch, otherItmSk.getSingleItem(), comp);
otherItemsArr = new Object[0];
} else {
otherItemsArr = otherItmSk.getTotalItemsArray();
for (int i = otherLevelsArr[0]; i < otherLevelsArr[1]; i++) {
updateItem(mySketch, otherItemsArr[i], comp);
}
}
//After the level 0 update, we capture the intermediate state of levels and items arrays...
final int myCurNumLevels = mySketch.getNumLevels();
final int[] myCurLevelsArr = mySketch.levelsArr;
final Object[] myCurItemsArr = mySketch.getTotalItemsArray();
// then rename them and initialize in case there are no higher levels
int myNewNumLevels = myCurNumLevels;
int[] myNewLevelsArr = myCurLevelsArr;
Object[] myNewItemsArr = myCurItemsArr;
//merge higher levels if they exist
if (otherNumLevels > 1 && !otherItmSk.isCompactSingleItem()) {
//work buffer sized to hold everything retained above (and including) my level zero
final int tmpSpaceNeeded = mySketch.getNumRetained()
+ KllHelper.getNumRetainedAboveLevelZero(otherNumLevels, otherLevelsArr);
final Object[] workbuf = new Object[tmpSpaceNeeded];
final int ub = KllHelper.ubOnNumLevels(finalN);
final int[] worklevels = new int[ub + 2]; // ub+1 does not work
final int[] outlevels = new int[ub + 2];
final int provisionalNumLevels = max(myCurNumLevels, otherNumLevels);
populateItemWorkArrays(workbuf, worklevels, provisionalNumLevels,
myCurNumLevels, myCurLevelsArr, myCurItemsArr,
otherNumLevels, otherLevelsArr, otherItemsArr, comp);
// notice that workbuf is being used as both the input and output
final int[] result = generalItemsCompress(mySketch.getK(), mySketch.getM(), provisionalNumLevels,
workbuf, worklevels, workbuf, outlevels, mySketch.isLevelZeroSorted(), KllSketch.random, comp);
final int targetItemCount = result[1]; //was finalCapacity. Max size given k, m, numLevels
final int curItemCount = result[2]; //was finalPop
// now we need to finalize the results for mySketch
//THE NEW NUM LEVELS
myNewNumLevels = result[0];
assert myNewNumLevels <= ub; // ub may be much bigger
// THE NEW ITEMS ARRAY
myNewItemsArr = (targetItemCount == myCurItemsArr.length)
? myCurItemsArr
: new Object[targetItemCount];
final int freeSpaceAtBottom = targetItemCount - curItemCount;
//shift the new items array create space at bottom
System.arraycopy(workbuf, outlevels[0], myNewItemsArr, freeSpaceAtBottom, curItemCount);
final int theShift = freeSpaceAtBottom - outlevels[0];
//calculate the new levels array length
final int finalLevelsArrLen;
if (myCurLevelsArr.length < myNewNumLevels + 1) { finalLevelsArrLen = myNewNumLevels + 1; }
else { finalLevelsArrLen = myCurLevelsArr.length; }
//THE NEW LEVELS ARRAY
myNewLevelsArr = new int[finalLevelsArrLen];
for (int lvl = 0; lvl < myNewNumLevels + 1; lvl++) { // includes the "extra" index
myNewLevelsArr[lvl] = outlevels[lvl] + theShift;
}
//MEMORY SPACE MANAGEMENT
//not used
}
//Update Preamble:
mySketch.setN(finalN);
if (otherItmSk.isEstimationMode()) { //otherwise the merge brings over exact items.
mySketch.setMinK(min(myMinK, otherItmSk.getMinK()));
}
//Update numLevels, levelsArray, items
mySketch.setNumLevels(myNewNumLevels);
mySketch.setLevelsArray(myNewLevelsArr);
mySketch.setItemsArray(myNewItemsArr);
//Update min, max items
final Object otherMin = otherItmSk.getMinItem();
final Object otherMax = otherItmSk.getMaxItem();
if (myEmpty) {
mySketch.setMinItem(otherMin);
mySketch.setMaxItem(otherMax);
} else {
mySketch.setMinItem(Util.minT(myMin, otherMin, comp));
mySketch.setMaxItem(Util.maxT(myMax, otherMax, comp));
}
//sanity check: the retained weighted items must account for exactly n
assert KllHelper.sumTheSampleWeights(mySketch.getNumLevels(), mySketch.levelsArr) == mySketch.getN();
}
/**
 * Merges two sorted ranges of items into a destination range. The destination may be
 * the same array as the sources: the caller arranges the ranges so that no unread
 * input is overwritten. When two items compare equal, the item from bufB is emitted first.
 * @param <T> the item class
 * @param bufA the first source buffer
 * @param startA offset of the first source range
 * @param lenA length of the first source range
 * @param bufB the second source buffer
 * @param startB offset of the second source range
 * @param lenB length of the second source range
 * @param bufC the destination buffer
 * @param startC offset of the destination range, which has length lenA + lenB
 * @param comp the comparator for T
 */
private static <T> void mergeSortedItemsArrays(
    final Object[] bufA, final int startA, final int lenA,
    final Object[] bufB, final int startB, final int lenB,
    final Object[] bufC, final int startC, final Comparator<? super T> comp) {
  final int limA = startA + lenA;
  final int limB = startB + lenB;
  final int limC = startC + (lenA + lenB);
  int idxA = startA;
  int idxB = startB;
  for (int idxC = startC; idxC < limC; idxC++) {
    final boolean takeA;
    if (idxA == limA) { takeA = false; }      //A exhausted: drain B
    else if (idxB == limB) { takeA = true; }  //B exhausted: drain A
    else { takeA = Util.lt(bufA[idxA], bufB[idxB], comp); } //strictly-less wins; ties go to B
    bufC[idxC] = takeA ? bufA[idxA++] : bufB[idxB++];
  }
  assert idxA == limA;
  assert idxB == limB;
}
/**
 * Compacts an even-length range in place by keeping every other item, starting at a
 * random offset of 0 or 1, and packing the survivors into the LOWER half of the range.
 * The upper half is left behind as garbage.
 * (Validation Method: swap in deterministicOffset() as noted below to run the validation test.)
 * @param buf the items array
 * @param start data start
 * @param length items array length; must be even
 * @param random instance of Random
 */
//NOTE For validation Method: Need to modify to run.
private static void randomlyHalveDownItems(final Object[] buf, final int start, final int length,
    final Random random) {
  assert isEven(length);
  final int survivors = length / 2;
  final int offset = random.nextInt(2); // disable for validation
  //final int offset = deterministicOffset(); // enable for validation
  int src = start + offset;
  for (int dst = start; dst < (start + survivors); dst++) {
    buf[dst] = buf[src]; //keep every other item, packed downward
    src += 2;
  }
}
/**
 * Compacts an even-length range in place by keeping every other item, starting at a
 * random offset of 0 or 1 from the top, and packing the survivors into the UPPER half
 * of the range. The lower half is left behind as garbage.
 * (Validation Method: swap in deterministicOffset() as noted below to run the validation test.)
 * @param buf the items array
 * @param start data start
 * @param length items array length; must be even
 * @param random instance of Random
 */
//NOTE For validation Method: Need to modify to run.
private static void randomlyHalveUpItems(final Object[] buf, final int start, final int length,
    final Random random) {
  assert isEven(length);
  final int survivors = length / 2;
  final int offset = random.nextInt(2); // disable for validation
  //final int offset = deterministicOffset(); // enable for validation
  int src = (start + length) - 1 - offset;
  for (int dst = (start + length) - 1; dst >= (start + survivors); dst--) {
    buf[dst] = buf[src]; //keep every other item, packed upward
    src -= 2;
  }
}
//Called from KllItemsSketch::update and this
/**
 * Updates the sketch with a single item. Null items are ignored.
 * Min/max are updated first; if level zero is full, the sketch is compacted
 * (which may expand the internal arrays) before the item is inserted at the
 * new bottom of level zero.
 * @param <T> the item class
 * @param itmSk the target sketch
 * @param item the item to insert; ignored if null
 * @param comp the comparator for T
 */
static <T> void updateItem(final KllItemsSketch<T> itmSk,
final Object item, final Comparator<? super T> comp) {
if (item == null) { return; } //ignore
if (itmSk.isEmpty()) {
itmSk.setMinItem(item);
itmSk.setMaxItem(item);
} else {
itmSk.setMinItem(Util.minT(itmSk.getMinItem(), item, comp));
itmSk.setMaxItem(Util.maxT(itmSk.getMaxItem(), item, comp));
}
//level zero is full when its start index has reached 0; compact to make room
if (itmSk.levelsArr[0] == 0) { compressWhileUpdatingSketch(itmSk); }
final int myLevelsArrAtZero = itmSk.levelsArr[0]; //LevelsArr could be expanded
itmSk.incN();
itmSk.setLevelZeroSorted(false);
final int nextPos = myLevelsArrAtZero - 1; //level zero grows downward
assert myLevelsArrAtZero >= 0;
itmSk.setLevelsArrayAt(0, nextPos);
itmSk.setItemsArrayAt(nextPos, item);
}
/**
 * Compression algorithm used to merge higher levels.
 * <p>Here is what we do for each level:</p>
 * <ul><li>If it does not need to be compacted, then simply copy it over.</li>
 * <li>Otherwise, it does need to be compacted, so...
 * <ul><li>Copy zero or one guy over.</li>
 * <li>If the level above is empty, halve up.</li>
 * <li>Else the level above is nonempty, so halve down, then merge up.</li>
 * </ul></li>
 * <li>Adjust the boundaries of the level above.</li>
 * </ul>
 *
 * <p>It can be proved that generalCompress returns a sketch that satisfies the space constraints
 * no matter how much data is passed in.
 * We are pretty sure that it works correctly when inBuf and outBuf are the same.
 * All levels except for level zero must be sorted before calling this, and will still be
 * sorted afterwards.
 * Level zero is not required to be sorted before, and may not be sorted afterwards.</p>
 *
 * <p>This trashes inBuf and inLevels and modifies outBuf and outLevels.</p>
 *
 * @param k The sketch parameter k
 * @param m The minimum level size
 * @param numLevelsIn provisional number of number of levels = max(this.numLevels, other.numLevels)
 * @param inBuf work buffer of size = this.getNumRetained() + other.getNumRetainedAboveLevelZero().
 * This contains the items array of the other sketch
 * @param inLevels work levels array size = ubOnNumLevels(this.n + other.n) + 2
 * @param outBuf the same array as inBuf
 * @param outLevels the same size as inLevels
 * @param isLevelZeroSorted true if this.level 0 is sorted
 * @param random instance of java.util.Random
 * @param comp Comparator of T
 * @return int array of: {numLevels, targetItemCount, currentItemCount)
 */
private static <T> int[] generalItemsCompress(
    final int k,
    final int m,
    final int numLevelsIn,
    final Object[] inBuf,
    final int[] inLevels,
    final Object[] outBuf,
    final int[] outLevels,
    final boolean isLevelZeroSorted,
    final Random random, final Comparator<? super T> comp) {
  assert numLevelsIn > 0; // things are too weird if zero levels are allowed
  int numLevels = numLevelsIn;
  int currentItemCount = inLevels[numLevels] - inLevels[0]; // decreases with each compaction
  int targetItemCount = KllHelper.computeTotalItemCapacity(k, m, numLevels); // increases if we add levels
  boolean doneYet = false;
  outLevels[0] = 0;
  int curLevel = -1;
  while (!doneYet) {
    curLevel++; // start out at level 0
    // If we are at the current top level, add an empty level above it for convenience,
    // but do not increment numLevels until later
    if (curLevel == (numLevels - 1)) {
      inLevels[curLevel + 2] = inLevels[curLevel + 1];
    }
    final int rawBeg = inLevels[curLevel];
    final int rawLim = inLevels[curLevel + 1];
    final int rawPop = rawLim - rawBeg;
    if ((currentItemCount < targetItemCount) || (rawPop < KllHelper.levelCapacity(k, numLevels, curLevel, m))) {
      // copy level over as is
      // because inBuf and outBuf could be the same, make sure we are not moving data upwards!
      assert (rawBeg >= outLevels[curLevel]);
      System.arraycopy(inBuf, rawBeg, outBuf, outLevels[curLevel], rawPop);
      outLevels[curLevel + 1] = outLevels[curLevel] + rawPop;
    }
    else {
      // The sketch is too full AND this level is too full, so we compact it
      // Note: this can add a level and thus change the sketch's capacity
      final int popAbove = inLevels[curLevel + 2] - rawLim;
      final boolean oddPop = isOdd(rawPop);
      final int adjBeg = oddPop ? 1 + rawBeg : rawBeg;
      final int adjPop = oddPop ? rawPop - 1 : rawPop;
      final int halfAdjPop = adjPop / 2;
      if (oddPop) { // copy one guy over
        outBuf[outLevels[curLevel]] = inBuf[rawBeg];
        outLevels[curLevel + 1] = outLevels[curLevel] + 1;
      } else { // copy zero guys over
        outLevels[curLevel + 1] = outLevels[curLevel];
      }
      // level zero might not be sorted, so we must sort it if we wish to compact it.
      // Fix: sort with the caller's comparator — T need not implement Comparable, and
      // the natural order may differ from comp (matches compressWhileUpdatingSketch).
      if ((curLevel == 0) && !isLevelZeroSorted) {
        Arrays.sort((T[]) inBuf, adjBeg, adjBeg + adjPop, comp);
      }
      if (popAbove == 0) { // Level above is empty, so halve up
        randomlyHalveUpItems(inBuf, adjBeg, adjPop, random);
      } else { // Level above is nonempty, so halve down, then merge up
        randomlyHalveDownItems(inBuf, adjBeg, adjPop, random);
        mergeSortedItemsArrays(inBuf, adjBeg, halfAdjPop, inBuf, rawLim, popAbove, inBuf, adjBeg + halfAdjPop, comp);
      }
      // track the fact that we just eliminated some data
      currentItemCount -= halfAdjPop;
      // Adjust the boundaries of the level above
      inLevels[curLevel + 1] = inLevels[curLevel + 1] - halfAdjPop;
      // Increment numLevels if we just compacted the old top level
      // This creates some more capacity (the size of the new bottom level)
      if (curLevel == (numLevels - 1)) {
        numLevels++;
        targetItemCount += KllHelper.levelCapacity(k, numLevels, 0, m);
      }
    } // end of code for compacting a level
    // determine whether we have processed all levels yet (including any new levels that we created)
    if (curLevel == (numLevels - 1)) { doneYet = true; }
  } // end of loop over levels
  assert (outLevels[numLevels] - outLevels[0]) == currentItemCount;
  return new int[] {numLevels, targetItemCount, currentItemCount};
}
/**
 * Fills the work buffer and work levels array with the union of the two sketches' levels,
 * in preparation for the general compression pass.
 *
 * <p>Level zero of "other" was already folded into "self" by the caller, so only self's
 * level zero is copied here. For every higher level, the corresponding self and other
 * levels (both already sorted) are merged into one sorted run of the work buffer.</p>
 *
 * @param workbuf the output items work buffer
 * @param worklevels the output levels boundaries into workbuf; worklevels[L] is the start of level L
 * @param provisionalNumLevels max(self numLevels, other numLevels)
 * @param myCurNumLevels self's current number of levels
 * @param myCurLevelsArr self's current levels array
 * @param myCurItemsArr self's current items array
 * @param otherNumLevels other's number of levels
 * @param otherLevelsArr other's levels array
 * @param otherItemsArr other's items array
 * @param comp the comparator used for the sorted merges
 * @param <T> the item type
 */
private static <T> void populateItemWorkArrays(
    final Object[] workbuf, final int[] worklevels, final int provisionalNumLevels,
    final int myCurNumLevels, final int[] myCurLevelsArr, final Object[] myCurItemsArr,
    final int otherNumLevels, final int[] otherLevelsArr, final Object[] otherItemsArr,
    final Comparator<? super T> comp) {
  // Level zero: "other" level-zero items were already inserted into "self" by the caller.
  worklevels[0] = 0;
  final int popZero = KllHelper.currentLevelSizeItems(0, myCurNumLevels, myCurLevelsArr);
  System.arraycopy(myCurItemsArr, myCurLevelsArr[0], workbuf, worklevels[0], popZero);
  worklevels[1] = worklevels[0] + popZero;

  // Levels >= 1: each work level is the union of the corresponding self and other levels.
  for (int level = 1; level < provisionalNumLevels; level++) {
    final int selfPop = KllHelper.currentLevelSizeItems(level, myCurNumLevels, myCurLevelsArr);
    final int otherPop = KllHelper.currentLevelSizeItems(level, otherNumLevels, otherLevelsArr);
    worklevels[level + 1] = worklevels[level] + selfPop + otherPop;
    if (otherPop == 0 && selfPop > 0) {
      System.arraycopy(myCurItemsArr, myCurLevelsArr[level], workbuf, worklevels[level], selfPop);
    } else if (selfPop == 0 && otherPop > 0) {
      System.arraycopy(otherItemsArr, otherLevelsArr[level], workbuf, worklevels[level], otherPop);
    } else if (selfPop > 0 && otherPop > 0) {
      // Both inputs are sorted at this level, so a linear merge keeps the work level sorted.
      mergeSortedItemsArrays(
          myCurItemsArr, myCurLevelsArr[level], selfPop,
          otherItemsArr, otherLevelsArr[level], otherPop,
          workbuf, worklevels[level], comp);
    }
    // If both populations are zero there is nothing to copy; the boundaries already coincide.
  }
}
/*
* Validation Method.
* The following must be enabled for use with the KllItemsValidationTest,
* which is only enabled for manual testing. In addition, two Validation Methods
* above need to be modified.
*/ //NOTE Validation Method: Need to uncomment to use
// static int nextOffset = 0;
//
// private static int deterministicOffset() {
// final int result = nextOffset;
// nextOffset = 1 - nextOffset;
// return result;
// }
}
| 2,633 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllPreambleUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.common.Family.idToFamily;
import static org.apache.datasketches.common.Util.zeroPad;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_FULL;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.DOUBLES_SKETCH;
import static org.apache.datasketches.kll.KllSketch.SketchType.FLOATS_SKETCH;
import static org.apache.datasketches.kll.KllSketch.SketchType.ITEMS_SKETCH;
import java.util.Objects;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.Util;
import org.apache.datasketches.kll.KllSketch.SketchStructure;
import org.apache.datasketches.kll.KllSketch.SketchType;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* This class defines the serialized data structure and provides access methods for the preamble fields.
*
* <p>The intent of the design of this class was to isolate the detailed knowledge of the bit and
* byte layout of the serialized form of the sketches derived from the base sketch classes into one place.
* This allows the possibility of the introduction of different serialization
* schemes with minimal impact on the rest of the library.</p>
*
* <h3>Visual Layout</h3>
* The low significance bytes of the visual data structure below are on the left.
* The multi-byte primitives are stored in native byte order.
* The numeric <i>byte</i> and <i>short</i> fields are treated as unsigned.
* The numeric <i>int</i> and <i>long</i> fields are treated as signed.
*
* <h3>Preamble Formats</h3>
* The preamble has four formats:
*
* <ul>
* <li>The serialized empty compact structure requires 8 bytes of preamble. It is not updatable.
* It is identified by the <i>enum SketchStructure.COMPACT_EMPTY.</i></li>
*
* <li>The serialized, single-item compact structure requires 8 bytes of preamble, followed by the one item.
* The size of this structure is 8 + itemSize bytes. It is not updatable.
* It is identified by the <i>enum SketchStructure.COMPACT_SINGLE.</i></li>
*
* <li>A serialized, <i>n > 1</i> compact structure requires 20 bytes of preamble (5 ints) followed by
* four variable-sized fields. The details of these fields can be found in the code and are illustrated
* in the table below.
* The 5 int preamble is followed by the <i>levelsArr int[numLevels]</i> as bytes,
* followed by the min and max values as bytes,
* followed by a packed items data array as bytes. There are no empty or garbage slots in this structure.
* It is not updatable.
* It is identified by the <i>enum SketchStructure.COMPACT_FULL</i>.</li>
*
* <li>A serialized, <i>n > 1</i> non-compact, updatable structure requires 20 bytes of preamble (5 ints).
* This is followed by the LevelsArr int[NumLevels + 1], followed by the min and max values, and then
* followed by an items data array that may include empty or garbage slots. It is updatable.
* The details of these fields can be found in the code..
* It is identified by the <i>enum SketchStructure.UPDATABLE</i>. This structure may not be implemented by
* some sketches.</li>
* </ul>
*
* <h3>Visual Layout</h3>
* The fields in braces are those that can be variable in size.
*
* <pre>{@code
* Serialized COMPACT_EMPTY sketch structure, Empty (8 bytes)
* and COMPACT_SINGLE sketch structure, (single item) (8 + itemSize):
* Int Adr: Byte Adr ->
* 0 || 0 | 1 | 2 | 3 |
* | PreambleInts | SerVer | FamID | Flags |
*
* 1 || 4 | 5 | 6 | 7 |
* ||-----------K-----------| M | unused |
*
* 2 || 8 |
* ||{Single Item} ->
*
* Serialized COMPACT_FULL sketch structure, more than one item:
* Int Adr: Byte Adr ->
* 0 || 0 | 1 | 2 | 3 |
* || PreambleInts | SerVer | FamID | Flags |
*
* 1 || 4 | 5 | 6 | 7 |
* ||-----------K-----------| M | unused |
*
* 2,3 || 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 |
* ||-----------------N_LONG-----------------|
*
* 4 || 16 | 17 | 18 | 19 |
* ||------Min K------------|NumLvls| unused |
*
* 5 || 20 |
* { Levels Array }
* { Min Item }
* { Max Item }
* { Items Array }
*
* Serialization Combinations for SerVer and PreambleInts
* | Sketch Structure | SerVer | PreInts |
* |------------------|----------------|------------------|
* | Compact Empty | Empty/Full (1) | Empty/Single (2) | ReadOnly, 8 byte Preamble, nothing else
* | Compact Single | Single (2) | Empty/Single (2) | ReadOnly, 8 byte Preamble + Single Item
* | Compact Full | Empty/Full (1) | Full (5) | ReadOnly, 20 Byte Preamble, Short LevelsArr, Retained Items
* | Updatable | Updatable (3) | Full (5) | Updatable, 20 Byte Preamble, Full LevelsArr, All Items
* | ERROR | Single (2) | Full (5) |
* | ERROR | Updatable (3) | Empty/Single (2) |
* }</pre>
*
* @author Lee Rhodes
*/
final class KllPreambleUtil<T> {
private KllPreambleUtil() {}
static final String LS = System.getProperty("line.separator");
// Preamble byte addresses
static final int PREAMBLE_INTS_BYTE_ADR = 0;
static final int SER_VER_BYTE_ADR = 1;
static final int FAMILY_BYTE_ADR = 2;
static final int FLAGS_BYTE_ADR = 3;
static final int K_SHORT_ADR = 4; // to 5
static final int M_BYTE_ADR = 6;
// 7 is reserved for future use
// SINGLE ITEM ONLY
static final int DATA_START_ADR_SINGLE_ITEM = 8; //also ok for empty
// MULTI-ITEM
static final int N_LONG_ADR = 8; // to 15
static final int MIN_K_SHORT_ADR = 16; // to 17
static final int NUM_LEVELS_BYTE_ADR = 18;
// 19 is reserved for future use
static final int DATA_START_ADR = 20; // Full Sketch, not single item
// Other static members
static final byte SERIAL_VERSION_EMPTY_FULL = 1; // Empty or full preamble, NOT single item format, NOT updatable
static final byte SERIAL_VERSION_SINGLE = 2; // only single-item format, NOT updatable
static final byte SERIAL_VERSION_UPDATABLE = 3; // PreInts=5, Full preamble + LevelsArr + min, max + empty space
static final byte PREAMBLE_INTS_EMPTY_SINGLE = 2; // for empty or single item
static final byte PREAMBLE_INTS_FULL = 5; // Full preamble, not empty nor single item.
static final byte KLL_FAMILY = 15;
// Flag bit masks
static final int EMPTY_BIT_MASK = 1;
static final int LEVEL_ZERO_SORTED_BIT_MASK = 2;
static final int SINGLE_ITEM_BIT_MASK = 4;
/**
* Returns a human readable string summary of the internal state of the given sketch byte array.
* Used primarily in testing.
*
* @param byteArr the given sketch byte array.
* @param includeData if true, includes detail of retained data.
* @return the summary string.
*/
static String toString(final byte[] byteArr, final SketchType sketchType, final boolean includeData) {
final Memory mem = Memory.wrap(byteArr);
return toString(mem, sketchType, includeData, null);
}
/**
* Returns a human readable string summary of the internal state of the given sketch byte array.
* Used primarily in testing.
*
* @param byteArr the given sketch byte array.
* @param includeData if true, includes detail of retained data.
* @param serDe the serialization/deserialization class, required for KllItemsSketch.
* @return the summary string.
*/
static String toString(final byte[] byteArr, final SketchType sketchType, final boolean includeData,
final ArrayOfItemsSerDe<?> serDe) {
final Memory mem = Memory.wrap(byteArr);
return toString(mem, sketchType, includeData, serDe);
}
/**
* Returns a human readable string summary of the internal state of the given Memory.
* Used primarily in testing.
*
* @param mem the given Memory
* @param includeData if true, includes detail of retained data.
* @return the summary string.
*/
static String toString(final Memory mem, final SketchType sketchType, final boolean includeData) {
return toString(mem, sketchType, includeData, null);
}
/**
* Returns a human readable string summary of the internal state of the given Memory.
* Used primarily in testing.
*
* @param mem the given Memory
* @param sketchType the sketch type: FLOATS_SKETCH, DOUBLES_SKETCH, or ITEMS_SKETCH.
* @param includeData if true, includes detail of retained data.
* @param serDe must be supplied for KllItemsSketch, otherwise can be null.
* @return the summary string.
*/
static <T> String toString(final Memory mem, final SketchType sketchType, final boolean includeData,
final ArrayOfItemsSerDe<T> serDe) {
if (sketchType == ITEMS_SKETCH) {
Objects.requireNonNull(serDe, "SerDe parameter must not be null for ITEMS_SKETCH.");
}
final KllMemoryValidate memVal = new KllMemoryValidate(mem, sketchType, serDe);
final SketchStructure myStructure = memVal.sketchStructure;
final int flags = memVal.flags & 0XFF;
final String flagsStr = (flags) + ", 0x" + (Integer.toHexString(flags)) + ", "
+ zeroPad(Integer.toBinaryString(flags), 8);
final int preInts = memVal.preInts; //??
//final boolean updatable = mySketchStructure == UPDATABLE;
final boolean emptyFlag = memVal.emptyFlag;
final int sketchBytes = memVal.sketchBytes;
final int typeBytes = sketchType == DOUBLES_SKETCH ? Double.BYTES : Float.BYTES;
final int familyID = getMemoryFamilyID(mem);
final String famName = idToFamily(familyID).toString();
final StringBuilder sb = new StringBuilder();
sb.append(Util.LS).append("### KLL SKETCH MEMORY SUMMARY:").append(LS);
sb.append("Sketch Type : ").append(sketchType.toString()).append(LS);
sb.append("SketchStructure : ").append(myStructure.toString()).append(LS);
sb.append("Byte 0 : Preamble Ints : ").append(preInts).append(LS);
sb.append("Byte 1 : SerVer : ").append(memVal.serVer).append(LS);
sb.append("Byte 2 : FamilyID : ").append(memVal.familyID).append(LS);
sb.append(" : FamilyName : ").append(famName).append(LS);
sb.append("Byte 3 : Flags Field : ").append(flagsStr).append(LS);
sb.append(" Bit: Flag Name : ").append(LS);
sb.append(" 0: EMPTY : ").append(emptyFlag).append(LS);
sb.append(" 1: LEVEL_ZERO_SORTED : ").append(memVal.level0SortedFlag).append(LS);
sb.append("Bytes 4-5 : K : ").append(memVal.k).append(LS);
sb.append("Byte 6 : Min Level Cap, M : ").append(memVal.m).append(LS);
sb.append("Byte 7 : (Reserved) : ").append(LS);
final long n = memVal.n;
final int minK = memVal.minK;
final int numLevels = memVal.numLevels;
final int[] levelsArr = memVal.levelsArr; //the full levels array
final int retainedItems = levelsArr[numLevels] - levelsArr[0];
if (myStructure == COMPACT_FULL || myStructure == UPDATABLE) {
sb.append("Bytes 8-15 : N : ").append(n).append(LS);
sb.append("Bytes 16-17 : MinK : ").append(minK).append(LS);
sb.append("Byte 18 : NumLevels : ").append(numLevels).append(LS);
}
else { //COMPACT_EMPTY OR COMPACT_SINGLE
sb.append("Assumed : N : ").append(n).append(LS);
sb.append("Assumed : MinK : ").append(minK).append(LS);
sb.append("Assumed : NumLevels : ").append(numLevels).append(LS);
}
sb.append("PreambleBytes : ").append(preInts * Integer.BYTES).append(LS);
sb.append("Sketch Bytes : ").append(sketchBytes).append(LS);
sb.append("Memory Capacity Bytes : ").append(mem.getCapacity()).append(LS);
sb.append("### END KLL Sketch Memory Summary").append(LS);
if (includeData) {
sb.append(LS);
sb.append("### START KLL DATA:").append(LS);
int offsetBytes = 0;
if (myStructure == UPDATABLE) {
sb.append("LEVELS ARR:").append(LS);
offsetBytes = DATA_START_ADR;
for (int i = 0; i < numLevels + 1; i++) {
sb.append(i + ", " + mem.getInt(offsetBytes)).append(LS);
offsetBytes += Integer.BYTES;
}
sb.append("MIN/MAX:").append(LS);
if (sketchType == DOUBLES_SKETCH) {
sb.append(mem.getDouble(offsetBytes)).append(LS);
offsetBytes += typeBytes;
sb.append(mem.getDouble(offsetBytes)).append(LS);
offsetBytes += typeBytes;
} else if (sketchType == FLOATS_SKETCH) {
sb.append(mem.getFloat(offsetBytes)).append(LS);
offsetBytes += typeBytes;
sb.append(mem.getFloat(offsetBytes)).append(LS);
offsetBytes += typeBytes;
} else { //ITEMS_SKETCH
sb.append("<<<Updatable Structure is not suppported by ItemsSketch>>>").append(LS);
}
sb.append("ALL DATA (including empty & garbage data)").append(LS);
final int itemsSpace = (sketchBytes - offsetBytes) / typeBytes;
if (sketchType == DOUBLES_SKETCH) {
for (int i = 0; i < itemsSpace; i++) {
sb.append(i + ", " + mem.getDouble(offsetBytes)).append(LS);
offsetBytes += typeBytes;
}
} else if (sketchType == FLOATS_SKETCH) {
for (int i = 0; i < itemsSpace; i++) {
sb.append(mem.getFloat(offsetBytes)).append(LS);
offsetBytes += typeBytes;
}
} else { //ITEMS_SKETCH
sb.append("<<<Updatable Structure is not suppported by ItemsSketch>>>").append(LS);
}
} else if (myStructure == COMPACT_FULL) {
sb.append("LEVELS ARR:").append(LS);
offsetBytes = DATA_START_ADR;
int j;
for (j = 0; j < numLevels; j++) {
sb.append(j + ", " + mem.getInt(offsetBytes)).append(LS);
offsetBytes += Integer.BYTES;
}
sb.append(j + ", " + levelsArr[numLevels]);
sb.append(" (Top level of Levels Array is absent in Memory)").append(LS);
sb.append("MIN/MAX:").append(LS);
if (sketchType == DOUBLES_SKETCH) {
sb.append(mem.getDouble(offsetBytes)).append(LS);
offsetBytes += typeBytes;
sb.append(mem.getDouble(offsetBytes)).append(LS);
offsetBytes += typeBytes;
} else if (sketchType == FLOATS_SKETCH) {
sb.append(mem.getFloat(offsetBytes)).append(LS);
offsetBytes += typeBytes;
sb.append(mem.getFloat(offsetBytes)).append(LS);
offsetBytes += typeBytes;
} else { //ITEMS_SKETCH
sb.append(serDe.deserializeFromMemory(mem, offsetBytes, 1)[0]).append(LS);
offsetBytes += serDe.sizeOf(mem, offsetBytes, 1);
sb.append(serDe.deserializeFromMemory(mem, offsetBytes, 1)[0]).append(LS);
offsetBytes += serDe.sizeOf(mem, offsetBytes, 1);
}
sb.append("RETAINED DATA").append(LS);
final int itemSpace = (sketchBytes - offsetBytes) / typeBytes;
if (sketchType == DOUBLES_SKETCH) {
for (int i = 0; i < itemSpace; i++) {
sb.append(i + ", " + mem.getDouble(offsetBytes)).append(LS);
offsetBytes += typeBytes;
}
} else if (sketchType == FLOATS_SKETCH) {
for (int i = 0; i < itemSpace; i++) {
sb.append(i + ", " + mem.getFloat(offsetBytes)).append(LS);
offsetBytes += typeBytes;
}
} else { //ITEMS_SKETCH
final T[] itemsArr = serDe.deserializeFromMemory(mem, offsetBytes, retainedItems);
for (int i = 0; i < itemsArr.length; i++) {
sb.append(i + ", " + serDe.toString(itemsArr[i])).append(LS);
}
offsetBytes += serDe.sizeOf(mem, offsetBytes, retainedItems);
}
} else if (myStructure == COMPACT_SINGLE) {
sb.append("SINGLE ITEM DATUM: "); //no LS
if (sketchType == DOUBLES_SKETCH) {
sb.append(mem.getDouble(DATA_START_ADR_SINGLE_ITEM)).append(LS);
} else if (sketchType == FLOATS_SKETCH) {
sb.append(mem.getFloat(DATA_START_ADR_SINGLE_ITEM)).append(LS);
} else { //ITEMS_SKETCH
sb.append(serDe.deserializeFromMemory(mem, DATA_START_ADR_SINGLE_ITEM, 1)[0]).append(LS);
}
} else { //COMPACT_EMPTY
sb.append("EMPTY, NO DATA").append(LS);
}
sb.append("### END KLL DATA:").append(LS);
}
return sb.toString();
}
static int getMemoryPreInts(final Memory mem) {
return mem.getByte(PREAMBLE_INTS_BYTE_ADR) & 0XFF;
}
static int getMemorySerVer(final Memory mem) {
return mem.getByte(SER_VER_BYTE_ADR) & 0XFF;
}
static SketchStructure getMemorySketchStructure(final Memory mem) {
final int preInts = getMemoryPreInts(mem);
final int serVer = getMemorySerVer(mem);
final SketchStructure structure = KllSketch.SketchStructure.getSketchStructure(preInts, serVer);
return structure;
}
static int getMemoryFamilyID(final Memory mem) {
return mem.getByte(FAMILY_BYTE_ADR) & 0XFF;
}
static int getMemoryFlags(final Memory mem) {
return mem.getByte(FLAGS_BYTE_ADR) & 0XFF;
}
static boolean getMemoryEmptyFlag(final Memory mem) {
return (getMemoryFlags(mem) & EMPTY_BIT_MASK) != 0;
}
static boolean getMemoryLevelZeroSortedFlag(final Memory mem) {
return (getMemoryFlags(mem) & LEVEL_ZERO_SORTED_BIT_MASK) != 0;
}
static int getMemoryK(final Memory mem) {
return mem.getShort(K_SHORT_ADR) & 0XFFFF;
}
static int getMemoryM(final Memory mem) {
return mem.getByte(M_BYTE_ADR) & 0XFF;
}
static long getMemoryN(final Memory mem) {
return mem.getLong(N_LONG_ADR);
}
static int getMemoryMinK(final Memory mem) {
return mem.getShort(MIN_K_SHORT_ADR) & 0XFFFF;
}
static int getMemoryNumLevels(final Memory mem) {
return mem.getByte(NUM_LEVELS_BYTE_ADR) & 0XFF;
}
static void setMemoryPreInts(final WritableMemory wmem, final int numPreInts) {
wmem.putByte(PREAMBLE_INTS_BYTE_ADR, (byte) numPreInts);
}
static void setMemorySerVer(final WritableMemory wmem, final int serVer) {
wmem.putByte(SER_VER_BYTE_ADR, (byte) serVer);
}
static void setMemoryFamilyID(final WritableMemory wmem, final int famId) {
wmem.putByte(FAMILY_BYTE_ADR, (byte) famId);
}
static void setMemoryFlags(final WritableMemory wmem, final int flags) {
wmem.putByte(FLAGS_BYTE_ADR, (byte) flags);
}
static void setMemoryEmptyFlag(final WritableMemory wmem, final boolean empty) {
final int flags = getMemoryFlags(wmem);
setMemoryFlags(wmem, empty ? flags | EMPTY_BIT_MASK : flags & ~EMPTY_BIT_MASK);
}
static void setMemoryLevelZeroSortedFlag(final WritableMemory wmem, final boolean levelZeroSorted) {
final int flags = getMemoryFlags(wmem);
setMemoryFlags(wmem, levelZeroSorted ? flags | LEVEL_ZERO_SORTED_BIT_MASK : flags & ~LEVEL_ZERO_SORTED_BIT_MASK);
}
static void setMemoryK(final WritableMemory wmem, final int memK) {
wmem.putShort(K_SHORT_ADR, (short) memK);
}
static void setMemoryM(final WritableMemory wmem, final int memM) {
wmem.putByte(M_BYTE_ADR, (byte) memM);
}
static void setMemoryN(final WritableMemory wmem, final long memN) {
wmem.putLong(N_LONG_ADR, memN);
}
static void setMemoryMinK(final WritableMemory wmem, final int memMinK) {
wmem.putShort(MIN_K_SHORT_ADR, (short) memMinK);
}
static void setMemoryNumLevels(final WritableMemory wmem, final int memNumLevels) {
wmem.putByte(NUM_LEVELS_BYTE_ADR, (byte) memNumLevels);
}
}
| 2,634 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllDoublesHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.apache.datasketches.common.Util.isEven;
import static org.apache.datasketches.common.Util.isOdd;
import static org.apache.datasketches.kll.KllHelper.findLevelToCompact;
import java.util.Arrays;
import java.util.Random;
import org.apache.datasketches.memory.WritableMemory;
//
/**
* Static methods to support KllDoublesSketch
* @author Kevin Lang
* @author Alexander Saydakov
*/
//
final class KllDoublesHelper {
/**
 * Compacts one level of the sketch in place, freeing up space just above level zero so
 * that updating can continue. The following code is only valid in the special case of
 * exactly reaching capacity while updating.
 * It cannot be used while merging, while reducing k, or anything else.
 *
 * <p>If the level chosen for compaction is the current top level, an empty top level is
 * added first, which grows the items and levels arrays. After that point the levels
 * array is only modified, never expanded.</p>
 *
 * @param dblSk the current KllDoublesSketch; modified in place
 */
private static void compressWhileUpdatingSketch(final KllDoublesSketch dblSk) {
//choose the lowest level that exceeds its nominal capacity
final int level =
findLevelToCompact(dblSk.getK(), dblSk.getM(), dblSk.getNumLevels(), dblSk.levelsArr);
if (level == dblSk.getNumLevels() - 1) {
//The level to compact is the top level, thus we need to add a level.
//Be aware that this operation grows the items array,
//shifts the items data and the level boundaries of the data,
//and grows the levels array and increments numLevels_.
KllHelper.addEmptyTopLevelToCompletelyFullSketch(dblSk);
}
//after this point, the levelsArray will not be expanded, only modified.
final int[] myLevelsArr = dblSk.levelsArr;
final int rawBeg = myLevelsArr[level];
final int rawEnd = myLevelsArr[level + 1];
// +2 is OK because we already added a new top level if necessary
final int popAbove = myLevelsArr[level + 2] - rawEnd;
final int rawPop = rawEnd - rawBeg;
final boolean oddPop = isOdd(rawPop);
//only an even count can be halved; an odd population leaves one item behind at rawBeg
final int adjBeg = oddPop ? rawBeg + 1 : rawBeg;
final int adjPop = oddPop ? rawPop - 1 : rawPop;
final int halfAdjPop = adjPop / 2;

//the following is specific to Doubles
final double[] myDoubleItemsArr = dblSk.getDoubleItemsArray();
if (level == 0) { // level zero might not be sorted, so we must sort it if we wish to compact it
Arrays.sort(myDoubleItemsArr, adjBeg, adjBeg + adjPop);
}
if (popAbove == 0) {
//level above is empty: promote half of the items upward in place
KllDoublesHelper.randomlyHalveUpDoubles(myDoubleItemsArr, adjBeg, adjPop, KllSketch.random);
} else {
//level above is nonempty: halve down, then merge the half with the level above
KllDoublesHelper.randomlyHalveDownDoubles(myDoubleItemsArr, adjBeg, adjPop, KllSketch.random);
KllDoublesHelper.mergeSortedDoubleArrays(
myDoubleItemsArr, adjBeg, halfAdjPop,
myDoubleItemsArr, rawEnd, popAbove,
myDoubleItemsArr, adjBeg + halfAdjPop);
}
int newIndex = myLevelsArr[level + 1] - halfAdjPop; // adjust boundaries of the level above
dblSk.setLevelsArrayAt(level + 1, newIndex);
if (oddPop) {
dblSk.setLevelsArrayAt(level, myLevelsArr[level + 1] - 1); // the current level now contains one item
myDoubleItemsArr[myLevelsArr[level]] = myDoubleItemsArr[rawBeg]; // namely this leftover guy
} else {
dblSk.setLevelsArrayAt(level, myLevelsArr[level + 1]); // the current level is now empty
}
// verify that we freed up halfAdjPop array slots just below the current level
assert myLevelsArr[level] == rawBeg + halfAdjPop;
// finally, we need to shift up the data in the levels below
// so that the freed-up space can be used by level zero
if (level > 0) {
final int amount = rawBeg - myLevelsArr[0];
System.arraycopy(myDoubleItemsArr, myLevelsArr[0], myDoubleItemsArr, myLevelsArr[0] + halfAdjPop, amount);
}
for (int lvl = 0; lvl < level; lvl++) {
newIndex = myLevelsArr[lvl] + halfAdjPop; //adjust boundary
dblSk.setLevelsArrayAt(lvl, newIndex);
}
//write the modified items array back (no-op for heap; required for a Memory-backed sketch)
dblSk.setDoubleItemsArray(myDoubleItemsArr);
}
/**
 * Merges the other sketch into the target sketch in place.
 * Assumes the target sketch is not read only and has the UPDATABLE sketch structure.
 * Called from KllDoublesSketch::merge.
 *
 * <p>Level zero of the other sketch is replayed through {@link #updateDouble}; any higher
 * levels are combined with the target's levels via a work buffer and a general
 * compression pass, after which the target's preamble, levels and items are finalized.</p>
 *
 * @param mySketch the target of the merge; modified in place
 * @param otherDblSk the source of the merge; not modified
 */
static void mergeDoubleImpl(final KllDoublesSketch mySketch,
final KllDoublesSketch otherDblSk) {
if (otherDblSk.isEmpty()) { return; }
//capture my key mutable fields before doing any merging
final boolean myEmpty = mySketch.isEmpty();
final double myMin = myEmpty ? Double.NaN : mySketch.getMinItem();
final double myMax = myEmpty ? Double.NaN : mySketch.getMaxItem();
final int myMinK = mySketch.getMinK();
final long finalN = mySketch.getN() + otherDblSk.getN();
//buffers that are referenced multiple times
final int otherNumLevels = otherDblSk.getNumLevels();
final int[] otherLevelsArr = otherDblSk.levelsArr;
final double[] otherDoubleItemsArr;
//MERGE: update this sketch with level0 items from the other sketch
if (otherDblSk.isCompactSingleItem()) {
updateDouble(mySketch, otherDblSk.getDoubleSingleItem());
otherDoubleItemsArr = new double[0]; //no higher levels to merge from a single-item sketch
} else {
otherDoubleItemsArr = otherDblSk.getDoubleItemsArray();
for (int i = otherLevelsArr[0]; i < otherLevelsArr[1]; i++) {
updateDouble(mySketch, otherDoubleItemsArr[i]);
}
}
//After the level 0 update, we capture the intermediate state of levels and items arrays...
final int myCurNumLevels = mySketch.getNumLevels();
final int[] myCurLevelsArr = mySketch.levelsArr;
final double[] myCurDoubleItemsArr = mySketch.getDoubleItemsArray();
// then rename them and initialize in case there are no higher levels
int myNewNumLevels = myCurNumLevels;
int[] myNewLevelsArr = myCurLevelsArr;
double[] myNewDoubleItemsArr = myCurDoubleItemsArr;
//merge higher levels if they exist
if (otherNumLevels > 1 && !otherDblSk.isCompactSingleItem()) {
final int tmpSpaceNeeded = mySketch.getNumRetained()
+ KllHelper.getNumRetainedAboveLevelZero(otherNumLevels, otherLevelsArr);
final double[] workbuf = new double[tmpSpaceNeeded];
final int ub = KllHelper.ubOnNumLevels(finalN); //upper bound on levels after the merge
final int[] worklevels = new int[ub + 2]; // ub+1 does not work
final int[] outlevels = new int[ub + 2];
final int provisionalNumLevels = max(myCurNumLevels, otherNumLevels);
populateDoubleWorkArrays(workbuf, worklevels, provisionalNumLevels,
myCurNumLevels, myCurLevelsArr, myCurDoubleItemsArr,
otherNumLevels, otherLevelsArr, otherDoubleItemsArr);
// notice that workbuf is being used as both the input and output
final int[] result = generalDoublesCompress(mySketch.getK(), mySketch.getM(), provisionalNumLevels,
workbuf, worklevels, workbuf, outlevels, mySketch.isLevelZeroSorted(), KllSketch.random);
final int targetItemCount = result[1]; //was finalCapacity. Max size given k, m, numLevels
final int curItemCount = result[2]; //was finalPop
// now we need to finalize the results for mySketch
//THE NEW NUM LEVELS
myNewNumLevels = result[0];
assert myNewNumLevels <= ub; // ub may be much bigger
// THE NEW ITEMS ARRAY: reuse the current array if it is already the target size
myNewDoubleItemsArr = (targetItemCount == myCurDoubleItemsArr.length)
? myCurDoubleItemsArr
: new double[targetItemCount];
final int freeSpaceAtBottom = targetItemCount - curItemCount;
//shift the new items array create space at bottom
System.arraycopy(workbuf, outlevels[0], myNewDoubleItemsArr, freeSpaceAtBottom, curItemCount);
final int theShift = freeSpaceAtBottom - outlevels[0];
//calculate the new levels array length
final int finalLevelsArrLen;
if (myCurLevelsArr.length < myNewNumLevels + 1) { finalLevelsArrLen = myNewNumLevels + 1; }
else { finalLevelsArrLen = myCurLevelsArr.length; }
//THE NEW LEVELS ARRAY
myNewLevelsArr = new int[finalLevelsArrLen];
for (int lvl = 0; lvl < myNewNumLevels + 1; lvl++) { // includes the "extra" index
myNewLevelsArr[lvl] = outlevels[lvl] + theShift;
}
//MEMORY SPACE MANAGEMENT: grow/replace backing Memory if this sketch is Memory-backed
if (mySketch.getWritableMemory() != null) {
final WritableMemory wmem =
KllHelper.memorySpaceMgmt(mySketch, myNewLevelsArr.length, myNewDoubleItemsArr.length);
mySketch.setWritableMemory(wmem);
}
}
//Update Preamble:
mySketch.setN(finalN);
if (otherDblSk.isEstimationMode()) { //otherwise the merge brings over exact items.
mySketch.setMinK(min(myMinK, otherDblSk.getMinK()));
}
//Update numLevels, levelsArray, items
mySketch.setNumLevels(myNewNumLevels);
mySketch.setLevelsArray(myNewLevelsArr);
mySketch.setDoubleItemsArray(myNewDoubleItemsArr);
//Update min, max items
final double otherMin = otherDblSk.getMinItem();
final double otherMax = otherDblSk.getMaxItem();
if (myEmpty) {
mySketch.setMinItem(otherMin);
mySketch.setMaxItem(otherMax);
} else {
mySketch.setMinItem(min(myMin, otherMin));
mySketch.setMaxItem(max(myMax, otherMax));
}
//sanity check: the weighted sample count must equal the total stream length
assert KllHelper.sumTheSampleWeights(mySketch.getNumLevels(), mySketch.levelsArr) == mySketch.getN();
}
/**
 * Merges two sorted runs of doubles into a destination run.
 * The destination may overlap the sources only in the patterns used by the callers
 * (the in-place compaction merge), where the write position never overtakes unread input.
 *
 * @param bufA the first sorted source array
 * @param startA the offset of the first run in bufA
 * @param lenA the length of the first run
 * @param bufB the second sorted source array
 * @param startB the offset of the second run in bufB
 * @param lenB the length of the second run
 * @param bufC the destination array; receives lenA + lenB items starting at startC
 * @param startC the offset of the destination run in bufC
 */
private static void mergeSortedDoubleArrays(
    final double[] bufA, final int startA, final int lenA,
    final double[] bufB, final int startB, final int lenB,
    final double[] bufC, final int startC) {
  final int limA = startA + lenA;
  final int limB = startB + lenB;
  int idxA = startA;
  int idxB = startB;
  int idxC = startC;
  // While both runs have items, take from A only on strict less-than, so B wins ties —
  // the same tie-breaking order as a conventional stable merge with A first.
  while (idxA < limA && idxB < limB) {
    if (bufA[idxA] < bufB[idxB]) {
      bufC[idxC++] = bufA[idxA++];
    } else {
      bufC[idxC++] = bufB[idxB++];
    }
  }
  // Exactly one run can have a remainder; bulk-copy it.
  if (idxA < limA) {
    System.arraycopy(bufA, idxA, bufC, idxC, limA - idxA);
  } else if (idxB < limB) {
    System.arraycopy(bufB, idxB, bufC, idxC, limB - idxB);
  }
}
/**
 * Compacts a run of items to half its size by keeping every second item, compacted
 * toward the start of the run. A single random bit decides whether the even- or
 * odd-indexed items survive. This must be modified to use the validation test.
 *
 * @param buf the items array
 * @param start data start
 * @param length length of the run to halve; must be even
 * @param random instance of Random (consumes exactly one nextInt(2) draw)
 */
//NOTE For validation Method: Need to modify to run.
private static void randomlyHalveDownDoubles(final double[] buf, final int start, final int length,
    final Random random) {
  assert isEven(length);
  final int halfLength = length / 2;
  final int offset = random.nextInt(2); // disable for validation
  //final int offset = deterministicOffset(); // enable for validation
  // Survivors are buf[start+offset], buf[start+offset+2], ... packed into the first half.
  for (int i = 0; i < halfLength; i++) {
    buf[start + i] = buf[start + offset + (2 * i)];
  }
}
/**
 * Compacts a run of items to half its size by keeping every second item, compacted
 * toward the end of the run. A single random bit decides whether the even- or
 * odd-indexed items (counted from the top) survive. This must be modified to use the
 * validation test.
 *
 * @param buf the items array
 * @param start data start
 * @param length length of the run to halve; must be even
 * @param random instance of Random (consumes exactly one nextInt(2) draw)
 */
//NOTE For validation Method: Need to modify to run.
private static void randomlyHalveUpDoubles(final double[] buf, final int start, final int length,
    final Random random) {
  assert isEven(length);
  final int halfLength = length / 2;
  final int offset = random.nextInt(2); // disable for validation
  //final int offset = deterministicOffset(); // enable for validation
  // Survivors are buf[top-offset], buf[top-offset-2], ... packed into the top half.
  final int top = start + length - 1;
  for (int i = 0; i < halfLength; i++) {
    buf[top - i] = buf[top - offset - (2 * i)];
  }
}
/**
 * Inserts a single item into the sketch at level zero, compacting first if level zero
 * is full. NaN items are silently ignored.
 * Called from KllDoublesSketch::update and from mergeDoubleImpl in this class.
 *
 * @param dblSk the sketch to update; modified in place
 * @param item the item to insert; ignored if NaN
 */
static void updateDouble(final KllDoublesSketch dblSk,
final double item) {
if (Double.isNaN(item)) { return; } //ignore
if (dblSk.isEmpty()) {
dblSk.setMinItem(item);
dblSk.setMaxItem(item);
} else {
dblSk.setMinItem(min(dblSk.getMinItem(), item));
dblSk.setMaxItem(max(dblSk.getMaxItem(), item));
}
//level zero is full when its lower boundary reaches index 0; compact to make room.
//NOTE: the compaction may replace/expand the levels array, so levelsArr[0] must be
//re-read after this call, not before.
if (dblSk.levelsArr[0] == 0) { compressWhileUpdatingSketch(dblSk); }
final int myLevelsArrAtZero = dblSk.levelsArr[0]; //LevelsArr could be expanded
dblSk.incN();
dblSk.setLevelZeroSorted(false);
final int nextPos = myLevelsArrAtZero - 1; //level zero grows downward toward index 0
assert myLevelsArrAtZero >= 0;
dblSk.setLevelsArrayAt(0, nextPos);
dblSk.setDoubleItemsArrayAt(nextPos, item);
}
  /**
   * Compression algorithm used to merge higher levels.
   * <p>Here is what we do for each level:</p>
   * <ul><li>If it does not need to be compacted, then simply copy it over.</li>
   * <li>Otherwise, it does need to be compacted, so...
   * <ul><li>Copy zero or one guy over.</li>
   * <li>If the level above is empty, halve up.</li>
   * <li>Else the level above is nonempty, so halve down, then merge up.</li>
   * </ul></li>
   * <li>Adjust the boundaries of the level above.</li>
   * </ul>
   *
   * <p>It can be proved that generalCompress returns a sketch that satisfies the space constraints
   * no matter how much data is passed in.
   * We are pretty sure that it works correctly when inBuf and outBuf are the same.
   * All levels except for level zero must be sorted before calling this, and will still be
   * sorted afterwards.
   * Level zero is not required to be sorted before, and may not be sorted afterwards.</p>
   *
   * <p>This trashes inBuf and inLevels and modifies outBuf and outLevels.</p>
   *
   * @param k The sketch parameter k
   * @param m The minimum level size
   * @param numLevelsIn provisional number of levels = max(this.numLevels, other.numLevels)
   * @param inBuf work buffer of size = this.getNumRetained() + other.getNumRetainedAboveLevelZero().
   * This contains the double[] of the other sketch
   * @param inLevels work levels array size = ubOnNumLevels(this.n + other.n) + 2
   * @param outBuf the same array as inBuf
   * @param outLevels the same size as inLevels
   * @param isLevelZeroSorted true if this.level 0 is sorted
   * @param random instance of java.util.Random
   * @return int array of: {numLevels, targetItemCount, currentItemCount}
   */
  private static int[] generalDoublesCompress(
      final int k,
      final int m,
      final int numLevelsIn,
      final double[] inBuf,
      final int[] inLevels,
      final double[] outBuf,
      final int[] outLevels,
      final boolean isLevelZeroSorted,
      final Random random) {
    assert numLevelsIn > 0; // things are too weird if zero levels are allowed
    int numLevels = numLevelsIn;
    int currentItemCount = inLevels[numLevels] - inLevels[0]; // decreases with each compaction
    int targetItemCount = KllHelper.computeTotalItemCapacity(k, m, numLevels); // increases if we add levels
    boolean doneYet = false;
    outLevels[0] = 0;
    int curLevel = -1;
    while (!doneYet) {
      curLevel++; // start out at level 0
      // If we are at the current top level, add an empty level above it for convenience,
      // but do not increment numLevels until later
      if (curLevel == (numLevels - 1)) {
        inLevels[curLevel + 2] = inLevels[curLevel + 1];
      }
      final int rawBeg = inLevels[curLevel];
      final int rawLim = inLevels[curLevel + 1];
      final int rawPop = rawLim - rawBeg;
      if ((currentItemCount < targetItemCount) || (rawPop < KllHelper.levelCapacity(k, numLevels, curLevel, m))) {
        // copy level over as is
        // because inBuf and outBuf could be the same, make sure we are not moving data upwards!
        assert (rawBeg >= outLevels[curLevel]);
        System.arraycopy(inBuf, rawBeg, outBuf, outLevels[curLevel], rawPop);
        outLevels[curLevel + 1] = outLevels[curLevel] + rawPop;
      }
      else {
        // The sketch is too full AND this level is too full, so we compact it
        // Note: this can add a level and thus change the sketch's capacity
        final int popAbove = inLevels[curLevel + 2] - rawLim;
        final boolean oddPop = isOdd(rawPop);
        final int adjBeg = oddPop ? 1 + rawBeg : rawBeg; // skip the leftover odd item
        final int adjPop = oddPop ? rawPop - 1 : rawPop; // even count to be halved
        final int halfAdjPop = adjPop / 2;
        if (oddPop) { // copy one guy over
          outBuf[outLevels[curLevel]] = inBuf[rawBeg];
          outLevels[curLevel + 1] = outLevels[curLevel] + 1;
        } else { // copy zero guys over
          outLevels[curLevel + 1] = outLevels[curLevel];
        }
        // level zero might not be sorted, so we must sort it if we wish to compact it
        if ((curLevel == 0) && !isLevelZeroSorted) {
          Arrays.sort(inBuf, adjBeg, adjBeg + adjPop);
        }
        if (popAbove == 0) { // Level above is empty, so halve up
          randomlyHalveUpDoubles(inBuf, adjBeg, adjPop, random);
        } else { // Level above is nonempty, so halve down, then merge up
          randomlyHalveDownDoubles(inBuf, adjBeg, adjPop, random);
          mergeSortedDoubleArrays(inBuf, adjBeg, halfAdjPop, inBuf, rawLim, popAbove, inBuf, adjBeg + halfAdjPop);
        }
        // track the fact that we just eliminated some data
        currentItemCount -= halfAdjPop;
        // Adjust the boundaries of the level above
        inLevels[curLevel + 1] = inLevels[curLevel + 1] - halfAdjPop;
        // Increment numLevels if we just compacted the old top level
        // This creates some more capacity (the size of the new bottom level)
        if (curLevel == (numLevels - 1)) {
          numLevels++;
          targetItemCount += KllHelper.levelCapacity(k, numLevels, 0, m);
        }
      } // end of code for compacting a level
      // determine whether we have processed all levels yet (including any new levels that we created)
      if (curLevel == (numLevels - 1)) { doneYet = true; }
    } // end of loop over levels
    assert (outLevels[numLevels] - outLevels[0]) == currentItemCount;
    return new int[] {numLevels, targetItemCount, currentItemCount};
  }
private static void populateDoubleWorkArrays(
final double[] workbuf, final int[] worklevels, final int provisionalNumLevels,
final int myCurNumLevels, final int[] myCurLevelsArr, final double[] myCurDoubleItemsArr,
final int otherNumLevels, final int[] otherLevelsArr, final double[] otherDoubleItemsArr) {
worklevels[0] = 0;
// Note: the level zero data from "other" was already inserted into "self"
final int selfPopZero = KllHelper.currentLevelSizeItems(0, myCurNumLevels, myCurLevelsArr);
System.arraycopy(myCurDoubleItemsArr, myCurLevelsArr[0], workbuf, worklevels[0], selfPopZero);
worklevels[1] = worklevels[0] + selfPopZero;
for (int lvl = 1; lvl < provisionalNumLevels; lvl++) {
final int selfPop = KllHelper.currentLevelSizeItems(lvl, myCurNumLevels, myCurLevelsArr);
final int otherPop = KllHelper.currentLevelSizeItems(lvl, otherNumLevels, otherLevelsArr);
worklevels[lvl + 1] = worklevels[lvl] + selfPop + otherPop;
if (selfPop > 0 && otherPop == 0) {
System.arraycopy(myCurDoubleItemsArr, myCurLevelsArr[lvl], workbuf, worklevels[lvl], selfPop);
} else if (selfPop == 0 && otherPop > 0) {
System.arraycopy(otherDoubleItemsArr, otherLevelsArr[lvl], workbuf, worklevels[lvl], otherPop);
} else if (selfPop > 0 && otherPop > 0) {
mergeSortedDoubleArrays(
myCurDoubleItemsArr, myCurLevelsArr[lvl], selfPop,
otherDoubleItemsArr, otherLevelsArr[lvl], otherPop,
workbuf, worklevels[lvl]);
}
}
}
/*
* Validation Method.
* The following must be enabled for use with the KllDoublesValidationTest,
* which is only enabled for manual testing. In addition, two Validation Methods
* above need to be modified.
*/ //NOTE Validation Method: Need to uncomment to use
// static int nextOffset = 0;
//
// private static int deterministicOffset() {
// final int result = nextOffset;
// nextOffset = 1 - nextOffset;
// return result;
// }
}
| 2,635 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllMemoryValidate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR_SINGLE_ITEM;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryEmptyFlag;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryFamilyID;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryFlags;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryK;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryLevelZeroSortedFlag;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryM;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryMinK;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryN;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryNumLevels;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryPreInts;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemorySerVer;
import static org.apache.datasketches.kll.KllSketch.SketchType.DOUBLES_SKETCH;
import static org.apache.datasketches.kll.KllSketch.SketchType.FLOATS_SKETCH;
import static org.apache.datasketches.kll.KllSketch.SketchType.ITEMS_SKETCH;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.kll.KllSketch.SketchStructure;
import org.apache.datasketches.kll.KllSketch.SketchType;
import org.apache.datasketches.memory.Memory;
/**
 * This class performs all the error checking of an incoming Memory object and extracts the key fields in the process.
 * This is used by all KLL sketches that read or import Memory objects.
 * @author lrhodes
 *
 */
final class KllMemoryValidate {
  final Memory srcMem;
  final ArrayOfItemsSerDe<?> serDe; //required for ITEMS_SKETCH; null otherwise
  final SketchType sketchType;
  final SketchStructure sketchStructure; //derived from (preInts, serVer)
  // first 8 bytes of preamble
  final int preInts; //used by KllPreambleUtil
  final int serVer; //used by KllPreambleUtil
  final int familyID; //used by KllPreambleUtil
  final int flags; //used by KllPreambleUtil
  final int k; //used multiple places
  final int m; //used multiple places
  //byte 7 is unused
  //Flag bits:
  final boolean emptyFlag; //used multiple places
  final boolean level0SortedFlag; //used multiple places
  // depending on the layout, the next 8-16 bytes of the preamble, may be derived by assumption.
  // For example, if the layout is compact & empty, n = 0, if compact and single, n = 1.
  long n; //8 bytes (if present), used multiple places
  int minK; //2 bytes (if present), used multiple places
  int numLevels; //1 byte (if present), used by KllPreambleUtil
  //skip unused byte
  int[] levelsArr; //starts at byte 20, adjusted to include top index here, used multiple places
  // derived.
  int sketchBytes = 0; //used by KllPreambleUtil
  private int typeBytes = 0; //always 0 for generic
  /**
   * Validates a Memory image of a KLL doubles or floats sketch (no SerDe needed).
   * @param srcMem the source Memory containing a KLL sketch image
   * @param sketchType the expected sketch type
   */
  KllMemoryValidate(final Memory srcMem, final SketchType sketchType) {
    this(srcMem, sketchType, null);
  }
  /**
   * Validates a Memory image of a KLL sketch and extracts the preamble fields.
   * @param srcMem the source Memory containing a KLL sketch image
   * @param sketchType the expected sketch type
   * @param serDe the item serializer/deserializer; required for ITEMS_SKETCH, may be null otherwise
   */
  KllMemoryValidate(final Memory srcMem, final SketchType sketchType, final ArrayOfItemsSerDe<?> serDe) {
    final long memCapBytes = srcMem.getCapacity();
    if (memCapBytes < 8) { throw new SketchesArgumentException(MEMORY_TOO_SMALL + memCapBytes); }
    this.srcMem = srcMem;
    this.sketchType = sketchType;
    this.serDe = serDe;
    preInts = getMemoryPreInts(srcMem);
    serVer = getMemorySerVer(srcMem);
    //throws if the (preInts, serVer) pair does not identify a recognized structure
    sketchStructure = SketchStructure.getSketchStructure(preInts, serVer);
    familyID = getMemoryFamilyID(srcMem);
    if (familyID != Family.KLL.getID()) { throw new SketchesArgumentException(SRC_NOT_KLL + familyID); }
    flags = getMemoryFlags(srcMem);
    k = getMemoryK(srcMem);
    m = getMemoryM(srcMem);
    KllHelper.checkM(m);
    KllHelper.checkK(k, m);
    //flags
    emptyFlag = getMemoryEmptyFlag(srcMem);
    level0SortedFlag = getMemoryLevelZeroSortedFlag(srcMem);
    if (sketchType == DOUBLES_SKETCH) { typeBytes = Double.BYTES; }
    else if (sketchType == FLOATS_SKETCH) { typeBytes = Float.BYTES; }
    else { typeBytes = 0; } //ITEMS_SKETCH: item sizes come from the serDe instead
    validate();
  }
  //Performs the structure-specific checks and loads n, minK, numLevels, levelsArr, sketchBytes.
  private void validate() {
    switch (sketchStructure) {
      case COMPACT_FULL: {
        if (emptyFlag) { throw new SketchesArgumentException(EMPTY_FLAG_AND_COMPACT_FULL); }
        n = getMemoryN(srcMem);
        //if (n <= 1) { memoryValidateThrow(N_AND_COMPACT_FULL); } //very old sketches prior to serVer=2 will violate.
        minK = getMemoryMinK(srcMem);
        numLevels = getMemoryNumLevels(srcMem);
        // Get Levels Arr and add the last element
        levelsArr = new int[numLevels + 1];
        srcMem.getIntArray(DATA_START_ADR, levelsArr, 0, numLevels); //copies all except the last one
        final int capacityItems = KllHelper.computeTotalItemCapacity(k, m, numLevels);
        levelsArr[numLevels] = capacityItems; //load the last one
        sketchBytes = computeSketchBytes(srcMem, sketchType, levelsArr, false, serDe);
        break;
      }
      case COMPACT_EMPTY: {
        if (!emptyFlag) { throw new SketchesArgumentException(EMPTY_FLAG_AND_COMPACT_EMPTY); }
        n = 0; //assumed
        minK = k; //assumed
        numLevels = 1; //assumed
        levelsArr = new int[] {k, k}; //level 0 is empty: start == end == k
        sketchBytes = DATA_START_ADR_SINGLE_ITEM;
        break;
      }
      case COMPACT_SINGLE: {
        if (emptyFlag) { throw new SketchesArgumentException(EMPTY_FLAG_AND_COMPACT_SINGLE); }
        n = 1; //assumed
        minK = k; //assumed
        numLevels = 1; //assumed
        levelsArr = new int[] {k - 1, k}; //exactly one item at the top of level 0
        if (sketchType == ITEMS_SKETCH) {
          sketchBytes = DATA_START_ADR_SINGLE_ITEM + serDe.sizeOf(srcMem, DATA_START_ADR_SINGLE_ITEM, 1);
        } else {
          sketchBytes = DATA_START_ADR_SINGLE_ITEM + typeBytes;
        }
        break;
      }
      case UPDATABLE: {
        n = getMemoryN(srcMem);
        minK = getMemoryMinK(srcMem);
        numLevels = getMemoryNumLevels(srcMem);
        //the updatable layout stores the full levels array including the top index
        levelsArr = new int[numLevels + 1];
        srcMem.getIntArray(DATA_START_ADR, levelsArr, 0, numLevels + 1);
        sketchBytes = computeSketchBytes(srcMem, sketchType, levelsArr, true, serDe);
        break;
      }
      default: break; //can not happen
    }
  }
  /**
   * Computes the total serialized size in bytes of a COMPACT_FULL or UPDATABLE sketch image.
   * @param srcMem the source Memory (only read by the serDe for ITEMS_SKETCH)
   * @param sketchType the sketch type
   * @param levelsArr the full levels array, including the top index
   * @param updatable true for the UPDATABLE structure, false for COMPACT_FULL
   * @param serDe only valid for ITEMS_SKETCH; may be null otherwise
   * @return the total size of the sketch image in bytes
   */
  static int computeSketchBytes( //for COMPACT_FULL or UPDATABLE only
      final Memory srcMem,
      final SketchType sketchType,
      final int[] levelsArr, //full levels array
      final boolean updatable,
      final ArrayOfItemsSerDe<?> serDe) { //serDe only valid for ITEMS_SKETCH
    final int numLevels = levelsArr.length - 1;
    final int capacityItems = levelsArr[numLevels];
    final int retainedItems = (levelsArr[numLevels] - levelsArr[0]);
    //compact images omit the top index of the levels array and any free space
    final int levelsLen = updatable ? levelsArr.length : levelsArr.length - 1;
    final int numItems = updatable ? capacityItems : retainedItems;
    int offsetBytes = DATA_START_ADR + levelsLen * Integer.BYTES;
    if (sketchType == ITEMS_SKETCH) {
      offsetBytes += serDe.sizeOf(srcMem, offsetBytes, numItems + 2); //2 for min & max
    } else {
      final int typeBytes = sketchType.getBytes();
      offsetBytes += (numItems + 2) * typeBytes; //2 for min & max
    }
    return offsetBytes;
  }
  static final String EMPTY_FLAG_AND_COMPACT_EMPTY = "A compact empty sketch should have empty flag set. ";
  static final String EMPTY_FLAG_AND_COMPACT_FULL = "A compact full sketch should not have empty flag set. ";
  static final String EMPTY_FLAG_AND_COMPACT_SINGLE = "A single item sketch should not have empty flag set. ";
  //static final String N_AND_COMPACT_FULL = "A compact full sketch should have n > 1. ";
  static final String SRC_NOT_KLL = "FamilyID Field must be: " + Family.KLL.getID() + ", NOT: ";
  static final String MEMORY_TOO_SMALL = "A sketch memory image must be at least 8 bytes. ";
}
| 2,636 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllDirectFloatsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.common.ByteArrayUtil.copyBytes;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR_SINGLE_ITEM;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryK;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryLevelZeroSortedFlag;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryM;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryMinK;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryN;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryNumLevels;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryFamilyID;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryK;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryLevelZeroSortedFlag;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryM;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryMinK;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryN;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryNumLevels;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryPreInts;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemorySerVer;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_EMPTY;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_FULL;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.FLOATS_SKETCH;
import org.apache.datasketches.common.ByteArrayUtil;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
 * This class implements an off-heap, updatable KllFloatsSketch using WritableMemory.
 *
 * <p>Please refer to the documentation in the package-info:<br>
 * {@link org.apache.datasketches.kll}</p>
 *
 * @author Lee Rhodes, Kevin Lang
 */
class KllDirectFloatsSketch extends KllFloatsSketch {
  private WritableMemory wmem; //the backing Memory; may be replaced when the sketch grows
  private MemoryRequestServer memReqSvr; //may be null for compact/read-only structures
  /**
   * Constructs from Memory or WritableMemory already initialized with a sketch image and validated.
   * @param sketchStructure the structure of the given Memory image
   * @param wmem the current WritableMemory
   * @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
   * @param memVal the KllMemoryValidate object holding the validated preamble fields
   */
  KllDirectFloatsSketch(
      final SketchStructure sketchStructure,
      final WritableMemory wmem,
      final MemoryRequestServer memReqSvr,
      final KllMemoryValidate memVal) {
    super(sketchStructure);
    this.wmem = wmem;
    this.memReqSvr = memReqSvr;
    //only an UPDATABLE structure backed by writable Memory is mutable
    readOnly = (wmem != null && wmem.isReadOnly()) || sketchStructure != UPDATABLE;
    levelsArr = memVal.levelsArr; //always converted to writable form.
  }
  /**
   * Create a new updatable, direct instance of this sketch.
   * @param k parameter that controls size of the sketch and accuracy of estimates
   * @param m parameter that controls the minimum level width in items.
   * @param dstMem the given destination WritableMemory object for use by the sketch
   * @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
   * @return a new instance of this sketch
   */
  static KllDirectFloatsSketch newDirectUpdatableInstance(
      final int k,
      final int m,
      final WritableMemory dstMem,
      final MemoryRequestServer memReqSvr) {
    //write the preamble of an empty, updatable sketch
    setMemoryPreInts(dstMem, UPDATABLE.getPreInts());
    setMemorySerVer(dstMem, UPDATABLE.getSerVer());
    setMemoryFamilyID(dstMem, Family.KLL.getID());
    setMemoryK(dstMem, k);
    setMemoryM(dstMem, m);
    setMemoryN(dstMem, 0);
    setMemoryMinK(dstMem, k);
    setMemoryNumLevels(dstMem, 1);
    int offset = DATA_START_ADR;
    //new Levels array: level 0 is empty, so start == end == k
    dstMem.putIntArray(offset, new int[] {k, k}, 0, 2);
    offset += 2 * Integer.BYTES;
    //new min/max array
    dstMem.putFloatArray(offset, new float[] {Float.NaN, Float.NaN}, 0, 2);
    offset += 2 * ITEM_BYTES;
    //new empty items array
    dstMem.putFloatArray(offset, new float[k], 0, k);
    final KllMemoryValidate memVal = new KllMemoryValidate(dstMem, FLOATS_SKETCH, null);
    final WritableMemory wMem = dstMem;
    return new KllDirectFloatsSketch(UPDATABLE, wMem, memReqSvr, memVal);
  }
  //END of Constructors
  @Override
  public int getK() {
    return getMemoryK(wmem);
  }
  @Override
  public float getMaxItem() {
    int levelsArrBytes = 0;
    if (sketchStructure == COMPACT_EMPTY || isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    else if (sketchStructure == COMPACT_SINGLE) { return getFloatSingleItem(); }
    else if (sketchStructure == COMPACT_FULL) {
      levelsArrBytes = getLevelsArrSizeBytes(COMPACT_FULL);
    } else { //UPDATABLE
      levelsArrBytes = getLevelsArrSizeBytes(UPDATABLE);
    }
    //the max item follows the min item in the min/max section
    final int offset = DATA_START_ADR + levelsArrBytes + ITEM_BYTES;
    return wmem.getFloat(offset);
  }
  @Override
  public float getMinItem() {
    int levelsArrBytes = 0;
    if (sketchStructure == COMPACT_EMPTY || isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    else if (sketchStructure == COMPACT_SINGLE) { return getFloatSingleItem(); }
    else if (sketchStructure == COMPACT_FULL) {
      levelsArrBytes = getLevelsArrSizeBytes(COMPACT_FULL);
    } else { //UPDATABLE
      levelsArrBytes = getLevelsArrSizeBytes(UPDATABLE);
    }
    //the min item is the first entry of the min/max section, just past the levels array
    final int offset = DATA_START_ADR + levelsArrBytes;
    return wmem.getFloat(offset);
  }
  @Override
  public long getN() {
    if (sketchStructure == COMPACT_EMPTY) { return 0; }
    else if (sketchStructure == COMPACT_SINGLE) { return 1; }
    else { return getMemoryN(wmem); }
  }
  //restricted
  @Override //returns updatable, expanded array including empty/garbage space at bottom
  float[] getFloatItemsArray() {
    final int k = getK();
    if (sketchStructure == COMPACT_EMPTY) { return new float[k]; }
    if (sketchStructure == COMPACT_SINGLE) {
      final float[] itemsArr = new float[k];
      itemsArr[k - 1] = getFloatSingleItem(); //the single item occupies the top slot of level 0
      return itemsArr;
    }
    final int capacityItems = KllHelper.computeTotalItemCapacity(k, getM(), getNumLevels());
    final float[] floatItemsArr = new float[capacityItems];
    //items follow the levels array and the two min/max items
    final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + 2 * ITEM_BYTES;
    //a compact image stores only retained items, which land at index levelsArr[0]
    final int shift = (sketchStructure == COMPACT_FULL) ? levelsArr[0] : 0;
    final int numItems = (sketchStructure == COMPACT_FULL) ? getNumRetained() : capacityItems;
    wmem.getFloatArray(offset, floatItemsArr, shift, numItems);
    return floatItemsArr;
  }
  @Override //returns compact items array of retained items, no empty/garbage.
  float[] getFloatRetainedItemsArray() {
    if (sketchStructure == COMPACT_EMPTY) { return new float[0]; }
    if (sketchStructure == COMPACT_SINGLE) { return new float[] { getFloatSingleItem() }; }
    final int numRetained = getNumRetained();
    final float[] floatItemsArr = new float[numRetained];
    //for UPDATABLE, skip the free space below levelsArr[0]; COMPACT_FULL has none
    final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + 2 * ITEM_BYTES
        + (sketchStructure == COMPACT_FULL ? 0 : levelsArr[0] * ITEM_BYTES);
    wmem.getFloatArray(offset, floatItemsArr, 0, numRetained);
    return floatItemsArr;
  }
  @Override
  float getFloatSingleItem() {
    if (!isSingleItem()) { throw new SketchesArgumentException(NOT_SINGLE_ITEM_MSG); }
    if (sketchStructure == COMPACT_SINGLE) {
      return wmem.getFloat(DATA_START_ADR_SINGLE_ITEM);
    }
    final int offset;
    if (sketchStructure == COMPACT_FULL) {
      //the single retained item immediately follows the min/max section
      offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + 2 * ITEM_BYTES;
    } else { //sketchStructure == UPDATABLE
      //skip min/max (2 items) plus the k - 1 free slots below the item
      offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + (2 + getK() - 1) * ITEM_BYTES;
    }
    return wmem.getFloat(offset);
  }
  @Override
  int getM() {
    return getMemoryM(wmem);
  }
  @Override
  MemoryRequestServer getMemoryRequestServer() { return memReqSvr; }
  @Override
  int getMinK() {
    if (sketchStructure == COMPACT_FULL || sketchStructure == UPDATABLE) { return getMemoryMinK(wmem); }
    return getK(); //COMPACT_EMPTY and COMPACT_SINGLE assume minK == k
  }
  @Override
  byte[] getMinMaxByteArr() {
    final byte[] bytesOut = new byte[2 * ITEM_BYTES];
    if (sketchStructure == COMPACT_EMPTY) {
      ByteArrayUtil.putFloatLE(bytesOut, 0, Float.NaN);
      ByteArrayUtil.putFloatLE(bytesOut, ITEM_BYTES, Float.NaN);
      return bytesOut;
    }
    final int offset;
    if (sketchStructure == COMPACT_SINGLE) {
      offset = DATA_START_ADR_SINGLE_ITEM;
      //min == max == the single item
      wmem.getByteArray(offset, bytesOut, 0, ITEM_BYTES);
      copyBytes(bytesOut, 0, bytesOut, ITEM_BYTES, ITEM_BYTES);
      return bytesOut;
    }
    //sketchStructure == UPDATABLE OR COMPACT_FULL
    offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure);
    wmem.getByteArray(offset, bytesOut, 0, ITEM_BYTES);
    wmem.getByteArray(offset + ITEM_BYTES, bytesOut, ITEM_BYTES, ITEM_BYTES);
    return bytesOut;
  }
  @Override
  byte[] getRetainedItemsByteArr() {
    if (sketchStructure == COMPACT_EMPTY) { return new byte[0]; }
    final float[] fltArr = getFloatRetainedItemsArray();
    final byte[] fltByteArr = new byte[fltArr.length * ITEM_BYTES];
    final WritableMemory wmem2 = WritableMemory.writableWrap(fltByteArr);
    wmem2.putFloatArray(0, fltArr, 0, fltArr.length);
    return fltByteArr;
  }
  @Override
  byte[] getTotalItemsByteArr() {
    final float[] fltArr = getFloatItemsArray();
    final byte[] fltByteArr = new byte[fltArr.length * ITEM_BYTES];
    final WritableMemory wmem2 = WritableMemory.writableWrap(fltByteArr);
    wmem2.putFloatArray(0, fltArr, 0, fltArr.length);
    return fltByteArr;
  }
  @Override
  WritableMemory getWritableMemory() {
    return wmem;
  }
  @Override
  void incN() {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    long n = getMemoryN(wmem);
    setMemoryN(wmem, ++n);
  }
  @Override
  void incNumLevels() {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    int numLevels = getMemoryNumLevels(wmem);
    setMemoryNumLevels(wmem, ++numLevels);
  }
  @Override
  boolean isLevelZeroSorted() {
    return getMemoryLevelZeroSortedFlag(wmem);
  }
  @Override
  void setFloatItemsArray(final float[] floatItems) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    //items start just past the levels array and the two min/max items
    final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + 2 * ITEM_BYTES;
    wmem.putFloatArray(offset, floatItems, 0, floatItems.length);
  }
  @Override
  void setFloatItemsArrayAt(final int index, final float item) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    //index is relative to the items section; +2 skips the min/max items
    final int offset =
        DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + (index + 2) * ITEM_BYTES;
    wmem.putFloat(offset, item);
  }
  @Override
  void setLevelZeroSorted(final boolean sorted) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    setMemoryLevelZeroSortedFlag(wmem, sorted);
  }
  @Override
  void setMaxItem(final float item) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    //the max item follows the min item in the min/max section
    final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + ITEM_BYTES;
    wmem.putFloat(offset, item);
  }
  @Override
  void setMinItem(final float item) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    //the min item is the first entry of the min/max section
    final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure);
    wmem.putFloat(offset, item);
  }
  @Override
  void setMinK(final int minK) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    setMemoryMinK(wmem, minK);
  }
  @Override
  void setN(final long n) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    setMemoryN(wmem, n);
  }
  @Override
  void setNumLevels(final int numLevels) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    setMemoryNumLevels(wmem, numLevels);
  }
  @Override
  void setWritableMemory(final WritableMemory wmem) {
    this.wmem = wmem;
  }
  //Read-only variant backed by a (possibly read-only) compact Memory image.
  final static class KllDirectCompactFloatsSketch extends KllDirectFloatsSketch {
    KllDirectCompactFloatsSketch(
        final SketchStructure sketchStructure,
        final Memory srcMem,
        final KllMemoryValidate memVal) {
      //cast is safe: readOnly is set true in the super constructor for non-UPDATABLE structures
      super(sketchStructure, (WritableMemory) srcMem, null, memVal);
    }
  }
}
| 2,637 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllItemsSketchIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import org.apache.datasketches.quantilescommon.QuantilesGenericSketchIterator;
/**
 * Iterator over KllItemsSketch. The order is not defined.
 * @param <T> the type of item in the sketch
 */
public final class KllItemsSketchIterator<T> implements QuantilesGenericSketchIterator<T> {
  private final Object[] quantiles; //flat items array shared across levels
  private final int[] levelsArr;    //level boundaries into quantiles
  private final int numLevels;
  private int level;   //current level being traversed
  private int index;   //current position within quantiles
  private long weight; //weight of items at the current level: 2^level
  private boolean isInitialized; //renamed from isInitialized_ for consistency with KllFloatsSketchIterator
  KllItemsSketchIterator(final Object[] quantiles, final int[] levelsArr, final int numLevels) {
    this.quantiles = quantiles;
    this.levelsArr = levelsArr;
    this.numLevels = numLevels;
    this.isInitialized = false;
  }
  @SuppressWarnings("unchecked")
  @Override
  public T getQuantile() {
    return (T) quantiles[index];
  }
  @Override
  public long getWeight() {
    return weight;
  }
  @Override
  public boolean next() {
    if (!isInitialized) {
      level = 0;
      index = levelsArr[level];
      weight = 1;
      isInitialized = true;
    } else {
      index++;
    }
    if (index < levelsArr[level + 1]) {
      return true;
    }
    // go to the next non-empty level
    do {
      level++;
      if (level == numLevels) {
        return false; // run out of levels
      }
      weight *= 2; // each level up doubles the item weight
    } while (levelsArr[level] == levelsArr[level + 1]);
    index = levelsArr[level];
    return true;
  }
}
| 2,638 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllFloatsSketchIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import org.apache.datasketches.quantilescommon.QuantilesFloatsSketchIterator;
/**
 * Iterator over KllFloatsSketch. The order is not defined.
 */
public final class KllFloatsSketchIterator implements QuantilesFloatsSketchIterator {
  private final float[] quantiles; //flat items array shared across levels
  private final int[] levelsArr;   //level boundaries into quantiles
  private final int numLevels;
  private int level;   //current level being traversed
  private int index;   //current position within quantiles
  private long weight; //weight of items at the current level: 2^level
  private boolean isInitialized;
  KllFloatsSketchIterator(final float[] quantiles, final int[] levelsArr, final int numLevels) {
    this.quantiles = quantiles;
    this.levelsArr = levelsArr;
    this.numLevels = numLevels;
    this.isInitialized = false;
  }
  @Override
  public float getQuantile() {
    return quantiles[index];
  }
  @Override
  public long getWeight() {
    return weight;
  }
  @Override
  public boolean next() {
    if (isInitialized) {
      index++;
    } else {
      //first call: position at the start of level 0
      level = 0;
      index = levelsArr[0];
      weight = 1;
      isInitialized = true;
    }
    //still within the current level?
    if (index < levelsArr[level + 1]) { return true; }
    //advance past empty levels; item weight doubles with each level climbed
    while (true) {
      level++;
      if (level == numLevels) { return false; } //exhausted all levels
      weight *= 2;
      if (levelsArr[level] != levelsArr[level + 1]) { break; } //found a non-empty level
    }
    index = levelsArr[level];
    return true;
  }
}
| 2,639 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllFloatsSketchSortedView.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import static org.apache.datasketches.quantilescommon.QuantilesAPI.EMPTY_MSG;
import java.util.Arrays;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.quantilescommon.FloatsSortedView;
import org.apache.datasketches.quantilescommon.InequalitySearch;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesUtil;
/**
* The SortedView of the KllFloatsSketch.
* @author Alexander Saydakov
* @author Lee Rhodes
*/
public final class KllFloatsSketchSortedView implements FloatsSortedView {
// Parallel arrays: quantiles[i] pairs with cumWeights[i]; both sorted by quantile.
private final float[] quantiles;
private final long[] cumWeights; //comes in as individual weights, converted to cumulative natural weights
private final long totalN;
/**
* Construct from elements for testing.
* @param quantiles sorted array of quantiles
* @param cumWeights sorted, monotonically increasing cumulative weights.
* @param totalN the total number of items presented to the sketch.
*/
KllFloatsSketchSortedView(final float[] quantiles, final long[] cumWeights, final long totalN) {
this.quantiles = quantiles;
this.cumWeights = cumWeights;
this.totalN = totalN;
}
/**
* Constructs this Sorted View given the sketch
* @param sk the given KllFloatsSketch.
*/
public KllFloatsSketchSortedView(final KllFloatsSketch sk) {
this.totalN = sk.getN();
final float[] srcQuantiles = sk.getFloatItemsArray();
final int[] srcLevels = sk.levelsArr;
final int srcNumLevels = sk.getNumLevels();
// Level zero is the only level that may be unsorted; sort it in place first.
if (!sk.isLevelZeroSorted()) {
Arrays.sort(srcQuantiles, srcLevels[0], srcLevels[1]);
// Only record the sorted state for on-heap sketches; a Memory-backed image is not modified.
if (!sk.hasMemory()) { sk.setLevelZeroSorted(true); }
}
final int numQuantiles = srcLevels[srcNumLevels] - srcLevels[0]; //remove garbage
quantiles = new float[numQuantiles];
cumWeights = new long[numQuantiles];
populateFromSketch(srcQuantiles, srcLevels, srcNumLevels, numQuantiles);
}
@Override
public long[] getCumulativeWeights() {
return cumWeights.clone();
}
@Override
public float getQuantile(final double rank, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
QuantilesUtil.checkNormalizedRankBounds(rank);
final int len = cumWeights.length;
// Convert the normalized rank in [0,1] to a natural rank in [0,totalN]:
// INCLUSIVE rounds up, EXCLUSIVE rounds down.
final long naturalRank = (searchCrit == INCLUSIVE)
? (long)Math.ceil(rank * totalN) : (long)Math.floor(rank * totalN);
final InequalitySearch crit = (searchCrit == INCLUSIVE) ? InequalitySearch.GE : InequalitySearch.GT;
final int index = InequalitySearch.find(cumWeights, 0, len - 1, naturalRank, crit);
if (index == -1) {
return quantiles[quantiles.length - 1]; //EXCLUSIVE (GT) case: normRank == 1.0;
}
return quantiles[index];
}
@Override
public float[] getQuantiles() {
return quantiles.clone();
}
@Override
public double getRank(final float quantile, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
final int len = quantiles.length;
// INCLUSIVE counts items <= quantile; EXCLUSIVE counts items < quantile.
final InequalitySearch crit = (searchCrit == INCLUSIVE) ? InequalitySearch.LE : InequalitySearch.LT;
final int index = InequalitySearch.find(quantiles, 0, len - 1, quantile, crit);
if (index == -1) {
return 0; //EXCLUSIVE (LT) case: quantile <= minQuantile; INCLUSIVE (LE) case: quantile < minQuantile
}
return (double)cumWeights[index] / totalN;
}
@Override
public boolean isEmpty() {
return totalN == 0;
}
@Override
public KllFloatsSketchSortedViewIterator iterator() {
return new KllFloatsSketchSortedViewIterator(quantiles, cumWeights);
}
//restricted methods
// Copies the retained items out of the sketch, assigns each item its level weight
// (2^level), tandem-sorts (quantiles, weights), then converts weights to cumulative form.
private void populateFromSketch(final float[] srcQuantiles, final int[] srcLevels,
final int srcNumLevels, final int numItems) {
final int[] myLevels = new int[srcNumLevels + 1];
final int offset = srcLevels[0]; // start of valid (non-garbage) items in the source array
System.arraycopy(srcQuantiles, offset, quantiles, 0, numItems);
int srcLevel = 0;
int dstLevel = 0;
long weight = 1; // weight of level-0 items; doubles with each level ascended
while (srcLevel < srcNumLevels) {
final int fromIndex = srcLevels[srcLevel] - offset;
final int toIndex = srcLevels[srcLevel + 1] - offset; // exclusive
if (fromIndex < toIndex) { // if equal, skip empty level
Arrays.fill(cumWeights, fromIndex, toIndex, weight);
// myLevels records only the non-empty level boundaries, compacted.
myLevels[dstLevel] = fromIndex;
myLevels[dstLevel + 1] = toIndex;
dstLevel++;
}
srcLevel++;
weight *= 2;
}
final int numLevels = dstLevel;
blockyTandemMergeSort(quantiles, cumWeights, myLevels, numLevels); //create unit weights
KllHelper.convertToCumulative(cumWeights);
}
// Merge-sorts the already-sorted level blocks, keeping quantiles and weights in tandem.
private static void blockyTandemMergeSort(final float[] quantiles, final long[] weights,
final int[] levels, final int numLevels) {
if (numLevels == 1) { return; }
// duplicate the input in preparation for the "ping-pong" copy reduction strategy.
final float[] quantilesTmp = Arrays.copyOf(quantiles, quantiles.length);
final long[] weightsTmp = Arrays.copyOf(weights, quantiles.length); // don't need the extra one here
blockyTandemMergeSortRecursion(quantilesTmp, weightsTmp, quantiles, weights, levels, 0, numLevels);
}
// Recursively sorts the two halves of the given range of level blocks. The roles of src
// and dst are swapped at each recursion so every merge writes into the other array pair
// ("ping-pong"), avoiding extra copying.
private static void blockyTandemMergeSortRecursion(
final float[] quantilesSrc, final long[] weightsSrc,
final float[] quantilesDst, final long[] weightsDst,
final int[] levels, final int startingLevel, final int numLevels) {
if (numLevels == 1) { return; }
final int numLevels1 = numLevels / 2;
final int numLevels2 = numLevels - numLevels1;
assert numLevels1 >= 1;
assert numLevels2 >= numLevels1;
final int startingLevel1 = startingLevel;
final int startingLevel2 = startingLevel + numLevels1;
// swap roles of src and dst
blockyTandemMergeSortRecursion(
quantilesDst, weightsDst,
quantilesSrc, weightsSrc,
levels, startingLevel1, numLevels1);
blockyTandemMergeSortRecursion(
quantilesDst, weightsDst,
quantilesSrc, weightsSrc,
levels, startingLevel2, numLevels2);
tandemMerge(
quantilesSrc, weightsSrc,
quantilesDst, weightsDst,
levels,
startingLevel1, numLevels1,
startingLevel2, numLevels2);
}
// Merges two adjacent sorted runs of blocks from src into dst, moving each quantile
// together with its weight. Equal quantiles take the item from the second run first
// (the '<' comparison), which preserves the merge's determinism.
private static void tandemMerge(
final float[] quantilesSrc, final long[] weightsSrc,
final float[] quantilesDst, final long[] weightsDst,
final int[] levelStarts,
final int startingLevel1, final int numLevels1,
final int startingLevel2, final int numLevels2) {
final int fromIndex1 = levelStarts[startingLevel1];
final int toIndex1 = levelStarts[startingLevel1 + numLevels1]; // exclusive
final int fromIndex2 = levelStarts[startingLevel2];
final int toIndex2 = levelStarts[startingLevel2 + numLevels2]; // exclusive
int iSrc1 = fromIndex1;
int iSrc2 = fromIndex2;
int iDst = fromIndex1;
while (iSrc1 < toIndex1 && iSrc2 < toIndex2) {
if (quantilesSrc[iSrc1] < quantilesSrc[iSrc2]) {
quantilesDst[iDst] = quantilesSrc[iSrc1];
weightsDst[iDst] = weightsSrc[iSrc1];
iSrc1++;
} else {
quantilesDst[iDst] = quantilesSrc[iSrc2];
weightsDst[iDst] = weightsSrc[iSrc2];
iSrc2++;
}
iDst++;
}
// Copy whichever run has items remaining.
if (iSrc1 < toIndex1) {
System.arraycopy(quantilesSrc, iSrc1, quantilesDst, iDst, toIndex1 - iSrc1);
System.arraycopy(weightsSrc, iSrc1, weightsDst, iDst, toIndex1 - iSrc1);
} else if (iSrc2 < toIndex2) {
System.arraycopy(quantilesSrc, iSrc2, quantilesDst, iDst, toIndex2 - iSrc2);
System.arraycopy(weightsSrc, iSrc2, weightsDst, iDst, toIndex2 - iSrc2);
}
}
}
| 2,640 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllFloatsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.apache.datasketches.common.ByteArrayUtil.putFloatLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.FLOATS_SKETCH;
import static org.apache.datasketches.quantilescommon.QuantilesUtil.equallyWeightedRanks;
import java.util.Objects;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SuppressFBWarnings;
import org.apache.datasketches.kll.KllDirectFloatsSketch.KllDirectCompactFloatsSketch;
import org.apache.datasketches.memory.DefaultMemoryRequestServer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.quantilescommon.FloatsSortedView;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesFloatsAPI;
import org.apache.datasketches.quantilescommon.QuantilesFloatsSketchIterator;
/**
* This variation of the KllSketch implements primitive floats.
*
* @see org.apache.datasketches.kll.KllSketch
*/
public abstract class KllFloatsSketch extends KllSketch implements QuantilesFloatsAPI {
// Lazily built cache of the sorted view; set to null by update(), merge() and reset().
private KllFloatsSketchSortedView kllFloatsSV = null;
// Serialized size of a single item of this sketch type.
final static int ITEM_BYTES = Float.BYTES;
// Constructor for use by subclasses only.
KllFloatsSketch(
final SketchStructure sketchStructure) {
super(SketchType.FLOATS_SKETCH, sketchStructure);
}
//Factories for new heap instances.
/**
* Create a new heap instance of this sketch with the default <em>k = 200</em>.
* The default <em>k</em> = 200 results in a normalized rank error of about
* 1.65%. Larger K will have smaller error but the sketch will be larger (and slower).
* @return new KllFloatsSketch on the Java heap.
*/
public static KllFloatsSketch newHeapInstance() {
return newHeapInstance(DEFAULT_K);
}
/**
* Create a new heap instance of this sketch with a given parameter <em>k</em>.
* <em>k</em> can be between 8, inclusive, and 65535, inclusive.
* The default <em>k</em> = 200 results in a normalized rank error of about
* 1.65%. Larger K will have smaller error but the sketch will be larger (and slower).
* @param k parameter that controls size of the sketch and accuracy of estimates.
* @return new KllFloatsSketch on the Java heap.
*/
public static KllFloatsSketch newHeapInstance(final int k) {
return new KllHeapFloatsSketch(k, DEFAULT_M);
}
//Factories for new direct instances.
/**
* Create a new direct updatable instance of this sketch with the default <em>k</em>.
* The default <em>k</em> = 200 results in a normalized rank error of about
* 1.65%. Larger <em>k</em> will have smaller error but the sketch will be larger (and slower).
* @param dstMem the given destination WritableMemory object for use by the sketch
* @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
* @return a new direct instance of this sketch
*/
public static KllFloatsSketch newDirectInstance(
final WritableMemory dstMem,
final MemoryRequestServer memReqSvr) {
return newDirectInstance(DEFAULT_K, dstMem, memReqSvr);
}
/**
* Create a new direct updatable instance of this sketch with a given <em>k</em>.
* @param k parameter that controls size of the sketch and accuracy of estimates.
* @param dstMem the given destination WritableMemory object for use by the sketch
* @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
* @return a new direct instance of this sketch
*/
public static KllFloatsSketch newDirectInstance(
final int k,
final WritableMemory dstMem,
final MemoryRequestServer memReqSvr) {
Objects.requireNonNull(dstMem, "Parameter 'dstMem' must not be null");
Objects.requireNonNull(memReqSvr, "Parameter 'memReqSvr' must not be null");
return KllDirectFloatsSketch.newDirectUpdatableInstance(k, DEFAULT_M, dstMem, memReqSvr);
}
//Factory to create an heap instance from a Memory image
/**
* Factory heapify takes a compact sketch image in Memory and instantiates an on-heap sketch.
* The resulting sketch will not retain any link to the source Memory.
* @param srcMem a compact Memory image of a sketch serialized by this sketch.
* <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
* @return a heap-based sketch based on the given Memory.
*/
public static KllFloatsSketch heapify(final Memory srcMem) {
Objects.requireNonNull(srcMem, "Parameter 'srcMem' must not be null");
return KllHeapFloatsSketch.heapifyImpl(srcMem);
}
//Factory to wrap a Read-Only Memory
/**
* Wrap a sketch around the given read only compact source Memory containing sketch data
* that originated from this sketch.
* @param srcMem the read only source Memory
* @return instance of this sketch
*/
public static KllFloatsSketch wrap(final Memory srcMem) {
Objects.requireNonNull(srcMem, "Parameter 'srcMem' must not be null");
final KllMemoryValidate memVal = new KllMemoryValidate(srcMem, FLOATS_SKETCH, null);
if (memVal.sketchStructure == UPDATABLE) {
// An UPDATABLE image wrapped via this read-only path still gets a (dummy) request server.
final MemoryRequestServer memReqSvr = new DefaultMemoryRequestServer(); //dummy
return new KllDirectFloatsSketch(memVal.sketchStructure, (WritableMemory)srcMem, memReqSvr, memVal);
} else {
return new KllDirectCompactFloatsSketch(memVal.sketchStructure, srcMem, memVal);
}
}
//Factory to wrap a WritableMemory image
/**
* Wrap a sketch around the given source Writable Memory containing sketch data
* that originated from this sketch.
* @param srcMem a WritableMemory that contains data.
* @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
* @return instance of this sketch
*/
public static KllFloatsSketch writableWrap(
final WritableMemory srcMem,
final MemoryRequestServer memReqSvr) {
Objects.requireNonNull(srcMem, "Parameter 'srcMem' must not be null");
Objects.requireNonNull(memReqSvr, "Parameter 'memReqSvr' must not be null");
final KllMemoryValidate memVal = new KllMemoryValidate(srcMem, FLOATS_SKETCH, null);
if (memVal.sketchStructure == UPDATABLE) {
return new KllDirectFloatsSketch(UPDATABLE, srcMem, memReqSvr, memVal);
} else {
// Compact images are read-only even when given writable Memory.
return new KllDirectCompactFloatsSketch(memVal.sketchStructure, srcMem, memVal);
}
}
//END of Constructors
@Override
public double[] getCDF(final float[] splitPoints, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllFloatsSV.getCDF(splitPoints, searchCrit);
}
@Override
public FloatsPartitionBoundaries getPartitionBoundaries(final int numEquallyWeighted,
final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
final double[] ranks = equallyWeightedRanks(numEquallyWeighted);
final float[] boundaries = getQuantiles(ranks, searchCrit);
// Force the end boundaries to the exact min and max items of the sketch.
boundaries[0] = getMinItem();
boundaries[boundaries.length - 1] = getMaxItem();
final FloatsPartitionBoundaries fpb = new FloatsPartitionBoundaries();
fpb.N = this.getN();
fpb.ranks = ranks;
fpb.boundaries = boundaries;
return fpb;
}
@Override
public double[] getPMF(final float[] splitPoints, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllFloatsSV.getPMF(splitPoints, searchCrit);
}
@Override
public float getQuantile(final double rank, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllFloatsSV.getQuantile(rank, searchCrit);
}
@Override
public float[] getQuantiles(final double[] ranks, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
final int len = ranks.length;
final float[] quantiles = new float[len];
for (int i = 0; i < len; i++) {
quantiles[i] = kllFloatsSV.getQuantile(ranks[i], searchCrit);
}
return quantiles;
}
/**
* {@inheritDoc}
* The approximate probability that the true quantile is within the confidence interval
* specified by the upper and lower quantile bounds for this sketch is 0.99.
*/
@Override
public float getQuantileLowerBound(final double rank) {
return getQuantile(max(0, rank - KllHelper.getNormalizedRankError(getMinK(), false)));
}
/**
* {@inheritDoc}
* The approximate probability that the true quantile is within the confidence interval
* specified by the upper and lower quantile bounds for this sketch is 0.99.
*/
@Override
public float getQuantileUpperBound(final double rank) {
return getQuantile(min(1.0, rank + KllHelper.getNormalizedRankError(getMinK(), false)));
}
@Override
public double getRank(final float quantile, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllFloatsSV.getRank(quantile, searchCrit);
}
/**
* {@inheritDoc}
* The approximate probability that the true rank is within the confidence interval
* specified by the upper and lower rank bounds for this sketch is 0.99.
*/
@Override
public double getRankLowerBound(final double rank) {
return max(0.0, rank - KllHelper.getNormalizedRankError(getMinK(), false));
}
/**
* {@inheritDoc}
* The approximate probability that the true rank is within the confidence interval
* specified by the upper and lower rank bounds for this sketch is 0.99.
*/
@Override
public double getRankUpperBound(final double rank) {
return min(1.0, rank + KllHelper.getNormalizedRankError(getMinK(), false));
}
@Override
public double[] getRanks(final float[] quantiles, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
final int len = quantiles.length;
final double[] ranks = new double[len];
for (int i = 0; i < len; i++) {
ranks[i] = kllFloatsSV.getRank(quantiles[i], searchCrit);
}
return ranks;
}
@Override
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "OK in this case.")
public FloatsSortedView getSortedView() {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllFloatsSV;
}
@Override
public QuantilesFloatsSketchIterator iterator() {
return new KllFloatsSketchIterator(
getFloatItemsArray(), getLevelsArray(SketchStructure.UPDATABLE), getNumLevels());
}
@Override
public final void merge(final KllSketch other) {
// Only an updatable (non read-only) sketch may be a merge target.
if (readOnly || sketchStructure != UPDATABLE) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
final KllFloatsSketch othFltSk = (KllFloatsSketch)other;
if (othFltSk.isEmpty()) { return; }
KllFloatsHelper.mergeFloatImpl(this, othFltSk);
kllFloatsSV = null; // invalidate the cached sorted view
}
/**
* {@inheritDoc}
* <p>The parameter <i>k</i> will not change.</p>
*/
@Override
public final void reset() {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
final int k = getK();
setN(0);
setMinK(k);
setNumLevels(1);
setLevelZeroSorted(false);
setLevelsArray(new int[] {k, k});
setMinItem(Float.NaN);
setMaxItem(Float.NaN);
setFloatItemsArray(new float[k]);
kllFloatsSV = null; // invalidate the cached sorted view
}
@Override
public byte[] toByteArray() {
return KllHelper.toByteArray(this, false);
}
@Override
public void update(final float item) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
KllFloatsHelper.updateFloat(this, item);
kllFloatsSV = null; // invalidate the cached sorted view
}
//restricted
/**
* @return full size of internal items array including empty space at bottom.
*/
abstract float[] getFloatItemsArray();
/**
* @return items array of retained items.
*/
abstract float[] getFloatRetainedItemsArray();
// Returns the single retained item; only meaningful when the sketch holds exactly one item.
abstract float getFloatSingleItem();
@Override
abstract byte[] getMinMaxByteArr();
@Override
int getMinMaxSizeBytes() {
// Serialized size of the min and max items together.
return Float.BYTES * 2;
}
@Override
abstract byte[] getRetainedItemsByteArr();
@Override
int getRetainedItemsSizeBytes() {
return getNumRetained() * Float.BYTES;
}
@Override
ArrayOfItemsSerDe<?> getSerDe() { return null; } // primitive floats need no SerDe
@Override
final byte[] getSingleItemByteArr() {
// Serialize the single item in little-endian byte order.
final byte[] bytes = new byte[ITEM_BYTES];
putFloatLE(bytes, 0, getFloatSingleItem());
return bytes;
}
@Override
int getSingleItemSizeBytes() {
return Float.BYTES;
}
@Override
abstract byte[] getTotalItemsByteArr();
@Override
int getTotalItemsNumBytes() {
// Full capacity of the items array (including unused bottom slots) in bytes.
return levelsArr[getNumLevels()] * Float.BYTES;
}
// Builds the sorted view on first use; subsequent calls reuse the cached instance.
private final void refreshSortedView() {
kllFloatsSV = (kllFloatsSV == null)
? new KllFloatsSketchSortedView(this) : kllFloatsSV;
}
abstract void setFloatItemsArray(float[] floatItems);
abstract void setFloatItemsArrayAt(int index, float item);
abstract void setMaxItem(float item);
abstract void setMinItem(float item);
}
| 2,641 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllHeapDoublesSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.common.ByteArrayUtil.putDoubleLE;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR_SINGLE_ITEM;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_EMPTY;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_FULL;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.DOUBLES_SKETCH;
import java.util.Arrays;
import java.util.Objects;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
* This class implements an on-heap doubles KllSketch.
*
* <p>Please refer to the documentation in the package-info:<br>
* {@link org.apache.datasketches.kll}</p>
*
* @author Lee Rhodes, Kevin Lang
*/
final class KllHeapDoublesSketch extends KllDoublesSketch {
private final int k; // configured size of K.
private final int m; // configured size of M.
private long n; // number of items input into this sketch.
private int minK; // dynamic minK for error estimation after merging with different k.
private boolean isLevelZeroSorted; // true when level 0 (the input buffer) is sorted
private double minDoubleItem; // smallest item seen; NaN when empty
private double maxDoubleItem; // largest item seen; NaN when empty
private double[] doubleItems; // items array; items fill from the top (index k-1) downward
/**
* New instance heap constructor with a given parameters <em>k</em> and <em>m</em>.
*
* @param k parameter that controls size of the sketch and accuracy of estimates.
* <em>k</em> can be between <em>m</em> and 65535, inclusive.
* The default <em>k</em> = 200 results in a normalized rank error of about 1.65%.
* Larger <em>k</em> will have smaller error but the sketch will be larger (and slower).
* @param m parameter controls the minimum level width in items. It can be 2, 4, 6 or 8.
* The DEFAULT_M, which is 8 is recommended. Other sizes of <em>m</em> should be considered
* experimental as they have not been as well characterized.
*/
KllHeapDoublesSketch(final int k, final int m) {
super(UPDATABLE);
KllHelper.checkM(m);
KllHelper.checkK(k, m);
this.levelsArr = new int[] {k, k}; // one empty level of capacity k
this.readOnly = false;
this.k = k;
this.m = m;
this.n = 0;
this.minK = k;
this.isLevelZeroSorted = false;
this.minDoubleItem = Double.NaN;
this.maxDoubleItem = Double.NaN;
this.doubleItems = new double[k];
}
/**
* Heapify constructor.
* @param srcMem Memory object that contains data serialized by this sketch.
* @param memValidate the MemoryValidate object
*/
private KllHeapDoublesSketch(
final Memory srcMem,
final KllMemoryValidate memValidate) {
super(UPDATABLE);
final SketchStructure memStructure = memValidate.sketchStructure;
this.k = memValidate.k;
this.m = memValidate.m;
this.n = memValidate.n;
this.minK = memValidate.minK;
this.levelsArr = memValidate.levelsArr; //normalized to full
this.isLevelZeroSorted = memValidate.level0SortedFlag;
// Each serialized structure lays out min/max and items differently.
if (memStructure == COMPACT_EMPTY) {
minDoubleItem = Double.NaN;
maxDoubleItem = Double.NaN;
doubleItems = new double[k];
}
else if (memStructure == COMPACT_SINGLE) {
// A single item serves as both min and max and sits at the top of the array.
final double item = srcMem.getDouble(DATA_START_ADR_SINGLE_ITEM);
minDoubleItem = maxDoubleItem = item;
doubleItems = new double[k];
doubleItems[k - 1] = item;
}
else if (memStructure == COMPACT_FULL) {
int offsetBytes = DATA_START_ADR;
offsetBytes += (levelsArr.length - 1) * Integer.BYTES; //shortened levelsArr
minDoubleItem = srcMem.getDouble(offsetBytes);
offsetBytes += Double.BYTES;
maxDoubleItem = srcMem.getDouble(offsetBytes);
offsetBytes += Double.BYTES;
final int capacityItems = levelsArr[getNumLevels()];
final int garbageItems = levelsArr[0];
final int retainedItems = capacityItems - garbageItems;
doubleItems = new double[capacityItems];
// Only the retained items are serialized; they land above the garbage region.
srcMem.getDoubleArray(offsetBytes, doubleItems, garbageItems, retainedItems);
}
else { //(memStructure == UPDATABLE)
int offsetBytes = DATA_START_ADR;
offsetBytes += levelsArr.length * Integer.BYTES; //full levelsArr
minDoubleItem = srcMem.getDouble(offsetBytes);
offsetBytes += Double.BYTES;
maxDoubleItem = srcMem.getDouble(offsetBytes);
offsetBytes += Double.BYTES;
final int capacityItems = levelsArr[getNumLevels()];
doubleItems = new double[capacityItems];
// The updatable image serializes the entire items array, garbage included.
srcMem.getDoubleArray(offsetBytes, doubleItems, 0, capacityItems);
}
}
// Factory: validates the Memory image, then builds an on-heap sketch from it.
static KllHeapDoublesSketch heapifyImpl(final Memory srcMem) {
Objects.requireNonNull(srcMem, "Parameter 'srcMem' must not be null");
final KllMemoryValidate memVal = new KllMemoryValidate(srcMem, DOUBLES_SKETCH);
return new KllHeapDoublesSketch(srcMem, memVal);
}
@Override
public int getK() { return k; }
@Override
public double getMaxItem() {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
return maxDoubleItem;
}
@Override
public double getMinItem() {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
return minDoubleItem;
}
@Override
public long getN() { return n; }
//restricted
@Override
double[] getDoubleItemsArray() { return doubleItems; }
@Override
double getDoubleSingleItem() {
if (n != 1L) { throw new SketchesArgumentException(NOT_SINGLE_ITEM_MSG); }
// The single item is stored at the top of the items array.
return doubleItems[k - 1];
}
@Override
int getM() { return m; }
@Override
MemoryRequestServer getMemoryRequestServer() { return null; } // heap sketch: no Memory backing
@Override
int getMinK() { return minK; }
@Override
byte[] getMinMaxByteArr() {
// Serializes min then max, little-endian.
final byte[] bytesOut = new byte[2 * Double.BYTES];
putDoubleLE(bytesOut, 0, minDoubleItem);
putDoubleLE(bytesOut, Double.BYTES, maxDoubleItem);
return bytesOut;
}
@Override
byte[] getRetainedItemsByteArr() {
if (isEmpty()) { return new byte[0]; }
final byte[] bytesOut;
if (isSingleItem()) {
bytesOut = new byte[Double.BYTES];
putDoubleLE(bytesOut, 0, getDoubleSingleItem());
return bytesOut;
}
// General case: serialize only the retained items (above the garbage region).
final int retained = getNumRetained();
final int bytes = retained * Double.BYTES;
bytesOut = new byte[bytes];
final WritableMemory wmem = WritableMemory.writableWrap(bytesOut);
wmem.putDoubleArray(0, doubleItems, levelsArr[0], retained);
return bytesOut;
}
@Override
byte[] getTotalItemsByteArr() {
// Serializes the whole items array, including unused bottom slots.
final byte[] byteArr = new byte[doubleItems.length * Double.BYTES];
final WritableMemory wmem = WritableMemory.writableWrap(byteArr);
wmem.putDoubleArray(0, doubleItems, 0, doubleItems.length);
return byteArr;
}
@Override
WritableMemory getWritableMemory() {
return null;
}
@Override
void incN() { n++; }
@Override
void incNumLevels() {
//the heap sketch computes num levels from the array itself, so this is not used on-heap
}
@Override
boolean isLevelZeroSorted() { return this.isLevelZeroSorted; }
@Override
void setDoubleItemsArray(final double[] doubleItems) { this.doubleItems = doubleItems; }
@Override
void setDoubleItemsArrayAt(final int index, final double item) { this.doubleItems[index] = item; }
@Override
void setLevelZeroSorted(final boolean sorted) { this.isLevelZeroSorted = sorted; }
@Override
void setMaxItem(final double item) { this.maxDoubleItem = item; }
@Override
void setMinItem(final double item) { this.minDoubleItem = item; }
@Override
void setMinK(final int minK) { this.minK = minK; }
@Override
void setN(final long n) { this.n = n; }
@Override
void setNumLevels(final int numLevels) {
//the heap sketch computes num levels from the array itself, so this is not used on-heap
}
@Override
double[] getDoubleRetainedItemsArray() {
// Copy of just the retained items: [levelsArr[0], levelsArr[numLevels]).
return Arrays.copyOfRange(doubleItems, levelsArr[0], levelsArr[getNumLevels()]);
}
@Override
void setWritableMemory(final WritableMemory wmem) { } // no-op: heap sketch has no Memory
}
| 2,642 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllFloatsSketchSortedViewIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import org.apache.datasketches.quantilescommon.FloatsSortedViewIterator;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
/**
* Iterator over KllFloatsSketchSortedView
* @author Alexander Saydakov
* @author Lee Rhodes
*/
/**
 * Iterator over KllFloatsSketchSortedView.
 *
 * <p>The iterator starts one position before the first entry; callers must invoke
 * {@link #next()} and observe {@code true} before reading any accessor.</p>
 *
 * @author Alexander Saydakov
 * @author Lee Rhodes
 */
public final class KllFloatsSketchSortedViewIterator implements FloatsSortedViewIterator {
  private final float[] quantiles;    //sorted quantiles, parallel to cumWeights
  private final long[] cumWeights;    //monotonically increasing cumulative natural weights
  private final long totalN;          //total items presented to the sketch
  private int cursor;                 //current position; -1 means "before first"

  KllFloatsSketchSortedViewIterator(final float[] quantiles, final long[] cumWeights) {
    this.quantiles = quantiles;
    this.cumWeights = cumWeights;
    if (cumWeights.length > 0) {
      this.totalN = cumWeights[cumWeights.length - 1]; //last cumulative weight equals N
    } else {
      this.totalN = 0;
    }
    this.cursor = -1;
  }

  @Override
  public long getCumulativeWeight(final QuantileSearchCriteria searchCrit) {
    //INCLUSIVE counts the current entry; EXCLUSIVE reports the weight strictly below it.
    if (searchCrit == INCLUSIVE) {
      return cumWeights[cursor];
    }
    return (cursor == 0) ? 0 : cumWeights[cursor - 1];
  }

  @Override
  public long getN() {
    return totalN;
  }

  @Override
  public double getNormalizedRank(final QuantileSearchCriteria searchCrit) {
    //normalized rank = cumulative weight / N
    return (double) getCumulativeWeight(searchCrit) / totalN;
  }

  @Override
  public float getQuantile() {
    return quantiles[cursor];
  }

  @Override
  public long getWeight() {
    //individual weight is the difference of adjacent cumulative weights
    return (cursor == 0) ? cumWeights[0] : cumWeights[cursor] - cumWeights[cursor - 1];
  }

  @Override
  public boolean next() {
    return ++cursor < quantiles.length;
  }
}
| 2,643 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllDoublesSketchSortedView.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import static org.apache.datasketches.quantilescommon.QuantilesAPI.EMPTY_MSG;
import java.util.Arrays;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.quantilescommon.DoublesSortedView;
import org.apache.datasketches.quantilescommon.InequalitySearch;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesUtil;
/**
* The SortedView of the KllDoublesSketch.
* @author Alexander Saydakov
* @author Lee Rhodes
*/
public final class KllDoublesSketchSortedView implements DoublesSortedView {
  private final double[] quantiles;   //fully sorted quantiles, parallel to cumWeights
  private final long[] cumWeights; //comes in as individual weights, converted to cumulative natural weights
  private final long totalN;          //total items presented to the source sketch

  /**
   * Construct from elements for testing.
   * @param quantiles sorted array of quantiles
   * @param cumWeights sorted, monotonically increasing cumulative weights.
   * @param totalN the total number of items presented to the sketch.
   */
  KllDoublesSketchSortedView(final double[] quantiles, final long[] cumWeights, final long totalN) {
    this.quantiles = quantiles;
    this.cumWeights = cumWeights;
    this.totalN = totalN;
  }

  /**
   * Constructs this Sorted View given the sketch
   * @param sk the given KllDoublesSketch.
   */
  public KllDoublesSketchSortedView(final KllDoublesSketch sk) {
    this.totalN = sk.getN();
    final double[] srcQuantiles = sk.getDoubleItemsArray();
    final int[] srcLevels = sk.levelsArr;
    final int srcNumLevels = sk.getNumLevels();
    //level zero is the only unsorted level; sort it in place before merging
    if (!sk.isLevelZeroSorted()) {
      Arrays.sort(srcQuantiles, srcLevels[0], srcLevels[1]);
      //NOTE(review): the sorted flag is only cached for non-Memory (heap) sketches
      if (!sk.hasMemory()) { sk.setLevelZeroSorted(true); }
    }
    final int numQuantiles = srcLevels[srcNumLevels] - srcLevels[0]; //remove garbage
    quantiles = new double[numQuantiles];
    cumWeights = new long[numQuantiles];
    populateFromSketch(srcQuantiles, srcLevels, srcNumLevels, numQuantiles);
  }

  @Override
  public long[] getCumulativeWeights() {
    return cumWeights.clone(); //defensive copy
  }

  @Override
  public double getQuantile(final double rank, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    QuantilesUtil.checkNormalizedRankBounds(rank);
    final int len = cumWeights.length;
    //convert normalized rank to a natural rank in [0, totalN]; ceil/floor selects the
    //inclusive/exclusive interpretation respectively
    final long naturalRank = (searchCrit == INCLUSIVE)
        ? (long)Math.ceil(rank * totalN) : (long)Math.floor(rank * totalN);
    final InequalitySearch crit = (searchCrit == INCLUSIVE) ? InequalitySearch.GE : InequalitySearch.GT;
    final int index = InequalitySearch.find(cumWeights, 0, len - 1, naturalRank, crit);
    if (index == -1) {
      return quantiles[quantiles.length - 1]; //EXCLUSIVE (GT) case: normRank == 1.0;
    }
    return quantiles[index];
  }

  @Override
  public double[] getQuantiles() {
    return quantiles.clone(); //defensive copy
  }

  @Override
  public double getRank(final double quantile, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    final int len = quantiles.length;
    final InequalitySearch crit = (searchCrit == INCLUSIVE) ? InequalitySearch.LE : InequalitySearch.LT;
    final int index = InequalitySearch.find(quantiles, 0, len - 1, quantile, crit);
    if (index == -1) {
      return 0; //EXCLUSIVE (LT) case: quantile <= minQuantile; INCLUSIVE (LE) case: quantile < minQuantile
    }
    return (double)cumWeights[index] / totalN;
  }

  @Override
  public boolean isEmpty() {
    return totalN == 0;
  }

  @Override
  public KllDoublesSketchSortedViewIterator iterator() {
    return new KllDoublesSketchSortedViewIterator(quantiles, cumWeights);
  }

  //restricted methods

  /**
   * Copies the retained items into this view, assigns each item its individual natural
   * weight (2^level), merge-sorts all levels into one sorted sequence, then converts the
   * individual weights into cumulative weights.
   */
  private void populateFromSketch(final double[] srcQuantiles, final int[] srcLevels,
      final int srcNumLevels, final int numItems) {
    final int[] myLevels = new int[srcNumLevels + 1];
    final int offset = srcLevels[0]; //skip the garbage/free region below the first level
    System.arraycopy(srcQuantiles, offset, quantiles, 0, numItems);
    int srcLevel = 0;
    int dstLevel = 0;
    long weight = 1; //weight doubles with each level
    while (srcLevel < srcNumLevels) {
      final int fromIndex = srcLevels[srcLevel] - offset;
      final int toIndex = srcLevels[srcLevel + 1] - offset; // exclusive
      if (fromIndex < toIndex) { // if equal, skip empty level
        Arrays.fill(cumWeights, fromIndex, toIndex, weight);
        myLevels[dstLevel] = fromIndex;
        myLevels[dstLevel + 1] = toIndex;
        dstLevel++;
      }
      srcLevel++;
      weight *= 2;
    }
    final int numLevels = dstLevel; //count of non-empty levels actually kept
    blockyTandemMergeSort(quantiles, cumWeights, myLevels, numLevels); //create unit weights
    KllHelper.convertToCumulative(cumWeights);
  }

  /**
   * Merge-sorts the per-level sorted runs of quantiles, carrying the parallel weights
   * array along in tandem so each quantile keeps its weight.
   */
  private static void blockyTandemMergeSort(final double[] quantiles, final long[] weights,
      final int[] levels, final int numLevels) {
    if (numLevels == 1) { return; } //single run is already sorted
    // duplicate the input in preparation for the "ping-pong" copy reduction strategy.
    final double[] quantilesTmp = Arrays.copyOf(quantiles, quantiles.length);
    final long[] weightsTmp = Arrays.copyOf(weights, quantiles.length); // don't need the extra one
    blockyTandemMergeSortRecursion(quantilesTmp, weightsTmp, quantiles, weights, levels, 0, numLevels);
  }

  /**
   * Recursive half of the ping-pong merge: each recursion level swaps the roles of src
   * and dst so data alternates between the two buffer pairs, and the final tandemMerge
   * always lands the fully merged result in the dst pair of this invocation.
   */
  private static void blockyTandemMergeSortRecursion(
      final double[] quantilesSrc, final long[] weightsSrc,
      final double[] quantilesDst, final long[] weightsDst,
      final int[] levels, final int startingLevel, final int numLevels) {
    if (numLevels == 1) { return; }
    final int numLevels1 = numLevels / 2;
    final int numLevels2 = numLevels - numLevels1;
    assert numLevels1 >= 1;
    assert numLevels2 >= numLevels1;
    final int startingLevel1 = startingLevel;
    final int startingLevel2 = startingLevel + numLevels1;
    // swap roles of src and dst
    blockyTandemMergeSortRecursion(
        quantilesDst, weightsDst,
        quantilesSrc, weightsSrc,
        levels, startingLevel1, numLevels1);
    blockyTandemMergeSortRecursion(
        quantilesDst, weightsDst,
        quantilesSrc, weightsSrc,
        levels, startingLevel2, numLevels2);
    tandemMerge(
        quantilesSrc, weightsSrc,
        quantilesDst, weightsDst,
        levels,
        startingLevel1, numLevels1,
        startingLevel2, numLevels2);
  }

  /**
   * Classic two-run merge of adjacent sorted runs from src into dst, moving each
   * quantile together with its weight. Runs are defined by levelStarts indices.
   */
  private static void tandemMerge(
      final double[] quantilesSrc, final long[] weightsSrc,
      final double[] quantilesDst, final long[] weightsDst,
      final int[] levelStarts,
      final int startingLevel1, final int numLevels1,
      final int startingLevel2, final int numLevels2) {
    final int fromIndex1 = levelStarts[startingLevel1];
    final int toIndex1 = levelStarts[startingLevel1 + numLevels1]; // exclusive
    final int fromIndex2 = levelStarts[startingLevel2];
    final int toIndex2 = levelStarts[startingLevel2 + numLevels2]; // exclusive
    int iSrc1 = fromIndex1;
    int iSrc2 = fromIndex2;
    int iDst = fromIndex1;
    while (iSrc1 < toIndex1 && iSrc2 < toIndex2) {
      if (quantilesSrc[iSrc1] < quantilesSrc[iSrc2]) {
        quantilesDst[iDst] = quantilesSrc[iSrc1];
        weightsDst[iDst] = weightsSrc[iSrc1];
        iSrc1++;
      } else {
        quantilesDst[iDst] = quantilesSrc[iSrc2];
        weightsDst[iDst] = weightsSrc[iSrc2];
        iSrc2++;
      }
      iDst++;
    }
    //copy whichever run has a remainder
    if (iSrc1 < toIndex1) {
      System.arraycopy(quantilesSrc, iSrc1, quantilesDst, iDst, toIndex1 - iSrc1);
      System.arraycopy(weightsSrc, iSrc1, weightsDst, iDst, toIndex1 - iSrc1);
    } else if (iSrc2 < toIndex2) {
      System.arraycopy(quantilesSrc, iSrc2, quantilesDst, iDst, toIndex2 - iSrc2);
      System.arraycopy(weightsSrc, iSrc2, weightsDst, iDst, toIndex2 - iSrc2);
    }
  }
}
| 2,644 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllHeapFloatsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.common.ByteArrayUtil.putFloatLE;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR_SINGLE_ITEM;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_EMPTY;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_FULL;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.FLOATS_SKETCH;
import java.util.Arrays;
import java.util.Objects;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
* This class implements an on-heap floats KllSketch.
*
* <p>Please refer to the documentation in the package-info:<br>
* {@link org.apache.datasketches.kll}</p>
*
* @author Lee Rhodes, Kevin Lang
*/
final class KllHeapFloatsSketch extends KllFloatsSketch {
  private final int k; // configured size of K.
  private final int m; // configured size of M.
  private long n;      // number of items input into this sketch.
  private int minK;    // dynamic minK for error estimation after merging with different k.
  private boolean isLevelZeroSorted; //level zero is kept unsorted between updates
  private float minFloatItem;        //NaN when empty
  private float maxFloatItem;        //NaN when empty
  private float[] floatItems;        //items for all levels; bottom region may be free space

  /**
   * New instance heap constructor with a given parameters <em>k</em> and <em>m</em>.
   *
   * @param k parameter that controls size of the sketch and accuracy of estimates.
   * <em>k</em> can be between <em>m</em> and 65535, inclusive.
   * The default <em>k</em> = 200 results in a normalized rank error of about 1.65%.
   * Larger <em>k</em> will have smaller error but the sketch will be larger (and slower).
   * @param m parameter controls the minimum level width in items. It can be 2, 4, 6 or 8.
   * The DEFAULT_M, which is 8 is recommended. Other sizes of <em>m</em> should be considered
   * experimental as they have not been as well characterized.
   */
  KllHeapFloatsSketch(final int k, final int m) {
    super(UPDATABLE);
    KllHelper.checkM(m);
    KllHelper.checkK(k, m);
    this.levelsArr = new int[] {k, k}; //empty level zero: both bounds at k
    this.readOnly = false;
    this.k = k;
    this.m = m;
    this.n = 0;
    this.minK = k;
    this.isLevelZeroSorted = false;
    this.minFloatItem = Float.NaN;
    this.maxFloatItem = Float.NaN;
    this.floatItems = new float[k];
  }

  /**
   * Heapify constructor. Deserializes the items, min/max, and levels from one of the
   * four serialized structures (COMPACT_EMPTY, COMPACT_SINGLE, COMPACT_FULL, UPDATABLE).
   * @param srcMem Memory object that contains data serialized by this sketch.
   * @param memValidate the MemoryValidate object
   */
  private KllHeapFloatsSketch(
      final Memory srcMem,
      final KllMemoryValidate memValidate) {
    super(UPDATABLE);
    final SketchStructure memStructure = memValidate.sketchStructure;
    this.k = memValidate.k;
    this.m = memValidate.m;
    this.n = memValidate.n;
    this.minK = memValidate.minK;
    this.levelsArr = memValidate.levelsArr; //normalized to full
    this.isLevelZeroSorted = memValidate.level0SortedFlag;
    if (memStructure == COMPACT_EMPTY) {
      //no items in the image; initialize as an empty sketch
      minFloatItem = Float.NaN;
      maxFloatItem = Float.NaN;
      floatItems = new float[k];
    }
    else if (memStructure == COMPACT_SINGLE) {
      //the single item is both min and max and is stored at the top of the array
      final float item = srcMem.getFloat(DATA_START_ADR_SINGLE_ITEM);
      minFloatItem = maxFloatItem = item;
      floatItems = new float[k];
      floatItems[k - 1] = item;
    }
    else if (memStructure == COMPACT_FULL) {
      //compact image: shortened levels array, then min, max, then only the retained items
      int offsetBytes = DATA_START_ADR;
      offsetBytes += (levelsArr.length - 1) * Integer.BYTES; //shortened levelsArr
      minFloatItem = srcMem.getFloat(offsetBytes);
      offsetBytes += Float.BYTES;
      maxFloatItem = srcMem.getFloat(offsetBytes);
      offsetBytes += Float.BYTES;
      final int capacityItems = levelsArr[getNumLevels()];
      final int garbageItems = levelsArr[0];
      final int retainedItems = capacityItems - garbageItems;
      floatItems = new float[capacityItems];
      //place retained items above the garbage region so levelsArr indices remain valid
      srcMem.getFloatArray(offsetBytes, floatItems, garbageItems, retainedItems);
    }
    else { //(memStructure == UPDATABLE)
      //updatable image: full levels array, then min, max, then the full items array
      int offsetBytes = DATA_START_ADR;
      offsetBytes += levelsArr.length * Integer.BYTES; //full levelsArr
      minFloatItem = srcMem.getFloat(offsetBytes);
      offsetBytes += Float.BYTES;
      maxFloatItem = srcMem.getFloat(offsetBytes);
      offsetBytes += Float.BYTES;
      final int capacityItems = levelsArr[getNumLevels()];
      floatItems = new float[capacityItems];
      srcMem.getFloatArray(offsetBytes, floatItems, 0, capacityItems);
    }
  }

  //Validates the Memory image, then delegates to the private heapify constructor.
  static KllHeapFloatsSketch heapifyImpl(final Memory srcMem) {
    Objects.requireNonNull(srcMem, "Parameter 'srcMem' must not be null");
    final KllMemoryValidate memVal = new KllMemoryValidate(srcMem, FLOATS_SKETCH);
    return new KllHeapFloatsSketch(srcMem, memVal);
  }

  @Override
  public int getK() { return k; }

  @Override
  public float getMaxItem() {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    return maxFloatItem;
  }

  @Override
  public float getMinItem() {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    return minFloatItem;
  }

  @Override
  public long getN() { return n; }

  //restricted

  @Override
  float[] getFloatItemsArray() { return floatItems; }

  @Override
  float getFloatSingleItem() {
    if (n != 1L) { throw new SketchesArgumentException(NOT_SINGLE_ITEM_MSG); }
    return floatItems[k - 1]; //a single item is always stored at the top of the array
  }

  @Override
  int getM() { return m; }

  @Override
  MemoryRequestServer getMemoryRequestServer() { return null; } //heap sketch has no Memory

  @Override
  int getMinK() { return minK; }

  //Serializes min and max as two little-endian floats.
  @Override
  byte[] getMinMaxByteArr() {
    final byte[] bytesOut = new byte[2 * Float.BYTES];
    putFloatLE(bytesOut, 0, minFloatItem);
    putFloatLE(bytesOut, Float.BYTES, maxFloatItem);
    return bytesOut;
  }

  //Serializes only the retained items (excluding the free region below levelsArr[0]).
  @Override
  byte[] getRetainedItemsByteArr() {
    if (isEmpty()) { return new byte[0]; }
    final byte[] bytesOut;
    if (isSingleItem()) {
      bytesOut = new byte[Float.BYTES];
      putFloatLE(bytesOut, 0, getFloatSingleItem());
      return bytesOut;
    }
    final int retained = getNumRetained();
    final int bytes = retained * Float.BYTES;
    bytesOut = new byte[bytes];
    final WritableMemory wmem = WritableMemory.writableWrap(bytesOut);
    wmem.putFloatArray(0, floatItems, levelsArr[0], retained);
    return bytesOut;
  }

  //Serializes the full items array, including any free space at the bottom.
  @Override
  byte[] getTotalItemsByteArr() {
    final byte[] byteArr = new byte[floatItems.length * Float.BYTES];
    final WritableMemory wmem = WritableMemory.writableWrap(byteArr);
    wmem.putFloatArray(0, floatItems, 0, floatItems.length);
    return byteArr;
  }

  @Override
  WritableMemory getWritableMemory() {
    return null; //heap sketch has no backing Memory
  }

  @Override
  void incN() { n++; }

  @Override
  void incNumLevels() {
    //the heap sketch computes num levels from the array itself, so this is not used on-heap
  }

  @Override
  boolean isLevelZeroSorted() { return this.isLevelZeroSorted; }

  @Override
  void setFloatItemsArray(final float[] floatItems) { this.floatItems = floatItems; }

  @Override
  void setFloatItemsArrayAt(final int index, final float item) { this.floatItems[index] = item; }

  @Override
  void setLevelZeroSorted(final boolean sorted) { this.isLevelZeroSorted = sorted; }

  @Override
  void setMaxItem(final float item) { this.maxFloatItem = item; }

  @Override
  void setMinItem(final float item) { this.minFloatItem = item; }

  @Override
  void setMinK(final int minK) { this.minK = minK; }

  @Override
  void setN(final long n) { this.n = n; }

  @Override
  void setNumLevels(final int numLevels) {
    //the heap sketch computes num levels from the array itself, so this is not used on-heap
  }

  //Returns a copy of only the retained items.
  @Override
  float[] getFloatRetainedItemsArray() {
    return Arrays.copyOfRange(floatItems, levelsArr[0], levelsArr[getNumLevels()]);
  }

  @Override
  void setWritableMemory(final WritableMemory wmem) { } //inheritance dummy
}
| 2,645 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllDoublesSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.apache.datasketches.common.ByteArrayUtil.putDoubleLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.DOUBLES_SKETCH;
import static org.apache.datasketches.quantilescommon.QuantilesUtil.equallyWeightedRanks;
import java.util.Objects;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SuppressFBWarnings;
import org.apache.datasketches.kll.KllDirectDoublesSketch.KllDirectCompactDoublesSketch;
import org.apache.datasketches.memory.DefaultMemoryRequestServer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.quantilescommon.DoublesSortedView;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesDoublesAPI;
import org.apache.datasketches.quantilescommon.QuantilesDoublesSketchIterator;
/**
* This variation of the KllSketch implements primitive doubles.
*
* @see org.apache.datasketches.kll.KllSketch
*/
public abstract class KllDoublesSketch extends KllSketch implements QuantilesDoublesAPI {
private KllDoublesSketchSortedView kllDoublesSV = null;
final static int ITEM_BYTES = Double.BYTES;
KllDoublesSketch(
final SketchStructure sketchStructure) {
super(SketchType.DOUBLES_SKETCH, sketchStructure);
}
//Factories for new heap instances.
/**
* Create a new heap instance of this sketch with the default <em>k = 200</em>.
* The default <em>k</em> = 200 results in a normalized rank error of about
* 1.65%. Larger K will have smaller error but the sketch will be larger (and slower).
* @return new KllDoublesSketch on the Java heap.
*/
public static KllDoublesSketch newHeapInstance() {
return newHeapInstance(DEFAULT_K);
}
/**
* Create a new heap instance of this sketch with a given parameter <em>k</em>.
* <em>k</em> can be between 8, inclusive, and 65535, inclusive.
* The default <em>k</em> = 200 results in a normalized rank error of about
* 1.65%. Larger K will have smaller error but the sketch will be larger (and slower).
* @param k parameter that controls size of the sketch and accuracy of estimates.
* @return new KllDoublesSketch on the Java heap.
*/
public static KllDoublesSketch newHeapInstance(final int k) {
return new KllHeapDoublesSketch(k, DEFAULT_M);
}
//Factories for new direct instances.
/**
* Create a new direct updatable instance of this sketch with the default <em>k</em>.
* The default <em>k</em> = 200 results in a normalized rank error of about
* 1.65%. Larger <em>k</em> will have smaller error but the sketch will be larger (and slower).
* @param dstMem the given destination WritableMemory object for use by the sketch
* @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
* @return a new direct instance of this sketch
*/
public static KllDoublesSketch newDirectInstance(
final WritableMemory dstMem,
final MemoryRequestServer memReqSvr) {
return newDirectInstance(DEFAULT_K, dstMem, memReqSvr);
}
/**
* Create a new direct updatable instance of this sketch with a given <em>k</em>.
* @param k parameter that controls size of the sketch and accuracy of estimates.
* @param dstMem the given destination WritableMemory object for use by the sketch
* @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
* @return a new direct instance of this sketch
*/
public static KllDoublesSketch newDirectInstance(
final int k,
final WritableMemory dstMem,
final MemoryRequestServer memReqSvr) {
Objects.requireNonNull(dstMem, "Parameter 'dstMem' must not be null");
Objects.requireNonNull(memReqSvr, "Parameter 'memReqSvr' must not be null");
return KllDirectDoublesSketch.newDirectUpdatableInstance(k, DEFAULT_M, dstMem, memReqSvr);
}
//Factory to create an heap instance from a Memory image
/**
* Factory heapify takes a compact sketch image in Memory and instantiates an on-heap sketch.
* The resulting sketch will not retain any link to the source Memory.
* @param srcMem a compact Memory image of a sketch serialized by this sketch.
* <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
* @return a heap-based sketch based on the given Memory.
*/
public static KllDoublesSketch heapify(final Memory srcMem) {
Objects.requireNonNull(srcMem, "Parameter 'srcMem' must not be null");
return KllHeapDoublesSketch.heapifyImpl(srcMem);
}
//Factory to wrap a Read-Only Memory
/**
* Wrap a sketch around the given read only compact source Memory containing sketch data
* that originated from this sketch.
* @param srcMem the read only source Memory
* @return instance of this sketch
*/
public static KllDoublesSketch wrap(final Memory srcMem) {
Objects.requireNonNull(srcMem, "Parameter 'srcMem' must not be null");
final KllMemoryValidate memVal = new KllMemoryValidate(srcMem, DOUBLES_SKETCH, null);
if (memVal.sketchStructure == UPDATABLE) {
final MemoryRequestServer memReqSvr = new DefaultMemoryRequestServer(); //dummy
return new KllDirectDoublesSketch(memVal.sketchStructure, (WritableMemory)srcMem, memReqSvr, memVal);
} else {
return new KllDirectCompactDoublesSketch(memVal.sketchStructure, srcMem, memVal);
}
}
//Factory to wrap a WritableMemory image
/**
* Wrap a sketch around the given source Writable Memory containing sketch data
* that originated from this sketch.
* @param srcMem a WritableMemory that contains data.
* @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
* @return instance of this sketch
*/
public static KllDoublesSketch writableWrap(
final WritableMemory srcMem,
final MemoryRequestServer memReqSvr) {
Objects.requireNonNull(srcMem, "Parameter 'srcMem' must not be null");
Objects.requireNonNull(memReqSvr, "Parameter 'memReqSvr' must not be null");
final KllMemoryValidate memVal = new KllMemoryValidate(srcMem, DOUBLES_SKETCH);
if (memVal.sketchStructure == UPDATABLE) {
return new KllDirectDoublesSketch(UPDATABLE, srcMem, memReqSvr, memVal);
} else {
return new KllDirectCompactDoublesSketch(memVal.sketchStructure, srcMem, memVal);
}
}
//END of Constructors
@Override
public double[] getCDF(final double[] splitPoints, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllDoublesSV.getCDF(splitPoints, searchCrit);
}
@Override
public DoublesPartitionBoundaries getPartitionBoundaries(final int numEquallyWeighted,
final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
final double[] ranks = equallyWeightedRanks(numEquallyWeighted);
final double[] boundaries = getQuantiles(ranks, searchCrit);
boundaries[0] = getMinItem();
boundaries[boundaries.length - 1] = getMaxItem();
final DoublesPartitionBoundaries dpb = new DoublesPartitionBoundaries();
dpb.N = this.getN();
dpb.ranks = ranks;
dpb.boundaries = boundaries;
return dpb;
}
@Override
public double[] getPMF(final double[] splitPoints, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllDoublesSV.getPMF(splitPoints, searchCrit);
}
@Override
public double getQuantile(final double rank, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllDoublesSV.getQuantile(rank, searchCrit);
}
@Override
public double[] getQuantiles(final double[] ranks, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
final int len = ranks.length;
final double[] quantiles = new double[len];
for (int i = 0; i < len; i++) {
quantiles[i] = kllDoublesSV.getQuantile(ranks[i], searchCrit);
}
return quantiles;
}
/**
* {@inheritDoc}
* The approximate probability that the true quantile is within the confidence interval
* specified by the upper and lower quantile bounds for this sketch is 0.99.
*/
@Override
public double getQuantileLowerBound(final double rank) {
return getQuantile(max(0, rank - KllHelper.getNormalizedRankError(getMinK(), false)));
}
/**
* {@inheritDoc}
* The approximate probability that the true quantile is within the confidence interval
* specified by the upper and lower quantile bounds for this sketch is 0.99.
*/
@Override
public double getQuantileUpperBound(final double rank) {
return getQuantile(min(1.0, rank + KllHelper.getNormalizedRankError(getMinK(), false)));
}
@Override
public double getRank(final double quantile, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllDoublesSV.getRank(quantile, searchCrit);
}
/**
* {@inheritDoc}
* The approximate probability that the true rank is within the confidence interval
* specified by the upper and lower rank bounds for this sketch is 0.99.
*/
@Override
public double getRankLowerBound(final double rank) {
return max(0.0, rank - KllHelper.getNormalizedRankError(getMinK(), false));
}
/**
* {@inheritDoc}
* The approximate probability that the true rank is within the confidence interval
* specified by the upper and lower rank bounds for this sketch is 0.99.
*/
@Override
public double getRankUpperBound(final double rank) {
return min(1.0, rank + KllHelper.getNormalizedRankError(getMinK(), false));
}
@Override
public double[] getRanks(final double[] quantiles, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
final int len = quantiles.length;
final double[] ranks = new double[len];
for (int i = 0; i < len; i++) {
ranks[i] = kllDoublesSV.getRank(quantiles[i], searchCrit);
}
return ranks;
}
@Override
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "OK in this case.")
public DoublesSortedView getSortedView() {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
refreshSortedView();
return kllDoublesSV;
}
@Override
public QuantilesDoublesSketchIterator iterator() {
return new KllDoublesSketchIterator(
getDoubleItemsArray(), getLevelsArray(SketchStructure.UPDATABLE), getNumLevels());
}
@Override
public final void merge(final KllSketch other) {
if (readOnly || sketchStructure != UPDATABLE) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
final KllDoublesSketch othDblSk = (KllDoublesSketch)other;
if (othDblSk.isEmpty()) { return; }
KllDoublesHelper.mergeDoubleImpl(this, othDblSk);
kllDoublesSV = null;
}
/**
* {@inheritDoc}
* <p>The parameter <i>k</i> will not change.</p>
*/
@Override
public final void reset() {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
final int k = getK();
setN(0);
setMinK(k);
setNumLevels(1);
setLevelZeroSorted(false);
setLevelsArray(new int[] {k, k});
setMinItem(Double.NaN);
setMaxItem(Double.NaN);
setDoubleItemsArray(new double[k]);
kllDoublesSV = null;
}
@Override
public byte[] toByteArray() {
return KllHelper.toByteArray(this, false);
}
@Override
public void update(final double item) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
KllDoublesHelper.updateDouble(this, item);
kllDoublesSV = null;
}
//restricted
/**
* @return full size of internal items array including empty space at bottom.
*/
abstract double[] getDoubleItemsArray();
/**
* @return items array of retained items.
*/
abstract double[] getDoubleRetainedItemsArray();
abstract double getDoubleSingleItem();
@Override
abstract byte[] getMinMaxByteArr();
@Override
int getMinMaxSizeBytes() {
return Double.BYTES * 2;
}
@Override
abstract byte[] getRetainedItemsByteArr();
@Override
int getRetainedItemsSizeBytes() {
return getNumRetained() * Double.BYTES;
}
@Override
ArrayOfItemsSerDe<?> getSerDe() { return null; }
@Override
final byte[] getSingleItemByteArr() {
final byte[] bytes = new byte[ITEM_BYTES];
putDoubleLE(bytes, 0, getDoubleSingleItem());
return bytes;
}
@Override
int getSingleItemSizeBytes() {
return Double.BYTES;
}
@Override
abstract byte[] getTotalItemsByteArr();
@Override
int getTotalItemsNumBytes() {
return levelsArr[getNumLevels()] * Double.BYTES;
}
private final void refreshSortedView() {
kllDoublesSV = (kllDoublesSV == null)
? new KllDoublesSketchSortedView(this) : kllDoublesSV;
}
  //Replaces the entire internal items array.
  abstract void setDoubleItemsArray(double[] doubleItems);
  //Writes a single item at the given index of the internal items array.
  abstract void setDoubleItemsArrayAt(int index, double item);
  //Sets the maximum item seen by this sketch.
  abstract void setMaxItem(double item);
  //Sets the minimum item seen by this sketch.
  abstract void setMinItem(double item);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR_SINGLE_ITEM;
import static org.apache.datasketches.kll.KllPreambleUtil.N_LONG_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.PREAMBLE_INTS_EMPTY_SINGLE;
import static org.apache.datasketches.kll.KllPreambleUtil.PREAMBLE_INTS_FULL;
import static org.apache.datasketches.kll.KllPreambleUtil.SERIAL_VERSION_EMPTY_FULL;
import static org.apache.datasketches.kll.KllPreambleUtil.SERIAL_VERSION_SINGLE;
import static org.apache.datasketches.kll.KllPreambleUtil.SERIAL_VERSION_UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_EMPTY;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_FULL;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.DOUBLES_SKETCH;
import static org.apache.datasketches.kll.KllSketch.SketchType.FLOATS_SKETCH;
import static org.apache.datasketches.kll.KllSketch.SketchType.ITEMS_SKETCH;
import java.util.Arrays;
import java.util.Random;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.quantilescommon.QuantilesAPI;
/*
* Sampled stream data (floats, doubles, or items) is stored as a heap array called itemsArr or as part of a
* WritableMemory/Memory object.
* This array is partitioned into sections called levels and the indices into the array of items
* are tracked by a small integer array called levelsArr.
* The data for level i lies in positions levelsArr[i] through levelsArr[i + 1] - 1 inclusive.
* Hence, the levelsArr must contain (numLevels + 1) elements.
* The valid portion of the itemsArr is completely packed and sorted, except for level 0,
* which is filled from the top down. Any items below the index levelsArr[0] is garbage and will be
* overwritten by subsequent updates.
*
* Invariants:
* 1) After a compaction, update, or a merge, every level is sorted except for level zero.
* 2) After a compaction, (sum of level capacities) - (number of valid items) >= 1,
* so there is room for least 1 more quantile in level zero.
* 3) There are no gaps except at the bottom, so if levelsArr[0] = 0,
* the sketch is exactly filled to capacity and must be compacted or the itemsArr and levelsArr
* must be expanded to include more levels.
* 4) Sum of weights of all retained, valid items = N.
* 5) Current total item capacity = itemsArr.length = levelsArr[numLevels].
*/
/**
 * This class is the root of the KLL sketch class hierarchy. It includes the public API that is independent
 * of either sketch type (e.g., float, double or generic item) and independent of whether the sketch is targeted
 * for use on the Java heap or off-heap.
 *
 * <p>KLL is an implementation of a very compact quantiles sketch with lazy compaction scheme
 * and nearly optimal accuracy per retained quantile.</p>
 *
 * <p>Reference <a href="https://arxiv.org/abs/1603.05346v2">Optimal Quantile Approximation in Streams</a>.</p>
 *
 * <p>The default <i>k</i> of 200 yields a "single-sided" epsilon of about 1.33% and a
 * "double-sided" (PMF) epsilon of about 1.65%, with a confidence of 99%.</p>
 *
 * @see <a href="https://datasketches.apache.org/docs/KLL/KLLSketch.html">KLL Sketch</a>
 * @see QuantilesAPI
 *
 * @author Lee Rhodes
 * @author Kevin Lang
 * @author Alexander Saydakov
 */
public abstract class KllSketch implements QuantilesAPI {

  /**
   * The default K
   */
  public static final int DEFAULT_K = 200;

  /**
   * The maximum K
   */
  public static final int MAX_K = (1 << 16) - 1; // serialized as an unsigned short

  /**
   * The default M. The parameter <i>m</i> is the minimum level size in number of quantiles.
   * Currently, the public default is 8, but this can be overridden using Package Private methods to
   * 2, 4, 6 or 8, and the sketch works just fine. The number 8 was chosen as a compromise between speed and size.
   * Choosing an <i>m</i> smaller than 8 will make the sketch slower.
   */
  static final int DEFAULT_M = 8;
  static final int MAX_M = 8; //The maximum M
  static final int MIN_M = 2; //The minimum M
  static final Random random = new Random();
  final SketchType sketchType;
  final SketchStructure sketchStructure;
  boolean readOnly;
  int[] levelsArr; //Always writable form

  /**
   * Constructor used by all subclasses, both on-heap and off-heap.
   * <p>Note: this constructor takes no Memory arguments. Acquisition of a WritableMemory and a
   * MemoryRequestServer, when applicable, is the responsibility of the concrete subclass.</p>
   * @param sketchType either DOUBLES_SKETCH, FLOATS_SKETCH or ITEMS_SKETCH
   * @param sketchStructure one of COMPACT_EMPTY, COMPACT_SINGLE, COMPACT_FULL or UPDATABLE
   */
  KllSketch(
      final SketchType sketchType,
      final SketchStructure sketchStructure) {
    this.sketchType = sketchType;
    this.sketchStructure = sketchStructure;
  }

  /**
   * Gets the approximate <em>k</em> to use given epsilon, the normalized rank error.
   * @param epsilon the normalized rank error between zero and one.
   * @param pmf if true, this function returns the <em>k</em> assuming the input epsilon
   * is the desired "double-sided" epsilon for the getPMF() function. Otherwise, this function
   * returns <em>k</em> assuming the input epsilon is the desired "single-sided"
   * epsilon for all the other queries.
   * @return <i>k</i> given epsilon.
   */
  public static int getKFromEpsilon(final double epsilon, final boolean pmf) {
    return KllHelper.getKFromEpsilon(epsilon, pmf);
  }

  /**
   * Returns upper bound on the serialized size of a KllSketch given the following parameters.
   * @param k parameter that controls size of the sketch and accuracy of estimates
   * @param n stream length
   * @param sketchType Only DOUBLES_SKETCH and FLOATS_SKETCH is supported for this operation.
   * @param updatableMemFormat true if updatable Memory format, otherwise the standard compact format.
   * @return upper bound on the serialized size of a KllSketch.
   */
  public static int getMaxSerializedSizeBytes(final int k, final long n,
      final SketchType sketchType, final boolean updatableMemFormat) {
    if (sketchType == ITEMS_SKETCH) { throw new SketchesArgumentException(UNSUPPORTED_MSG); }
    final KllHelper.GrowthStats gStats =
        KllHelper.getGrowthSchemeForGivenN(k, DEFAULT_M, n, sketchType, false);
    return updatableMemFormat ? gStats.updatableBytes : gStats.compactBytes;
  }

  /**
   * Gets the normalized rank error given k and pmf.
   * Static method version of the <i>getNormalizedRankError(boolean)</i>.
   * The epsilon returned is a best fit to 99 percent confidence empirically measured max error
   * in thousands of trials.
   * @param k the configuration parameter
   * @param pmf if true, returns the "double-sided" normalized rank error for the getPMF() function.
   * Otherwise, it is the "single-sided" normalized rank error for all the other queries.
   * @return if pmf is true, the normalized rank error for the getPMF() function.
   * Otherwise, it is the "single-sided" normalized rank error for all the other queries.
   */
  public static double getNormalizedRankError(final int k, final boolean pmf) {
    return KllHelper.getNormalizedRankError(k, pmf);
  }

  /**
   * Gets the approximate rank error of this sketch normalized as a fraction between zero and one.
   * The epsilon returned is a best fit to 99 percent confidence empirically measured max error
   * in thousands of trials.
   * @param pmf if true, returns the "double-sided" normalized rank error for the getPMF() function.
   * Otherwise, it is the "single-sided" normalized rank error for all the other queries.
   * @return if pmf is true, returns the "double-sided" normalized rank error for the getPMF() function.
   * Otherwise, it is the "single-sided" normalized rank error for all the other queries.
   */
  public final double getNormalizedRankError(final boolean pmf) {
    //uses minK to account for merges with sketches configured with a smaller k
    return getNormalizedRankError(getMinK(), pmf);
  }

  @Override
  public final int getNumRetained() {
    //levelsArr[0] is the index of the first valid item; levelsArr[numLevels] is total capacity
    return levelsArr[getNumLevels()] - levelsArr[0];
  }

  /**
   * Returns the current number of bytes this Sketch would require if serialized in compact form.
   * @return the number of bytes this sketch would require if serialized.
   */
  public int getSerializedSizeBytes() {
    //current policy is that public method cannot return Updatable structure:
    return currentSerializedSizeBytes(false);
  }

  @Override
  public boolean hasMemory() {
    return getWritableMemory() != null;
  }

  /**
   * Returns true if this sketch is Memory-backed and its structure is one of the compact forms.
   * @return true if this sketch is Memory-backed and its structure is one of the compact forms.
   */
  public boolean isCompactMemoryFormat() {
    return hasMemory() && sketchStructure != UPDATABLE;
  }

  @Override
  public boolean isDirect() {
    final WritableMemory wmem = getWritableMemory();
    return (wmem != null) && wmem.isDirect();
  }

  @Override
  public final boolean isEmpty() {
    return getN() == 0;
  }

  @Override
  public final boolean isEstimationMode() {
    //a single level means no compaction has occurred, so all items are still exact
    return getNumLevels() > 1;
  }

  /**
   * Returns true if the backing WritableMemory is in updatable format.
   * @return true if the backing WritableMemory is in updatable format.
   */
  public final boolean isMemoryUpdatableFormat() {
    return hasMemory() && sketchStructure == UPDATABLE;
  }

  @Override
  public final boolean isReadOnly() {
    return readOnly;
  }

  /**
   * Returns true if the backing resource of <i>this</i> is identical with the backing resource
   * of <i>that</i>. The capacities must be the same. If <i>this</i> is a region,
   * the region offset must also be the same.
   * @param that A different non-null object
   * @return true if the backing resource of <i>this</i> is the same as the backing resource
   * of <i>that</i>.
   */
  public final boolean isSameResource(final Memory that) {
    final WritableMemory wmem = getWritableMemory();
    return (wmem != null) && wmem.isSameResource(that);
  }

  /**
   * Merges another sketch into this one.
   * Attempting to merge a sketch of the wrong type will throw an exception.
   * @param other sketch to merge into this one
   */
  public abstract void merge(KllSketch other);

  @Override
  public final String toString() {
    return toString(false, false);
  }

  /**
   * Returns a summary of the sketch as a string.
   * @param withLevels if true include information about levels
   * @param withData if true include sketch data
   * @return string representation of sketch summary
   */
  public String toString(final boolean withLevels, final boolean withData) {
    return KllHelper.toStringImpl(this, withLevels, withData, getSerDe());
  }

  //restricted

  /**
   * Compute serialized size in bytes independent of the current sketch.
   * For KllItemsSketch the result is always in non-updatable, compact form.
   * @param updatable true if the desired result is for updatable structure.
   * @return serialized size in bytes given a SketchStructure.
   */
  final int currentSerializedSizeBytes(final boolean updatable) {
    //ITEMS_SKETCH never serializes in updatable form
    final boolean myUpdatable = updatable && sketchType != ITEMS_SKETCH;
    final long srcN = this.getN();
    final SketchStructure tgtStructure;
    if (myUpdatable) { tgtStructure = UPDATABLE; }
    else if (srcN == 0) { tgtStructure = COMPACT_EMPTY; }
    else if (srcN == 1) { tgtStructure = COMPACT_SINGLE; }
    else { tgtStructure = COMPACT_FULL; }
    final int totalBytes;
    if (tgtStructure == COMPACT_EMPTY) {
      totalBytes = N_LONG_ADR; //preamble only
    }
    else if (tgtStructure == COMPACT_SINGLE) {
      totalBytes = DATA_START_ADR_SINGLE_ITEM
          + getSingleItemSizeBytes();
    }
    else if (tgtStructure == COMPACT_FULL) {
      totalBytes = DATA_START_ADR
          + getLevelsArrSizeBytes(tgtStructure)
          + getMinMaxSizeBytes()
          + getRetainedItemsSizeBytes();
    }
    else { //structure = UPDATABLE: full capacity, including garbage space below levelsArr[0]
      totalBytes = DATA_START_ADR
          + getLevelsArrSizeBytes(tgtStructure)
          + getMinMaxSizeBytes()
          + getTotalItemsNumBytes();
    }
    return totalBytes;
  }

  int[] getLevelsArray(final SketchStructure structure) {
    //UPDATABLE keeps all (numLevels + 1) entries; COMPACT_FULL drops the last (capacity) entry
    if (structure == UPDATABLE) { return levelsArr.clone(); }
    else if (structure == COMPACT_FULL) { return Arrays.copyOf(levelsArr, levelsArr.length - 1); }
    else { return new int[0]; }
  }

  final int getLevelsArrSizeBytes(final SketchStructure structure) {
    if (structure == UPDATABLE) { return levelsArr.length * Integer.BYTES; }
    else if (structure == COMPACT_FULL) { return (levelsArr.length - 1) * Integer.BYTES; }
    else { return 0; }
  }

  /**
   * Returns the configured parameter <i>m</i>, which is the minimum level size in number of items.
   * Currently, the public default is 8, but this can be overridden using Package Private methods to
   * 2, 4, 6 or 8, and the sketch works just fine. The number 8 was chosen as a compromise between speed and size.
   * Choosing smaller <i>m</i> will make the sketch much slower.
   * @return the configured parameter m
   */
  abstract int getM();

  /**
   * Gets the MemoryRequestServer or null.
   * @return the MemoryRequestServer or null.
   */
  abstract MemoryRequestServer getMemoryRequestServer();

  /**
   * MinK is the K that results from a merge with a sketch configured with a K lower than
   * the K of this sketch. This is then used in computing the estimated upper and lower bounds of error.
   * @return The minimum K as a result of merging sketches with lower k.
   */
  abstract int getMinK();

  /**
   * Gets the combined minItem and maxItem in a serialized byte array.
   * @return the combined minItem and maxItem in a serialized byte array.
   */
  abstract byte[] getMinMaxByteArr();

  /**
   * Gets the size in bytes of the combined minItem and maxItem serialized byte array.
   * @return the size in bytes of the combined minItem and maxItem serialized byte array.
   */
  abstract int getMinMaxSizeBytes();

  /**
   * Gets the current number of levels
   * @return the current number of levels
   */
  final int getNumLevels() {
    if (sketchStructure == UPDATABLE || sketchStructure == COMPACT_FULL) { return levelsArr.length - 1; }
    return 1; //COMPACT_EMPTY and COMPACT_SINGLE always have exactly one level
  }

  /**
   * Gets the serialized byte array of the valid retained items as a byte array.
   * It does not include the preamble, the levels array, minimum or maximum items, or garbage data.
   * @return the serialized bytes of the retained data.
   */
  abstract byte[] getRetainedItemsByteArr();

  /**
   * Gets the size in bytes of the valid retained items.
   * It does not include the preamble, the levels array, minimum or maximum items, or garbage data.
   * @return the size of the retained data in bytes.
   */
  abstract int getRetainedItemsSizeBytes();

  /**
   * Gets the serializer / deserializer or null.
   * @return the serializer / deserializer or null.
   */
  abstract ArrayOfItemsSerDe<?> getSerDe();

  /**
   * Gets the serialized byte array of the Single Item that corresponds to the Single Item Flag being true.
   * @return the serialized byte array of the Single Item.
   */
  abstract byte[] getSingleItemByteArr();

  /**
   * Gets the size in bytes of the serialized Single Item that corresponds to the Single Item Flag being true.
   * @return the size in bytes of the serialized Single Item.
   */
  abstract int getSingleItemSizeBytes();

  /**
   * Gets the serialized byte array of the entire internal items hypothetical structure.
   * It does not include the preamble, the levels array, or minimum or maximum items.
   * It may include empty or garbage items.
   * @return the serialized bytes of the retained data.
   */
  abstract byte[] getTotalItemsByteArr();

  /**
   * Gets the size in bytes of the entire internal items hypothetical structure.
   * It does not include the preamble, the levels array, or minimum or maximum items.
   * It may include empty or garbage items.
   * @return the size of the retained data in bytes.
   */
  abstract int getTotalItemsNumBytes();

  /**
   * This returns the WritableMemory for Direct type sketches,
   * otherwise returns null.
   * @return the WritableMemory for Direct type sketches, otherwise null.
   */
  abstract WritableMemory getWritableMemory();

  abstract void incN();

  abstract void incNumLevels();

  final boolean isCompactSingleItem() {
    return hasMemory() && sketchStructure == COMPACT_SINGLE && (getN() == 1);
  }

  boolean isDoublesSketch() { return sketchType == DOUBLES_SKETCH; }

  boolean isFloatsSketch() { return sketchType == FLOATS_SKETCH; }

  boolean isItemsSketch() { return sketchType == ITEMS_SKETCH; }

  abstract boolean isLevelZeroSorted();

  /**
   * @return true if N == 1.
   */
  boolean isSingleItem() { return getN() == 1; }

  final void setLevelsArray(final int[] levelsArr) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    this.levelsArr = levelsArr;
    final WritableMemory wmem = getWritableMemory();
    if (wmem != null) {
      //keep the Memory image in sync with the heap copy
      wmem.putIntArray(DATA_START_ADR, this.levelsArr, 0, levelsArr.length);
    }
  }

  final void setLevelsArrayAt(final int index, final int idxVal) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    this.levelsArr[index] = idxVal;
    final WritableMemory wmem = getWritableMemory();
    if (wmem != null) {
      //keep the Memory image in sync with the heap copy
      final int offset = DATA_START_ADR + index * Integer.BYTES;
      wmem.putInt(offset, idxVal);
    }
  }

  abstract void setLevelZeroSorted(boolean sorted);

  abstract void setMinK(int minK);

  abstract void setN(long n);

  abstract void setNumLevels(int numLevels);

  abstract void setWritableMemory(final WritableMemory wmem);

  /**
   * Used to define the variable type of the current instance of this class.
   */
  public enum SketchType {
    DOUBLES_SKETCH(Double.BYTES, "DoublesSketch"),
    FLOATS_SKETCH(Float.BYTES, "FloatsSketch"),
    ITEMS_SKETCH(0, "ItemsSketch");

    private final int typeBytes; //bytes per item; 0 for generic items (variable width)
    private final String name;

    private SketchType(final int typeBytes, final String name) {
      this.typeBytes = typeBytes;
      this.name = name;
    }

    public int getBytes() { return typeBytes; }

    public String getName() { return name; }
  }

  /**
   * Used primarily to define the structure of the serialized sketch. Also used by the Heap Sketch.
   */
  public enum SketchStructure {
    COMPACT_EMPTY(PREAMBLE_INTS_EMPTY_SINGLE, SERIAL_VERSION_EMPTY_FULL),
    COMPACT_SINGLE(PREAMBLE_INTS_EMPTY_SINGLE, SERIAL_VERSION_SINGLE),
    COMPACT_FULL(PREAMBLE_INTS_FULL, SERIAL_VERSION_EMPTY_FULL),
    UPDATABLE(PREAMBLE_INTS_FULL, SERIAL_VERSION_UPDATABLE); //also used by the heap sketch.

    private final int preInts;
    private final int serVer;

    private SketchStructure(final int preInts, final int serVer) {
      this.preInts = preInts;
      this.serVer = serVer;
    }

    public int getPreInts() { return preInts; }

    public int getSerVer() { return serVer; }

    /**
     * Maps a (preInts, serVer) pair from a serialized image back to its SketchStructure.
     * @param preInts the number of preamble ints from the serialized image
     * @param serVer the serial version from the serialized image
     * @return the matching SketchStructure
     */
    public static SketchStructure getSketchStructure(final int preInts, final int serVer) {
      for (final SketchStructure ss : SketchStructure.values()) {
        if (ss.preInts == preInts && ss.serVer == serVer) {
          return ss;
        }
      }
      throw new SketchesArgumentException("Error combination of PreInts and SerVer: "
          + "PreInts: " + preInts + ", SerVer: " + serVer);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR_SINGLE_ITEM;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryK;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryLevelZeroSortedFlag;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryM;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryMinK;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryN;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_EMPTY;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import java.lang.reflect.Array;
import java.util.Comparator;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
 * This class implements an off-heap, read-only KllItemsSketch using WritableMemory.
 *
 * <p>Please refer to the documentation in the package-info:<br>
 * {@link org.apache.datasketches.kll}</p>
 *
 * @author Lee Rhodes, Kevin Lang
 */
@SuppressWarnings("unchecked")
final class KllDirectCompactItemsSketch<T> extends KllItemsSketch<T> {
  private final Memory mem; //assigned once in the constructor; this sketch is read-only

  /**
   * Internal implementation of the wrapped Memory KllSketch.
   * @param memVal the MemoryValidate object
   * @param comparator to compare items
   * @param serDe Serializer / deserializer for items of type <i>T</i> and <i>T[]</i>.
   */
  KllDirectCompactItemsSketch( //called below and KllItemsSketch
      final KllMemoryValidate memVal,
      final Comparator<? super T> comparator,
      final ArrayOfItemsSerDe<T> serDe) {
    super(memVal.sketchStructure, comparator, serDe);
    this.mem = memVal.srcMem;
    readOnly = true;
    levelsArr = memVal.levelsArr; //always converted to writable form.
  }

  @Override
  public int getK() {
    return getMemoryK(mem);
  }

  @Override
  public T getMaxItem() {
    if (sketchStructure == COMPACT_EMPTY || isEmpty()) {
      throw new SketchesArgumentException(EMPTY_MSG);
    }
    if (sketchStructure == COMPACT_SINGLE) {
      //the single item is both min and max
      return serDe.deserializeFromMemory(mem, DATA_START_ADR_SINGLE_ITEM, 1)[0];
    }
    //sketchStructure == COMPACT_FULL: min and max are stored just after the levels array;
    //max is the second of the pair
    final int offset = DATA_START_ADR + getNumLevels() * Integer.BYTES;
    return serDe.deserializeFromMemory(mem, offset, 2)[1];
  }

  @Override
  public T getMinItem() {
    if (sketchStructure == COMPACT_EMPTY || isEmpty()) {
      throw new SketchesArgumentException(EMPTY_MSG);
    }
    if (sketchStructure == COMPACT_SINGLE) {
      //the single item is both min and max
      return serDe.deserializeFromMemory(mem, DATA_START_ADR_SINGLE_ITEM, 1)[0];
    }
    //sketchStructure == COMPACT_FULL: min is the first of the (min, max) pair
    final int offset = DATA_START_ADR + getNumLevels() * Integer.BYTES;
    return serDe.deserializeFromMemory(mem, offset, 1)[0];
  }

  @Override
  public long getN() {
    if (sketchStructure == COMPACT_EMPTY) { return 0; }
    if (sketchStructure == COMPACT_SINGLE) { return 1; }
    return getMemoryN(mem);
  }

  //restricted

  private int getCompactDataOffset() { //Sketch cannot be empty
    return sketchStructure == COMPACT_SINGLE
        ? DATA_START_ADR_SINGLE_ITEM
        : DATA_START_ADR + getNumLevels() * Integer.BYTES + getMinMaxSizeBytes();
  }

  @Override
  int getM() {
    return getMemoryM(mem);
  }

  @Override
  int getMinK() {
    //empty and single-item images do not carry a separate minK; fall back to k
    if (sketchStructure == COMPACT_EMPTY || sketchStructure == COMPACT_SINGLE) { return getMemoryK(mem); }
    return getMemoryMinK(mem);
  }

  @Override
  byte[] getMinMaxByteArr() { //this is only used by COMPACT_FULL
    final int offset = DATA_START_ADR + getNumLevels() * Integer.BYTES;
    final int bytesMinMax = serDe.sizeOf(mem, offset, 2);
    final byte[] byteArr = new byte[bytesMinMax];
    mem.getByteArray(offset, byteArr, 0, bytesMinMax);
    return byteArr;
  }

  @Override
  int getMinMaxSizeBytes() { //this is only used by COMPACT_FULL
    final int offset = DATA_START_ADR + getNumLevels() * Integer.BYTES;
    return serDe.sizeOf(mem, offset, 2);
  }

  @Override
  T[] getRetainedItemsArray() {
    final int numRet = getNumRetained();
    if (sketchStructure == COMPACT_EMPTY || getN() == 0) {
      return (T[]) Array.newInstance(serDe.getClassOfT(), numRet);
    }
    final int offset = getCompactDataOffset(); //both single & full
    return serDe.deserializeFromMemory(mem, offset, numRet);
  }

  @Override
  byte[] getRetainedItemsByteArr() {
    if (sketchStructure == COMPACT_EMPTY || getN() == 0) { return new byte[0]; }
    final int offset = getCompactDataOffset(); //both single & full
    final int bytes = serDe.sizeOf(mem, offset, getNumRetained());
    final byte[] byteArr = new byte[bytes];
    mem.getByteArray(offset, byteArr, 0, bytes);
    return byteArr;
  }

  @Override
  int getRetainedItemsSizeBytes() {
    if (sketchStructure == COMPACT_EMPTY || getN() == 0) { return 0; }
    final int offset = getCompactDataOffset(); //both single & full
    return serDe.sizeOf(mem, offset, getNumRetained());
  }

  @Override
  T getSingleItem() {
    if (getN() != 1) { throw new SketchesArgumentException(NOT_SINGLE_ITEM_MSG); }
    final int offset = getCompactDataOffset(); //both single & full
    return (serDe.deserializeFromMemory(mem, offset, 1)[0]);
  }

  @Override
  byte[] getSingleItemByteArr() {
    if (getN() != 1) { throw new SketchesArgumentException(NOT_SINGLE_ITEM_MSG); }
    final int offset = getCompactDataOffset(); //both single & full
    final int bytes = serDe.sizeOf(mem, offset, 1);
    final byte[] byteArr = new byte[bytes];
    mem.getByteArray(offset, byteArr, 0, bytes);
    return byteArr;
  }

  @Override
  int getSingleItemSizeBytes() {
    if (getN() != 1) { throw new SketchesArgumentException(NOT_SINGLE_ITEM_MSG); }
    final int offset = getCompactDataOffset(); //both single & full
    final int bytes = serDe.sizeOf(mem, offset, 1);
    return bytes;
  }

  @Override
  T[] getTotalItemsArray() {
    final int k = getK();
    if (getN() == 0) { return (T[]) Array.newInstance(serDe.getClassOfT(), k); }
    if (getN() == 1) {
      //place the single item at the top of level zero (level zero fills top-down)
      final T[] itemsArr = (T[]) Array.newInstance(serDe.getClassOfT(), k);
      itemsArr[k - 1] = getSingleItem();
      return itemsArr;
    }
    //expand the packed retained items back into a full-capacity array
    final int offset = getCompactDataOffset();
    final int numRetItems = getNumRetained();
    final int numCapItems = levelsArr[getNumLevels()];
    final T[] retItems = serDe.deserializeFromMemory(mem, offset, numRetItems);
    final T[] capItems = (T[]) Array.newInstance(serDe.getClassOfT(), numCapItems);
    System.arraycopy(retItems, 0, capItems, levelsArr[0], numRetItems);
    return capItems;
  }

  @Override
  WritableMemory getWritableMemory() {
    //NOTE(review): this casts the read-only Memory to WritableMemory; it appears to rely on the
    //underlying Memory implementation also implementing WritableMemory — confirm against the
    //datasketches-memory component.
    return (WritableMemory)mem;
  }

  @Override
  void incN() {
    throw new SketchesArgumentException(UNSUPPORTED_MSG); //read-only sketch
  }

  @Override
  boolean isLevelZeroSorted() {
    return getMemoryLevelZeroSortedFlag(mem);
  }

  @Override
  void setItemsArray(final Object[] itemsArr) {
    throw new SketchesArgumentException(UNSUPPORTED_MSG); //read-only sketch
  }

  @Override
  void setItemsArrayAt(final int index, final Object item) {
    throw new SketchesArgumentException(UNSUPPORTED_MSG); //read-only sketch
  }

  @Override
  void setLevelZeroSorted(final boolean sorted) {
    throw new SketchesArgumentException(UNSUPPORTED_MSG); //read-only sketch
  }

  @Override
  void setMaxItem(final Object item) {
    throw new SketchesArgumentException(UNSUPPORTED_MSG); //read-only sketch
  }

  @Override
  void setMinItem(final Object item) {
    throw new SketchesArgumentException(UNSUPPORTED_MSG); //read-only sketch
  }

  @Override
  void setMinK(final int minK) {
    throw new SketchesArgumentException(UNSUPPORTED_MSG); //read-only sketch
  }

  @Override
  void setN(final long n) {
    throw new SketchesArgumentException(UNSUPPORTED_MSG); //read-only sketch
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.common.ByteArrayUtil.copyBytes;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR_SINGLE_ITEM;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryK;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryLevelZeroSortedFlag;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryM;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryMinK;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryN;
import static org.apache.datasketches.kll.KllPreambleUtil.getMemoryNumLevels;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryFamilyID;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryK;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryLevelZeroSortedFlag;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryM;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryMinK;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryN;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryNumLevels;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemoryPreInts;
import static org.apache.datasketches.kll.KllPreambleUtil.setMemorySerVer;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_EMPTY;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_FULL;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.DOUBLES_SKETCH;
import org.apache.datasketches.common.ByteArrayUtil;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
* This class implements an off-heap, updatable KllDoublesSketch using WritableMemory.
*
* <p>Please refer to the documentation in the package-info:<br>
* {@link org.apache.datasketches.kll}</p>
*
* @author Lee Rhodes, Kevin Lang
*/
class KllDirectDoublesSketch extends KllDoublesSketch {
private WritableMemory wmem;
private MemoryRequestServer memReqSvr;
  /**
   * Constructs from Memory or WritableMemory already initialized with a sketch image and validated.
   * @param sketchStructure the structure of the sketch image: COMPACT_EMPTY, COMPACT_SINGLE,
   * COMPACT_FULL or UPDATABLE
   * @param wmem the current WritableMemory
   * @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
   * @param memVal the MemoryValidate object
   */
  KllDirectDoublesSketch(
      final SketchStructure sketchStructure,
      final WritableMemory wmem,
      final MemoryRequestServer memReqSvr,
      final KllMemoryValidate memVal) {
    super(sketchStructure);
    this.wmem = wmem;
    this.memReqSvr = memReqSvr;
    //writable only when backed by a non-readOnly WritableMemory AND the structure is UPDATABLE
    readOnly = (wmem != null && wmem.isReadOnly()) || sketchStructure != UPDATABLE;
    levelsArr = memVal.levelsArr; //always converted to writable form.
  }
  /**
   * Create a new updatable, direct instance of this sketch.
   * Writes a complete empty-sketch image (preamble, levels array, min/max, items) into dstMem
   * and then wraps it.
   * @param k parameter that controls size of the sketch and accuracy of estimates
   * @param m parameter that controls the minimum level width in items.
   * @param dstMem the given destination WritableMemory object for use by the sketch
   * @param memReqSvr the given MemoryRequestServer to request a larger WritableMemory
   * @return a new instance of this sketch
   */
  static KllDirectDoublesSketch newDirectUpdatableInstance(
      final int k,
      final int m,
      final WritableMemory dstMem,
      final MemoryRequestServer memReqSvr) {
    //write the preamble fields of an empty, updatable sketch
    setMemoryPreInts(dstMem, UPDATABLE.getPreInts());
    setMemorySerVer(dstMem, UPDATABLE.getSerVer());
    setMemoryFamilyID(dstMem, Family.KLL.getID());
    setMemoryK(dstMem, k);
    setMemoryM(dstMem, m);
    setMemoryN(dstMem, 0);
    setMemoryMinK(dstMem, k);
    setMemoryNumLevels(dstMem, 1);
    int offset = DATA_START_ADR;
    //new Levels array: level 0 is empty, so both entries equal the capacity k
    dstMem.putIntArray(offset, new int[] {k, k}, 0, 2);
    offset += 2 * Integer.BYTES;
    //new min/max array: NaN indicates "not yet set"
    dstMem.putDoubleArray(offset, new double[] {Double.NaN, Double.NaN}, 0, 2);
    offset += 2 * ITEM_BYTES;
    //new empty items array
    dstMem.putDoubleArray(offset, new double[k], 0, k);
    //validate the image just written, then wrap it
    final KllMemoryValidate memVal = new KllMemoryValidate(dstMem, DOUBLES_SKETCH, null);
    final WritableMemory wMem = dstMem;
    return new KllDirectDoublesSketch(UPDATABLE, wMem, memReqSvr, memVal);
  }
//END of Constructors
@Override
public int getK() {
return getMemoryK(wmem);
}
@Override
public double getMaxItem() {
int levelsArrBytes = 0;
if (sketchStructure == COMPACT_EMPTY || isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
else if (sketchStructure == COMPACT_SINGLE) { return getDoubleSingleItem(); }
else if (sketchStructure == COMPACT_FULL) {
levelsArrBytes = getLevelsArrSizeBytes(COMPACT_FULL);
} else { //UPDATABLE
levelsArrBytes = getLevelsArrSizeBytes(UPDATABLE);
}
final int offset = DATA_START_ADR + levelsArrBytes + ITEM_BYTES;
return wmem.getDouble(offset);
}
@Override
public double getMinItem() {
int levelsArrBytes = 0;
if (sketchStructure == COMPACT_EMPTY || isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
else if (sketchStructure == COMPACT_SINGLE) { return getDoubleSingleItem(); }
else if (sketchStructure == COMPACT_FULL) {
levelsArrBytes = getLevelsArrSizeBytes(COMPACT_FULL);
} else { //UPDATABLE
levelsArrBytes = getLevelsArrSizeBytes(UPDATABLE);
}
final int offset = DATA_START_ADR + levelsArrBytes;
return wmem.getDouble(offset);
}
@Override
public long getN() {
if (sketchStructure == COMPACT_EMPTY) { return 0; }
else if (sketchStructure == COMPACT_SINGLE) { return 1; }
else { return getMemoryN(wmem); }
}
//restricted
@Override //returns updatable, expanded array including empty/garbage space at bottom
double[] getDoubleItemsArray() {
final int k = getK();
if (sketchStructure == COMPACT_EMPTY) { return new double[k]; }
if (sketchStructure == COMPACT_SINGLE) {
final double[] itemsArr = new double[k];
itemsArr[k - 1] = getDoubleSingleItem();
return itemsArr;
}
final int capacityItems = KllHelper.computeTotalItemCapacity(k, getM(), getNumLevels());
final double[] doubleItemsArr = new double[capacityItems];
final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + 2 * ITEM_BYTES;
final int shift = (sketchStructure == COMPACT_FULL) ? levelsArr[0] : 0;
final int numItems = (sketchStructure == COMPACT_FULL) ? getNumRetained() : capacityItems;
wmem.getDoubleArray(offset, doubleItemsArr, shift, numItems);
return doubleItemsArr;
}
@Override //returns compact items array of retained items, no empty/garbage.
double[] getDoubleRetainedItemsArray() {
if (sketchStructure == COMPACT_EMPTY) { return new double[0]; }
if (sketchStructure == COMPACT_SINGLE) { return new double[] { getDoubleSingleItem() }; }
final int numRetained = getNumRetained();
final double[] doubleItemsArr = new double[numRetained];
final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + 2 * ITEM_BYTES
+ (sketchStructure == COMPACT_FULL ? 0 : levelsArr[0] * ITEM_BYTES);
wmem.getDoubleArray(offset, doubleItemsArr, 0, numRetained);
return doubleItemsArr;
}
@Override
double getDoubleSingleItem() {
if (!isSingleItem()) { throw new SketchesArgumentException(NOT_SINGLE_ITEM_MSG); }
if (sketchStructure == COMPACT_SINGLE) {
return wmem.getDouble(DATA_START_ADR_SINGLE_ITEM);
}
final int offset;
if (sketchStructure == COMPACT_FULL) {
offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + 2 * ITEM_BYTES;
} else { //sketchStructure == UPDATABLE
offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + (2 + getK() - 1) * ITEM_BYTES;
}
return wmem.getDouble(offset);
}
@Override
int getM() {
return getMemoryM(wmem);
}
@Override
MemoryRequestServer getMemoryRequestServer() { return memReqSvr; }
@Override
int getMinK() {
if (sketchStructure == COMPACT_FULL || sketchStructure == UPDATABLE) { return getMemoryMinK(wmem); }
return getK();
}
@Override
byte[] getMinMaxByteArr() {
final byte[] bytesOut = new byte[2 * ITEM_BYTES];
if (sketchStructure == COMPACT_EMPTY) {
ByteArrayUtil.putDoubleLE(bytesOut, 0, Double.NaN);
ByteArrayUtil.putDoubleLE(bytesOut, ITEM_BYTES, Double.NaN);
return bytesOut;
}
final int offset;
if (sketchStructure == COMPACT_SINGLE) {
offset = DATA_START_ADR_SINGLE_ITEM;
wmem.getByteArray(offset, bytesOut, 0, ITEM_BYTES);
copyBytes(bytesOut, 0, bytesOut, ITEM_BYTES, ITEM_BYTES);
return bytesOut;
}
//sketchStructure == UPDATABLE OR COMPACT_FULL
offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure);
wmem.getByteArray(offset, bytesOut, 0, ITEM_BYTES);
wmem.getByteArray(offset + ITEM_BYTES, bytesOut, ITEM_BYTES, ITEM_BYTES);
return bytesOut;
}
@Override
byte[] getRetainedItemsByteArr() {
if (sketchStructure == COMPACT_EMPTY) { return new byte[0]; }
final double[] dblArr = getDoubleRetainedItemsArray();
final byte[] dblByteArr = new byte[dblArr.length * ITEM_BYTES];
final WritableMemory wmem2 = WritableMemory.writableWrap(dblByteArr);
wmem2.putDoubleArray(0, dblArr, 0, dblArr.length);
return dblByteArr;
}
@Override
byte[] getTotalItemsByteArr() {
final double[] dblArr = getDoubleItemsArray();
final byte[] dblByteArr = new byte[dblArr.length * ITEM_BYTES];
final WritableMemory wmem2 = WritableMemory.writableWrap(dblByteArr);
wmem2.putDoubleArray(0, dblArr, 0, dblArr.length);
return dblByteArr;
}
@Override
WritableMemory getWritableMemory() {
return wmem;
}
@Override
void incN() {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
long n = getMemoryN(wmem);
setMemoryN(wmem, ++n);
}
@Override
void incNumLevels() {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
int numLevels = getMemoryNumLevels(wmem);
setMemoryNumLevels(wmem, ++numLevels);
}
@Override
boolean isLevelZeroSorted() {
return getMemoryLevelZeroSortedFlag(wmem);
}
@Override
void setDoubleItemsArray(final double[] doubleItems) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + 2 * ITEM_BYTES;
wmem.putDoubleArray(offset, doubleItems, 0, doubleItems.length);
}
@Override
void setDoubleItemsArrayAt(final int index, final double item) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
final int offset =
DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + (index + 2) * ITEM_BYTES;
wmem.putDouble(offset, item);
}
@Override
void setLevelZeroSorted(final boolean sorted) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
setMemoryLevelZeroSortedFlag(wmem, sorted);
}
@Override
void setMaxItem(final double item) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure) + ITEM_BYTES;
wmem.putDouble(offset, item);
}
@Override
void setMinItem(final double item) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
final int offset = DATA_START_ADR + getLevelsArrSizeBytes(sketchStructure);
wmem.putDouble(offset, item);
}
@Override
void setMinK(final int minK) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
setMemoryMinK(wmem, minK);
}
@Override
void setN(final long n) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
setMemoryN(wmem, n);
}
@Override
void setNumLevels(final int numLevels) {
if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
setMemoryNumLevels(wmem, numLevels);
}
@Override
void setWritableMemory(final WritableMemory wmem) {
this.wmem = wmem;
}
final static class KllDirectCompactDoublesSketch extends KllDirectDoublesSketch {
KllDirectCompactDoublesSketch(
final SketchStructure sketchStructure,
final Memory srcMem,
final KllMemoryValidate memVal) {
super(sketchStructure, (WritableMemory) srcMem, null, memVal);
}
}
}
| 2,649 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllDoublesSketchIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import org.apache.datasketches.quantilescommon.QuantilesDoublesSketchIterator;
/**
 * Iterator over the retained items of a KllDoublesSketch. The traversal order is not defined.
 * Each retained item carries the natural weight of its level: 2^level.
 */
public final class KllDoublesSketchIterator implements QuantilesDoublesSketchIterator {
  private final double[] items;      //the sketch's full items array, including garbage slots
  private final int[] levels;        //level boundaries; level i occupies [levels[i], levels[i+1])
  private final int levelCount;
  private int curLevel;
  private int curIndex;
  private long curWeight;
  private boolean started;

  KllDoublesSketchIterator(final double[] quantiles, final int[] levelsArr, final int numLevels) {
    items = quantiles;
    levels = levelsArr;
    levelCount = numLevels;
    started = false;
  }

  @Override
  public double getQuantile() {
    return items[curIndex];
  }

  @Override
  public long getWeight() {
    return curWeight;
  }

  @Override
  public boolean next() {
    if (started) {
      curIndex++;
    } else {
      //position at the first slot of level 0 with weight 1
      started = true;
      curLevel = 0;
      curIndex = levels[curLevel];
      curWeight = 1;
    }
    //advance past the end of the current level and any empty levels, doubling the weight each time
    while (curIndex >= levels[curLevel + 1]) {
      curLevel++;
      if (curLevel == levelCount) {
        return false; //no more levels
      }
      curWeight *= 2;
      curIndex = levels[curLevel];
    }
    return true;
  }
}
| 2,650 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllItemsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.UPDATABLE;
import static org.apache.datasketches.kll.KllSketch.SketchType.ITEMS_SKETCH;
import static org.apache.datasketches.quantilescommon.QuantilesUtil.equallyWeightedRanks;
import java.lang.reflect.Array;
import java.util.Comparator;
import java.util.Objects;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesGenericAPI;
import org.apache.datasketches.quantilescommon.QuantilesGenericSketchIterator;
/**
 * This variation of the KllSketch implements generic data types. The user must provide
 * a suitable implementation of the <i>java.lang.Comparator</i> as well as an implementation of
 * the serializer / deserializer, <i>org.apache.datasketches.common.ArrayOfItemsSerDe</i>.
 * @param <T> The sketch data type.
 * @see org.apache.datasketches.kll.KllSketch
 */
@SuppressWarnings("unchecked")
public abstract class KllItemsSketch<T> extends KllSketch implements QuantilesGenericAPI<T> {
  //lazily built sorted view; invalidated (set to null) by every mutation: update, merge, reset
  private KllItemsSketchSortedView<T> kllItemsSV = null;
  final Comparator<? super T> comparator;
  final ArrayOfItemsSerDe<T> serDe;

  KllItemsSketch(
      final SketchStructure skStructure,
      final Comparator<? super T> comparator,
      final ArrayOfItemsSerDe<T> serDe) {
    super(ITEMS_SKETCH, skStructure);
    Objects.requireNonNull(comparator, "Comparator must not be null.");
    Objects.requireNonNull(serDe, "SerDe must not be null.");
    this.comparator = comparator;
    this.serDe = serDe;
  }

  //Factories for new heap instances.

  /**
   * Create a new heap instance of this sketch with the default <em>k = 200</em>.
   * The default <em>k</em> = 200 results in a normalized rank error of about
   * 1.65%. Larger K will have smaller error but the sketch will be larger (and slower).
   * @param comparator to compare items
   * @param serDe Serializer / deserializer for an array of items, <i>T[]</i>.
   * @param <T> The sketch data type.
   * @return new KllItemsSketch on the Java heap.
   */
  public static <T> KllItemsSketch<T> newHeapInstance(
      final Comparator<? super T> comparator,
      final ArrayOfItemsSerDe<T> serDe) {
    final KllItemsSketch<T> itmSk =
        new KllHeapItemsSketch<>(DEFAULT_K, DEFAULT_M, comparator, serDe);
    return itmSk;
  }

  /**
   * Create a new heap instance of this sketch with a given parameter <em>k</em>.
   * <em>k</em> can be between DEFAULT_M and 65535, inclusive.
   * The default <em>k</em> = 200 results in a normalized rank error of about
   * 1.65%. Larger K will have smaller error but the sketch will be larger (and slower).
   * @param k parameter that controls size of the sketch and accuracy of estimates.
   * @param comparator to compare items
   * @param serDe Serializer / deserializer for items of type <i>T</i> and <i>T[]</i>.
   * @param <T> The sketch data type
   * @return new KllItemsSketch on the heap.
   */
  public static <T> KllItemsSketch<T> newHeapInstance(
      final int k,
      final Comparator<? super T> comparator,
      final ArrayOfItemsSerDe<T> serDe) {
    return new KllHeapItemsSketch<>(k, DEFAULT_M, comparator, serDe);
  }

  // Factory to create an heap instance from a Memory image

  /**
   * Factory heapify takes a compact sketch image in Memory and instantiates an on-heap sketch.
   * The resulting sketch will not retain any link to the source Memory.
   * @param srcMem a compact Memory image of a sketch serialized by this sketch and of the same type of T.
   * @param comparator to compare items
   * @param serDe Serializer / deserializer for items of type <i>T</i> and <i>T[]</i>.
   * @param <T> The sketch data type
   * @return a heap-based sketch based on the given Memory.
   */
  public static <T> KllItemsSketch<T> heapify(
      final Memory srcMem,
      final Comparator<? super T> comparator,
      final ArrayOfItemsSerDe<T> serDe) {
    return new KllHeapItemsSketch<>(srcMem, comparator, serDe);
  }

  //Factory to wrap a Read-Only Memory

  /**
   * Constructs a thin wrapper on the heap around a Memory (or WritableMemory) already initialized with a
   * validated sketch image of a type T consistent with the given comparator and serDe.
   * A reference to the Memory is kept in the sketch and must remain in scope consistent
   * with the temporal scope of this sketch. The amount of data kept on the heap is very small.
   * All of the item data originally collected by the given Memory sketch object remains in the
   * Memory object
   * @param srcMem the Memory object that this sketch will wrap.
   * @param comparator to compare items
   * @param serDe Serializer / deserializer for items of type <i>T</i> and <i>T[]</i>.
   * @param <T> The sketch data type
   * @return a heap-base sketch that is a thin wrapper around the given srcMem.
   */
  public static <T> KllItemsSketch<T> wrap(
      final Memory srcMem,
      final Comparator<? super T> comparator,
      final ArrayOfItemsSerDe<T> serDe) {
    final KllMemoryValidate memVal = new KllMemoryValidate(srcMem, SketchType.ITEMS_SKETCH, serDe);
    return new KllDirectCompactItemsSketch<>(memVal, comparator, serDe);
  }

  //END of Constructors

  @Override
  public double[] getCDF(final T[] splitPoints, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    refreshSortedView();
    return kllItemsSV.getCDF(splitPoints, searchCrit);
  }

  @Override
  public GenericPartitionBoundaries<T> getPartitionBoundaries(final int numEquallyWeighted,
      final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    final double[] ranks = equallyWeightedRanks(numEquallyWeighted);
    final Object[] boundaries = getQuantiles(ranks, searchCrit);
    //force the extreme boundaries to the exact min and max items
    boundaries[0] = getMinItem();
    boundaries[boundaries.length - 1] = getMaxItem();
    final GenericPartitionBoundaries<T> gpb = new GenericPartitionBoundaries<>();
    gpb.N = this.getN();
    gpb.ranks = ranks;
    gpb.boundaries = (T[])boundaries;
    return gpb;
  }

  @Override
  public double[] getPMF(final T[] splitPoints, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    refreshSortedView();
    return kllItemsSV.getPMF(splitPoints, searchCrit);
  }

  @Override
  public T getQuantile(final double rank, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    refreshSortedView();
    return kllItemsSV.getQuantile(rank, searchCrit);
  }

  @Override
  public T[] getQuantiles(final double[] ranks, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    refreshSortedView();
    final int len = ranks.length;
    //use the runtime class of an actual item to create a properly typed T[] result
    final T[] quantiles = (T[]) Array.newInstance(getMinItem().getClass(), len);
    for (int i = 0; i < len; i++) {
      quantiles[i] = kllItemsSV.getQuantile(ranks[i], searchCrit);
    }
    return quantiles;
  }

  @Override
  public T getQuantileLowerBound(final double rank) {
    return getQuantile(max(0, rank - KllHelper.getNormalizedRankError(getMinK(), false)));
  }

  @Override
  public T getQuantileUpperBound(final double rank) {
    return getQuantile(min(1.0, rank + KllHelper.getNormalizedRankError(getMinK(), false)));
  }

  @Override
  public double getRank(final T quantile, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    refreshSortedView();
    return kllItemsSV.getRank(quantile, searchCrit);
  }

  /**
   * {@inheritDoc}
   * The approximate probability that the true rank is within the confidence interval
   * specified by the upper and lower rank bounds for this sketch is 0.99.
   */
  @Override
  public double getRankLowerBound(final double rank) {
    return max(0.0, rank - KllHelper.getNormalizedRankError(getMinK(), false));
  }

  /**
   * {@inheritDoc}
   * The approximate probability that the true rank is within the confidence interval
   * specified by the upper and lower rank bounds for this sketch is 0.99.
   */
  @Override
  public double getRankUpperBound(final double rank) {
    return min(1.0, rank + KllHelper.getNormalizedRankError(getMinK(), false));
  }

  @Override
  public double[] getRanks(final T[] quantiles, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    refreshSortedView();
    final int len = quantiles.length;
    final double[] ranks = new double[len];
    for (int i = 0; i < len; i++) {
      ranks[i] = kllItemsSV.getRank(quantiles[i], searchCrit);
    }
    return ranks;
  }

  @Override
  public final KllItemsSketchSortedView<T> getSortedView() {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    return refreshSortedView();
    //return kllItemsSV; //SpotBugs EI_EXPOSE_REP, Suppressed by FindBugsExcludeFilter
  }

  @Override
  public QuantilesGenericSketchIterator<T> iterator() {
    return new KllItemsSketchIterator<>(
        getTotalItemsArray(), getLevelsArray(SketchStructure.UPDATABLE), getNumLevels());
  }

  @Override
  public final void merge(final KllSketch other) {
    if (readOnly || sketchStructure != UPDATABLE) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    final KllItemsSketch<T> othItmSk = (KllItemsSketch<T>)other;
    if (othItmSk.isEmpty()) { return; }
    KllItemsHelper.mergeItemImpl(this, othItmSk, comparator);
    kllItemsSV = null; //invalidate the cached sorted view
  }

  @Override
  public void reset() {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    final int k = getK();
    setN(0);
    setMinK(k);
    setNumLevels(1);
    setLevelZeroSorted(false);
    setLevelsArray(new int[] {k, k}); //a single empty level 0 of capacity k
    setMinItem(null);
    setMaxItem(null);
    setItemsArray(new Object[k]);
    kllItemsSV = null; //invalidate the cached sorted view
  }

  /**
   * Serializes this sketch into a compact byte array.
   * @return a compact byte array image of this sketch.
   */
  public byte[] toByteArray() {
    return KllHelper.toByteArray(this, false);
  }

  @Override
  public void update(final T item) {
    if (readOnly) { throw new SketchesArgumentException(TGT_IS_READ_ONLY_MSG); }
    KllItemsHelper.updateItem(this, item, comparator);
    kllItemsSV = null; //invalidate the cached sorted view
  }

  //restricted

  @Override
  MemoryRequestServer getMemoryRequestServer() {
    //this is not used and must return a null
    return null;
  }

  @Override
  abstract byte[] getMinMaxByteArr();

  @Override
  abstract int getMinMaxSizeBytes();

  //Lazily builds the sorted view on first use; mutators reset kllItemsSV to null.
  private KllItemsSketchSortedView<T> refreshSortedView() {
    if (kllItemsSV == null) {
      kllItemsSV = new KllItemsSketchSortedView<>(this);
    }
    return kllItemsSV;
  }

  abstract T[] getRetainedItemsArray();

  @Override
  abstract byte[] getRetainedItemsByteArr();

  @Override
  abstract int getRetainedItemsSizeBytes();

  @Override
  ArrayOfItemsSerDe<T> getSerDe() { return serDe; }

  abstract T getSingleItem();

  @Override
  abstract byte[] getSingleItemByteArr();

  @Override
  abstract int getSingleItemSizeBytes();

  /**
   * @return a full array of items as if the sketch was in COMPACT_FULL or UPDATABLE format.
   * This will include zeros and possibly some garbage items.
   */
  abstract T[] getTotalItemsArray();

  @Override
  byte[] getTotalItemsByteArr() {
    throw new SketchesArgumentException(UNSUPPORTED_MSG);
  }

  @Override
  int getTotalItemsNumBytes() {
    throw new SketchesArgumentException(UNSUPPORTED_MSG);
  }

  @Override
  void incNumLevels() {
    //this is not used and must be a no-op.
  }

  abstract void setItemsArray(Object[] itemsArr);

  abstract void setItemsArrayAt(int index, Object item);

  abstract void setMaxItem(Object item);

  abstract void setMinItem(Object item);

  @Override
  void setNumLevels(final int numLevels) {
    // this is not used and must be a no-op.
  }

  @Override
  void setWritableMemory(final WritableMemory wmem) {
    throw new SketchesArgumentException(UNSUPPORTED_MSG + "Sketch not writable.");
  }
}
| 2,651 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllItemsSketchSortedView.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import static org.apache.datasketches.quantilescommon.QuantilesAPI.EMPTY_MSG;
import java.lang.reflect.Array;
import java.util.Arrays;
import java.util.Comparator;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.Util;
import org.apache.datasketches.quantilescommon.GenericInequalitySearch;
import org.apache.datasketches.quantilescommon.GenericInequalitySearch.Inequality;
import org.apache.datasketches.quantilescommon.GenericSortedView;
import org.apache.datasketches.quantilescommon.GenericSortedViewIterator;
import org.apache.datasketches.quantilescommon.InequalitySearch;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesAPI;
import org.apache.datasketches.quantilescommon.QuantilesUtil;
/**
* The SortedView of the KllItemsSketch.
* @param <T> The sketch data type
* @author Alexander Saydakov
* @author Lee Rhodes
*/
@SuppressWarnings("unchecked")
public class KllItemsSketchSortedView<T> implements GenericSortedView<T> {
private final Object[] quantiles;
private final long[] cumWeights; //comes in as individual weights, converted to cumulative natural weights
private final long totalN;
private final T minItem;
private final Comparator<? super T> comp;
/**
* Construct from elements for testing only.
* @param quantiles sorted array of quantiles
* @param cumWeights sorted, monotonically increasing cumulative weights.
* @param totalN the total number of items presented to the sketch.
* @param minItem used to extract the type of T
* @param comparator the Comparator for type T
*/
KllItemsSketchSortedView(
final T[] quantiles,
final long[] cumWeights,
final long totalN,
final T minItem,
final Comparator<? super T> comparator) {
this.quantiles = quantiles;
this.cumWeights = cumWeights;
this.totalN = totalN;
this.minItem = minItem;
this.comp = comparator;
}
/**
* Constructs this Sorted View given the sketch
* @param sk the given KllItemsSketch.
*/
KllItemsSketchSortedView(final KllItemsSketch<T> sk) {
this.totalN = sk.getN();
this.minItem = sk.getMinItem();
final Object[] srcQuantiles = sk.getTotalItemsArray();
final int[] srcLevels = sk.levelsArr;
final int srcNumLevels = sk.getNumLevels();
this.comp = sk.comparator;
if (totalN == 0) { throw new SketchesArgumentException(QuantilesAPI.EMPTY_MSG); }
if (!sk.isLevelZeroSorted()) {
Arrays.sort((T[])srcQuantiles, srcLevels[0], srcLevels[1], comp);
if (!sk.hasMemory()) { sk.setLevelZeroSorted(true); }
}
final int numQuantiles = srcLevels[srcNumLevels] - srcLevels[0]; //remove garbage
quantiles = new Object[numQuantiles];
cumWeights = new long[numQuantiles];
populateFromSketch(srcQuantiles, srcLevels, srcNumLevels, numQuantiles);
}
//end of constructors
@Override //implemented here because it needs the comparator
public double[] getCDF(final T[] splitPoints, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
GenericSortedView.validateItems(splitPoints, comp);
final int len = splitPoints.length + 1;
final double[] buckets = new double[len];
for (int i = 0; i < len - 1; i++) {
buckets[i] = getRank(splitPoints[i], searchCrit);
}
buckets[len - 1] = 1.0;
return buckets;
}
@Override
public long[] getCumulativeWeights() {
return cumWeights.clone();
}
@Override //implemented here because it needs the comparator
public double[] getPMF(final T[] splitPoints, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
GenericSortedView.validateItems(splitPoints, comp);
final double[] buckets = getCDF(splitPoints, searchCrit);
final int len = buckets.length;
for (int i = len; i-- > 1; ) {
buckets[i] -= buckets[i - 1];
}
return buckets;
}
@Override
public T getQuantile(final double rank, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
QuantilesUtil.checkNormalizedRankBounds(rank);
final int len = cumWeights.length;
final long naturalRank = (searchCrit == INCLUSIVE)
? (long)Math.ceil(rank * totalN) : (long)Math.floor(rank * totalN);
final InequalitySearch crit = (searchCrit == INCLUSIVE) ? InequalitySearch.GE : InequalitySearch.GT;
final int index = InequalitySearch.find(cumWeights, 0, len - 1, naturalRank, crit);
if (index == -1) {
return (T) quantiles[quantiles.length - 1]; //EXCLUSIVE (GT) case: normRank == 1.0;
}
return (T) quantiles[index];
}
@Override
public T[] getQuantiles() {
final T[] quants = (T[]) Array.newInstance(minItem.getClass(), quantiles.length);
System.arraycopy(quantiles, 0, quants, 0, quantiles.length);
return quants;
}
@Override
public double getRank(final T quantile, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
final int len = quantiles.length;
final Inequality crit = (searchCrit == INCLUSIVE) ? Inequality.LE : Inequality.LT;
final int index = GenericInequalitySearch.find((T[])quantiles, 0, len - 1, quantile, crit, comp);
if (index == -1) {
return 0; //EXCLUSIVE (LT) case: quantile <= minQuantile; INCLUSIVE (LE) case: quantile < minQuantile
}
return (double)cumWeights[index] / totalN;
}
@Override
public boolean isEmpty() {
return totalN == 0;
}
@Override
public KllItemsSketchSortedViewIterator<T> iterator() {
return new KllItemsSketchSortedViewIterator<>((T[])quantiles, cumWeights);
}
//restricted methods
private void populateFromSketch(final Object[] srcQuantiles, final int[] srcLevels,
final int srcNumLevels, final int numItems) {
final int[] myLevels = new int[srcNumLevels + 1];
final int offset = srcLevels[0];
System.arraycopy(srcQuantiles, offset, quantiles, 0, numItems);
int srcLevel = 0;
int dstLevel = 0;
long weight = 1;
while (srcLevel < srcNumLevels) {
final int fromIndex = srcLevels[srcLevel] - offset;
final int toIndex = srcLevels[srcLevel + 1] - offset; // exclusive
if (fromIndex < toIndex) { // if equal, skip empty level
Arrays.fill(cumWeights, fromIndex, toIndex, weight);
myLevels[dstLevel] = fromIndex;
myLevels[dstLevel + 1] = toIndex;
dstLevel++;
}
srcLevel++;
weight *= 2;
}
final int numLevels = dstLevel;
blockyTandemMergeSort(quantiles, cumWeights, myLevels, numLevels, comp); //create unit weights
KllHelper.convertToCumulative(cumWeights);
}
private static <T> void blockyTandemMergeSort(final Object[] quantiles, final long[] weights,
final int[] levels, final int numLevels, final Comparator<? super T> comp) {
if (numLevels == 1) { return; }
// duplicate the input in preparation for the "ping-pong" copy reduction strategy.
final Object[] quantilesTmp = Arrays.copyOf(quantiles, quantiles.length);
final long[] weightsTmp = Arrays.copyOf(weights, quantiles.length); // don't need the extra one here
blockyTandemMergeSortRecursion(quantilesTmp, weightsTmp, quantiles, weights, levels, 0, numLevels, comp);
}
private static <T> void blockyTandemMergeSortRecursion(
final Object[] quantilesSrc, final long[] weightsSrc,
final Object[] quantilesDst, final long[] weightsDst,
final int[] levels, final int startingLevel, final int numLevels, final Comparator<? super T> comp) {
if (numLevels == 1) { return; }
final int numLevels1 = numLevels / 2;
final int numLevels2 = numLevels - numLevels1;
assert numLevels1 >= 1;
assert numLevels2 >= numLevels1;
final int startingLevel1 = startingLevel;
final int startingLevel2 = startingLevel + numLevels1;
// swap roles of src and dst
blockyTandemMergeSortRecursion(
quantilesDst, weightsDst,
quantilesSrc, weightsSrc,
levels, startingLevel1, numLevels1, comp);
blockyTandemMergeSortRecursion(
quantilesDst, weightsDst,
quantilesSrc, weightsSrc,
levels, startingLevel2, numLevels2, comp);
tandemMerge(
quantilesSrc, weightsSrc,
quantilesDst, weightsDst,
levels,
startingLevel1, numLevels1,
startingLevel2, numLevels2, comp);
}
private static <T> void tandemMerge(
final Object[] quantilesSrc, final long[] weightsSrc,
final Object[] quantilesDst, final long[] weightsDst,
final int[] levelStarts,
final int startingLevel1, final int numLevels1,
final int startingLevel2, final int numLevels2, final Comparator<? super T> comp) {
final int fromIndex1 = levelStarts[startingLevel1];
final int toIndex1 = levelStarts[startingLevel1 + numLevels1]; // exclusive
final int fromIndex2 = levelStarts[startingLevel2];
final int toIndex2 = levelStarts[startingLevel2 + numLevels2]; // exclusive
int iSrc1 = fromIndex1;
int iSrc2 = fromIndex2;
int iDst = fromIndex1;
while (iSrc1 < toIndex1 && iSrc2 < toIndex2) {
if (Util.lt((T) quantilesSrc[iSrc1], (T) quantilesSrc[iSrc2], comp)) {
quantilesDst[iDst] = quantilesSrc[iSrc1];
weightsDst[iDst] = weightsSrc[iSrc1];
iSrc1++;
} else {
quantilesDst[iDst] = quantilesSrc[iSrc2];
weightsDst[iDst] = weightsSrc[iSrc2];
iSrc2++;
}
iDst++;
}
if (iSrc1 < toIndex1) {
System.arraycopy(quantilesSrc, iSrc1, quantilesDst, iDst, toIndex1 - iSrc1);
System.arraycopy(weightsSrc, iSrc1, weightsDst, iDst, toIndex1 - iSrc1);
} else if (iSrc2 < toIndex2) {
System.arraycopy(quantilesSrc, iSrc2, quantilesDst, iDst, toIndex2 - iSrc2);
System.arraycopy(weightsSrc, iSrc2, weightsDst, iDst, toIndex2 - iSrc2);
}
}
  /**
   * Iterator over KllItemsSketchSortedView.
   * All iteration behavior is inherited from GenericSortedViewIterator; this subclass
   * only binds the iterator to this sorted view's parallel arrays.
   * @param <T> type of quantile (item)
   */
  public static final class KllItemsSketchSortedViewIterator<T> extends GenericSortedViewIterator<T> {

    /**
     * Constructs this iterator over the sorted view's two parallel arrays.
     * @param quantiles the sorted array of quantiles (items)
     * @param cumWeights the cumulative weights, parallel to quantiles
     */
    KllItemsSketchSortedViewIterator(final T[] quantiles, final long[] cumWeights) {
      super(quantiles, cumWeights);
    }
  }
}
| 2,652 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllDoublesSketchSortedViewIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import org.apache.datasketches.quantilescommon.DoublesSortedViewIterator;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
/**
 * Iterator over KllDoublesSketchSortedView
 * @author Alexander Saydakov
 * @author Lee Rhodes
 */
public final class KllDoublesSketchSortedViewIterator implements DoublesSortedViewIterator {
  private final double[] quantileArr;
  private final long[] cumWeightArr;
  private final long totalN;
  private int index;

  /**
   * Constructs this iterator over the sorted view's two parallel arrays.
   * @param quantiles the sorted array of quantiles
   * @param cumWeights the cumulative weights, parallel to the quantiles
   */
  KllDoublesSketchSortedViewIterator(final double[] quantiles, final long[] cumWeights) {
    quantileArr = quantiles;
    cumWeightArr = cumWeights;
    // totalN is the last cumulative weight, or zero for an empty view
    totalN = (cumWeights.length == 0) ? 0 : cumWeights[cumWeights.length - 1];
    index = -1; // positioned before the first entry until next() is called
  }

  @Override
  public long getCumulativeWeight(final QuantileSearchCriteria searchCrit) {
    if (searchCrit != INCLUSIVE) {
      // exclusive: the cumulative weight strictly below the current quantile
      return (index == 0) ? 0 : cumWeightArr[index - 1];
    }
    return cumWeightArr[index];
  }

  @Override
  public long getN() {
    return totalN;
  }

  @Override
  public double getNormalizedRank(final QuantileSearchCriteria searchCrit) {
    return getCumulativeWeight(searchCrit) / (double) totalN;
  }

  @Override
  public double getQuantile() {
    return quantileArr[index];
  }

  @Override
  public long getWeight() {
    // individual weight = difference of adjacent cumulative weights
    return (index == 0) ? cumWeightArr[0] : cumWeightArr[index] - cumWeightArr[index - 1];
  }

  @Override
  public boolean next() {
    return ++index < quantileArr.length;
  }
}
| 2,653 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllFloatsHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.apache.datasketches.common.Util.isEven;
import static org.apache.datasketches.common.Util.isOdd;
import static org.apache.datasketches.kll.KllHelper.findLevelToCompact;
import java.util.Arrays;
import java.util.Random;
import org.apache.datasketches.memory.WritableMemory;
//
/**
* Static methods to support KllFloatsSketch
* @author Kevin Lang
* @author Alexander Saydakov
*/
//
final class KllFloatsHelper {
  // NOTE(review): utility class with only static members but no private constructor
  // to prevent instantiation — consider adding one.

  /**
   * The following code is only valid in the special case of exactly reaching capacity while updating.
   * It cannot be used while merging, while reducing k, or anything else.
   * Compacts one level of the sketch in place, freeing halfAdjPop slots just below
   * the compacted level so that level zero regains room for new updates.
   * @param fltSk the current KllFloatsSketch
   */
  private static void compressWhileUpdatingSketch(final KllFloatsSketch fltSk) {
    final int level =
        findLevelToCompact(fltSk.getK(), fltSk.getM(), fltSk.getNumLevels(), fltSk.levelsArr);
    if (level == fltSk.getNumLevels() - 1) {
      //The level to compact is the top level, thus we need to add a level.
      //Be aware that this operation grows the items array,
      //shifts the items data and the level boundaries of the data,
      //and grows the levels array and increments numLevels_.
      KllHelper.addEmptyTopLevelToCompletelyFullSketch(fltSk);
    }
    //after this point, the levelsArray will not be expanded, only modified.
    final int[] myLevelsArr = fltSk.levelsArr;
    final int rawBeg = myLevelsArr[level];
    final int rawEnd = myLevelsArr[level + 1];
    // +2 is OK because we already added a new top level if necessary
    final int popAbove = myLevelsArr[level + 2] - rawEnd;
    final int rawPop = rawEnd - rawBeg;
    final boolean oddPop = isOdd(rawPop);
    // with an odd population, one leftover item is kept in the current level (see below)
    final int adjBeg = oddPop ? rawBeg + 1 : rawBeg;
    final int adjPop = oddPop ? rawPop - 1 : rawPop;
    final int halfAdjPop = adjPop / 2;
    //the following is specific to Floats
    final float[] myFloatItemsArr = fltSk.getFloatItemsArray();
    if (level == 0) { // level zero might not be sorted, so we must sort it if we wish to compact it
      Arrays.sort(myFloatItemsArr, adjBeg, adjBeg + adjPop);
    }
    if (popAbove == 0) {
      KllFloatsHelper.randomlyHalveUpFloats(myFloatItemsArr, adjBeg, adjPop, KllSketch.random);
    } else {
      KllFloatsHelper.randomlyHalveDownFloats(myFloatItemsArr, adjBeg, adjPop, KllSketch.random);
      KllFloatsHelper.mergeSortedFloatArrays(
          myFloatItemsArr, adjBeg, halfAdjPop,
          myFloatItemsArr, rawEnd, popAbove,
          myFloatItemsArr, adjBeg + halfAdjPop);
    }
    int newIndex = myLevelsArr[level + 1] - halfAdjPop; // adjust boundaries of the level above
    fltSk.setLevelsArrayAt(level + 1, newIndex);
    if (oddPop) {
      fltSk.setLevelsArrayAt(level, myLevelsArr[level + 1] - 1); // the current level now contains one item
      myFloatItemsArr[myLevelsArr[level]] = myFloatItemsArr[rawBeg]; // namely this leftover guy
    } else {
      fltSk.setLevelsArrayAt(level, myLevelsArr[level + 1]); // the current level is now empty
    }
    // verify that we freed up halfAdjPop array slots just below the current level
    assert myLevelsArr[level] == rawBeg + halfAdjPop;
    // finally, we need to shift up the data in the levels below
    // so that the freed-up space can be used by level zero
    if (level > 0) {
      final int amount = rawBeg - myLevelsArr[0];
      System.arraycopy(myFloatItemsArr, myLevelsArr[0], myFloatItemsArr, myLevelsArr[0] + halfAdjPop, amount);
    }
    for (int lvl = 0; lvl < level; lvl++) {
      newIndex = myLevelsArr[lvl] + halfAdjPop; //adjust boundary
      fltSk.setLevelsArrayAt(lvl, newIndex);
    }
    fltSk.setFloatItemsArray(myFloatItemsArr);
  }

  /**
   * Merges the other sketch into mySketch, mutating mySketch in place.
   * Level-zero items of the other sketch are fed through the normal update path;
   * higher levels (if any) are merged via the general compression routine.
   * @param mySketch the target sketch, mutated by this merge
   * @param otherFltSk the source sketch; it is not modified
   */
  //assumes readOnly = false, and UPDATABLE, called from KllFloatsSketch::merge
  static void mergeFloatImpl(final KllFloatsSketch mySketch,
      final KllFloatsSketch otherFltSk) {
    if (otherFltSk.isEmpty()) { return; }
    //capture my key mutable fields before doing any merging
    final boolean myEmpty = mySketch.isEmpty();
    final float myMin = myEmpty ? Float.NaN : mySketch.getMinItem();
    final float myMax = myEmpty ? Float.NaN : mySketch.getMaxItem();
    final int myMinK = mySketch.getMinK();
    final long finalN = mySketch.getN() + otherFltSk.getN();
    //buffers that are referenced multiple times
    final int otherNumLevels = otherFltSk.getNumLevels();
    final int[] otherLevelsArr = otherFltSk.levelsArr;
    final float[] otherFloatItemsArr;
    //MERGE: update this sketch with level0 items from the other sketch
    if (otherFltSk.isCompactSingleItem()) {
      updateFloat(mySketch, otherFltSk.getFloatSingleItem());
      otherFloatItemsArr = new float[0];
    } else {
      otherFloatItemsArr = otherFltSk.getFloatItemsArray();
      for (int i = otherLevelsArr[0]; i < otherLevelsArr[1]; i++) {
        updateFloat(mySketch, otherFloatItemsArr[i]);
      }
    }
    //After the level 0 update, we capture the intermediate state of levels and items arrays...
    final int myCurNumLevels = mySketch.getNumLevels();
    final int[] myCurLevelsArr = mySketch.levelsArr;
    final float[] myCurFloatItemsArr = mySketch.getFloatItemsArray();
    // then rename them and initialize in case there are no higher levels
    int myNewNumLevels = myCurNumLevels;
    int[] myNewLevelsArr = myCurLevelsArr;
    float[] myNewFloatItemsArr = myCurFloatItemsArr;
    //merge higher levels if they exist
    if (otherNumLevels > 1 && !otherFltSk.isCompactSingleItem()) {
      final int tmpSpaceNeeded = mySketch.getNumRetained()
          + KllHelper.getNumRetainedAboveLevelZero(otherNumLevels, otherLevelsArr);
      final float[] workbuf = new float[tmpSpaceNeeded];
      final int ub = KllHelper.ubOnNumLevels(finalN);
      final int[] worklevels = new int[ub + 2]; // ub+1 does not work
      final int[] outlevels = new int[ub + 2];
      final int provisionalNumLevels = max(myCurNumLevels, otherNumLevels);
      populateFloatWorkArrays(workbuf, worklevels, provisionalNumLevels,
          myCurNumLevels, myCurLevelsArr, myCurFloatItemsArr,
          otherNumLevels, otherLevelsArr, otherFloatItemsArr);
      // notice that workbuf is being used as both the input and output
      final int[] result = generalFloatsCompress(mySketch.getK(), mySketch.getM(), provisionalNumLevels,
          workbuf, worklevels, workbuf, outlevels, mySketch.isLevelZeroSorted(), KllSketch.random);
      final int targetItemCount = result[1]; //was finalCapacity. Max size given k, m, numLevels
      final int curItemCount = result[2]; //was finalPop
      // now we need to finalize the results for mySketch
      //THE NEW NUM LEVELS
      myNewNumLevels = result[0];
      assert myNewNumLevels <= ub; // ub may be much bigger
      // THE NEW ITEMS ARRAY
      myNewFloatItemsArr = (targetItemCount == myCurFloatItemsArr.length)
          ? myCurFloatItemsArr
          : new float[targetItemCount];
      final int freeSpaceAtBottom = targetItemCount - curItemCount;
      //shift the new items array create space at bottom
      System.arraycopy(workbuf, outlevels[0], myNewFloatItemsArr, freeSpaceAtBottom, curItemCount);
      final int theShift = freeSpaceAtBottom - outlevels[0];
      //calculate the new levels array length
      final int finalLevelsArrLen;
      if (myCurLevelsArr.length < myNewNumLevels + 1) { finalLevelsArrLen = myNewNumLevels + 1; }
      else { finalLevelsArrLen = myCurLevelsArr.length; }
      //THE NEW LEVELS ARRAY
      myNewLevelsArr = new int[finalLevelsArrLen];
      for (int lvl = 0; lvl < myNewNumLevels + 1; lvl++) { // includes the "extra" index
        myNewLevelsArr[lvl] = outlevels[lvl] + theShift;
      }
      //MEMORY SPACE MANAGEMENT
      if (mySketch.getWritableMemory() != null) {
        final WritableMemory wmem =
            KllHelper.memorySpaceMgmt(mySketch, myNewLevelsArr.length, myNewFloatItemsArr.length);
        mySketch.setWritableMemory(wmem);
      }
    }
    //Update Preamble:
    mySketch.setN(finalN);
    if (otherFltSk.isEstimationMode()) { //otherwise the merge brings over exact items.
      mySketch.setMinK(min(myMinK, otherFltSk.getMinK()));
    }
    //Update numLevels, levelsArray, items
    mySketch.setNumLevels(myNewNumLevels);
    mySketch.setLevelsArray(myNewLevelsArr);
    mySketch.setFloatItemsArray(myNewFloatItemsArr);
    //Update min, max items
    final float otherMin = otherFltSk.getMinItem();
    final float otherMax = otherFltSk.getMaxItem();
    if (myEmpty) {
      mySketch.setMinItem(otherMin);
      mySketch.setMaxItem(otherMax);
    } else {
      mySketch.setMinItem(min(myMin, otherMin));
      mySketch.setMaxItem(max(myMax, otherMax));
    }
    assert KllHelper.sumTheSampleWeights(mySketch.getNumLevels(), mySketch.levelsArr) == mySketch.getN();
  }

  /**
   * Classic two-way merge of the sorted ranges bufA[startA, startA+lenA) and
   * bufB[startB, startB+lenB) into bufC starting at startC.
   * The ranges may alias the same underlying array as long as they do not overlap
   * the destination before it is written (the callers arrange this).
   */
  private static void mergeSortedFloatArrays(
      final float[] bufA, final int startA, final int lenA,
      final float[] bufB, final int startB, final int lenB,
      final float[] bufC, final int startC) {
    final int lenC = lenA + lenB;
    final int limA = startA + lenA;
    final int limB = startB + lenB;
    final int limC = startC + lenC;
    int a = startA;
    int b = startB;
    for (int c = startC; c < limC; c++) {
      if (a == limA) {
        bufC[c] = bufB[b];
        b++;
      } else if (b == limB) {
        bufC[c] = bufA[a];
        a++;
      } else if (bufA[a] < bufB[b]) {
        bufC[c] = bufA[a];
        a++;
      } else {
        bufC[c] = bufB[b];
        b++;
      }
    }
    assert a == limA;
    assert b == limB;
  }

  /**
   * Keeps a random half of the 2m items in buf[start, start+length), compacting the
   * survivors into the lower half buf[start, start+length/2).
   * Validation Method. This must be modified to use the validation test
   * @param buf the items array
   * @param start data start
   * @param length items array length
   * @param random instance of Random
   */
  //NOTE For validation Method: Need to modify to run.
  private static void randomlyHalveDownFloats(final float[] buf, final int start, final int length,
      final Random random) {
    assert isEven(length);
    final int half_length = length / 2;
    final int offset = random.nextInt(2); // disable for validation
    //final int offset = deterministicOffset(); // enable for validation
    int j = start + offset;
    for (int i = start; i < (start + half_length); i++) {
      buf[i] = buf[j];
      j += 2;
    }
  }

  /**
   * Keeps a random half of the items in buf[start, start+length), compacting the
   * survivors into the upper half buf[start+length/2, start+length).
   * Validation Method. This must be modified to use the validation test
   * @param buf the items array
   * @param start data start
   * @param length items array length
   * @param random instance of Random
   */
  //NOTE For validation Method: Need to modify to run.
  private static void randomlyHalveUpFloats(final float[] buf, final int start, final int length,
      final Random random) {
    assert isEven(length);
    final int half_length = length / 2;
    final int offset = random.nextInt(2); // disable for validation
    //final int offset = deterministicOffset(); // enable for validation
    int j = (start + length) - 1 - offset;
    for (int i = (start + length) - 1; i >= (start + half_length); i--) {
      buf[i] = buf[j];
      j -= 2;
    }
  }

  /**
   * Inserts one item into level zero of the sketch, updating min/max and compacting
   * first if level zero is full. NaN items are silently ignored.
   * @param fltSk the sketch to update
   * @param item the item to insert
   */
  //Called from KllFloatsSketch::update and this
  static void updateFloat(final KllFloatsSketch fltSk,
      final float item) {
    if (Float.isNaN(item)) { return; } //ignore
    if (fltSk.isEmpty()) {
      fltSk.setMinItem(item);
      fltSk.setMaxItem(item);
    } else {
      fltSk.setMinItem(min(fltSk.getMinItem(), item));
      fltSk.setMaxItem(max(fltSk.getMaxItem(), item));
    }
    // level zero is full when its start boundary reaches 0; compact to make room
    if (fltSk.levelsArr[0] == 0) { compressWhileUpdatingSketch(fltSk); }
    final int myLevelsArrAtZero = fltSk.levelsArr[0]; //LevelsArr could be expanded
    fltSk.incN();
    fltSk.setLevelZeroSorted(false);
    final int nextPos = myLevelsArrAtZero - 1; // level zero grows downward toward index 0
    assert myLevelsArrAtZero >= 0;
    fltSk.setLevelsArrayAt(0, nextPos);
    fltSk.setFloatItemsArrayAt(nextPos, item);
  }

  /**
   * Compression algorithm used to merge higher levels.
   * <p>Here is what we do for each level:</p>
   * <ul><li>If it does not need to be compacted, then simply copy it over.</li>
   * <li>Otherwise, it does need to be compacted, so...
   * <ul><li>Copy zero or one guy over.</li>
   * <li>If the level above is empty, halve up.</li>
   * <li>Else the level above is nonempty, so halve down, then merge up.</li>
   * </ul></li>
   * <li>Adjust the boundaries of the level above.</li>
   * </ul>
   *
   * <p>It can be proved that generalCompress returns a sketch that satisfies the space constraints
   * no matter how much data is passed in.
   * We are pretty sure that it works correctly when inBuf and outBuf are the same.
   * All levels except for level zero must be sorted before calling this, and will still be
   * sorted afterwards.
   * Level zero is not required to be sorted before, and may not be sorted afterwards.</p>
   *
   * <p>This trashes inBuf and inLevels and modifies outBuf and outLevels.</p>
   *
   * @param k The sketch parameter k
   * @param m The minimum level size
   * @param numLevelsIn provisional number of number of levels = max(this.numLevels, other.numLevels)
   * @param inBuf work buffer of size = this.getNumRetained() + other.getNumRetainedAboveLevelZero().
   * This contains the float[] of the other sketch
   * @param inLevels work levels array size = ubOnNumLevels(this.n + other.n) + 2
   * @param outBuf the same array as inBuf
   * @param outLevels the same size as inLevels
   * @param isLevelZeroSorted true if this.level 0 is sorted
   * @param random instance of java.util.Random
   * @return int array of: {numLevels, targetItemCount, currentItemCount)
   */
  //
  private static int[] generalFloatsCompress(
      final int k,
      final int m,
      final int numLevelsIn,
      final float[] inBuf,
      final int[] inLevels,
      final float[] outBuf,
      final int[] outLevels,
      final boolean isLevelZeroSorted,
      final Random random) {
    assert numLevelsIn > 0; // things are too weird if zero levels are allowed
    int numLevels = numLevelsIn;
    int currentItemCount = inLevels[numLevels] - inLevels[0]; // decreases with each compaction
    int targetItemCount = KllHelper.computeTotalItemCapacity(k, m, numLevels); // increases if we add levels
    boolean doneYet = false;
    outLevels[0] = 0;
    int curLevel = -1;
    while (!doneYet) {
      curLevel++; // start out at level 0
      // If we are at the current top level, add an empty level above it for convenience,
      // but do not increment numLevels until later
      if (curLevel == (numLevels - 1)) {
        inLevels[curLevel + 2] = inLevels[curLevel + 1];
      }
      final int rawBeg = inLevels[curLevel];
      final int rawLim = inLevels[curLevel + 1];
      final int rawPop = rawLim - rawBeg;
      if ((currentItemCount < targetItemCount) || (rawPop < KllHelper.levelCapacity(k, numLevels, curLevel, m))) {
        // copy level over as is
        // because inBuf and outBuf could be the same, make sure we are not moving data upwards!
        assert (rawBeg >= outLevels[curLevel]);
        System.arraycopy(inBuf, rawBeg, outBuf, outLevels[curLevel], rawPop);
        outLevels[curLevel + 1] = outLevels[curLevel] + rawPop;
      }
      else {
        // The sketch is too full AND this level is too full, so we compact it
        // Note: this can add a level and thus change the sketch's capacity
        final int popAbove = inLevels[curLevel + 2] - rawLim;
        final boolean oddPop = isOdd(rawPop);
        final int adjBeg = oddPop ? 1 + rawBeg : rawBeg;
        final int adjPop = oddPop ? rawPop - 1 : rawPop;
        final int halfAdjPop = adjPop / 2;
        if (oddPop) { // copy one guy over
          outBuf[outLevels[curLevel]] = inBuf[rawBeg];
          outLevels[curLevel + 1] = outLevels[curLevel] + 1;
        } else { // copy zero guys over
          outLevels[curLevel + 1] = outLevels[curLevel];
        }
        // level zero might not be sorted, so we must sort it if we wish to compact it
        if ((curLevel == 0) && !isLevelZeroSorted) {
          Arrays.sort(inBuf, adjBeg, adjBeg + adjPop);
        }
        if (popAbove == 0) { // Level above is empty, so halve up
          randomlyHalveUpFloats(inBuf, adjBeg, adjPop, random);
        } else { // Level above is nonempty, so halve down, then merge up
          randomlyHalveDownFloats(inBuf, adjBeg, adjPop, random);
          mergeSortedFloatArrays(inBuf, adjBeg, halfAdjPop, inBuf, rawLim, popAbove, inBuf, adjBeg + halfAdjPop);
        }
        // track the fact that we just eliminated some data
        currentItemCount -= halfAdjPop;
        // Adjust the boundaries of the level above
        inLevels[curLevel + 1] = inLevels[curLevel + 1] - halfAdjPop;
        // Increment numLevels if we just compacted the old top level
        // This creates some more capacity (the size of the new bottom level)
        if (curLevel == (numLevels - 1)) {
          numLevels++;
          targetItemCount += KllHelper.levelCapacity(k, numLevels, 0, m);
        }
      } // end of code for compacting a level
      // determine whether we have processed all levels yet (including any new levels that we created)
      if (curLevel == (numLevels - 1)) { doneYet = true; }
    } // end of loop over levels
    assert (outLevels[numLevels] - outLevels[0]) == currentItemCount;
    return new int[] {numLevels, targetItemCount, currentItemCount};
  }

  /**
   * Copies/merges the levels of "self" and "other" into the single contiguous work
   * buffer used by generalFloatsCompress, filling worklevels with the combined
   * per-level boundaries. Level zero of "other" is excluded because it was already
   * pushed into "self" via updateFloat in mergeFloatImpl.
   */
  private static void populateFloatWorkArrays(
      final float[] workbuf, final int[] worklevels, final int provisionalNumLevels,
      final int myCurNumLevels, final int[] myCurLevelsArr, final float[] myCurFloatItemsArr,
      final int otherNumLevels, final int[] otherLevelsArr, final float[] otherFloatItemsArr) {
    worklevels[0] = 0;
    // Note: the level zero data from "other" was already inserted into "self"
    final int selfPopZero = KllHelper.currentLevelSizeItems(0, myCurNumLevels, myCurLevelsArr);
    System.arraycopy( myCurFloatItemsArr, myCurLevelsArr[0], workbuf, worklevels[0], selfPopZero);
    worklevels[1] = worklevels[0] + selfPopZero;
    for (int lvl = 1; lvl < provisionalNumLevels; lvl++) {
      final int selfPop = KllHelper.currentLevelSizeItems(lvl, myCurNumLevels, myCurLevelsArr);
      final int otherPop = KllHelper.currentLevelSizeItems(lvl, otherNumLevels, otherLevelsArr);
      worklevels[lvl + 1] = worklevels[lvl] + selfPop + otherPop;
      if (selfPop > 0 && otherPop == 0) {
        System.arraycopy(myCurFloatItemsArr, myCurLevelsArr[lvl], workbuf, worklevels[lvl], selfPop);
      } else if (selfPop == 0 && otherPop > 0) {
        System.arraycopy(otherFloatItemsArr, otherLevelsArr[lvl], workbuf, worklevels[lvl], otherPop);
      } else if (selfPop > 0 && otherPop > 0) {
        mergeSortedFloatArrays(
            myCurFloatItemsArr, myCurLevelsArr[lvl], selfPop,
            otherFloatItemsArr, otherLevelsArr[lvl], otherPop,
            workbuf, worklevels[lvl]);
      }
    }
  }

  /*
   * Validation Method.
   * The following must be enabled for use with the KllFloatsValidationTest,
   * which is only enabled for manual testing. In addition, two Validation Methods
   * above need to be modified.
   */ //NOTE Validation Method: Need to uncomment to use
  //  static int nextOffset = 0;
  //
  //  private static int deterministicOffset() {
  //    final int result = nextOffset;
  //    nextOffset = 1 - nextOffset;
  //    return result;
  //  }
}
| 2,654 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/KllHeapItemsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.kll;
import static org.apache.datasketches.common.ByteArrayUtil.copyBytes;
import static org.apache.datasketches.kll.KllPreambleUtil.DATA_START_ADR;
import static org.apache.datasketches.kll.KllPreambleUtil.N_LONG_ADR;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_EMPTY;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_FULL;
import static org.apache.datasketches.kll.KllSketch.SketchStructure.COMPACT_SINGLE;
import java.lang.reflect.Array;
import java.util.Comparator;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
@SuppressWarnings("unchecked")
final class KllHeapItemsSketch<T> extends KllItemsSketch<T> {
  private final int k; // configured size of K.
  private final int m; // configured size of M.
  private long n; // number of items input into this sketch.
  private int minK; // dynamic minK for error estimation after merging with different k.
  private boolean isLevelZeroSorted; // true if level zero is currently in sorted order
  private T minItem; // smallest item seen, or null when empty
  private T maxItem; // largest item seen, or null when empty
  private Object[] itemsArr; // backing items array; level zero grows downward from the top

  /**
   * Constructs a new empty instance of this sketch on the Java heap.
   * @param k configured k, validated against m
   * @param m configured minimum level size, validated first
   * @param comparator the ordering of items of type T
   * @param serDe the serializer / deserializer for items of type T
   */
  KllHeapItemsSketch(
      final int k,
      final int m,
      final Comparator<? super T> comparator,
      final ArrayOfItemsSerDe<T> serDe) {
    super(SketchStructure.UPDATABLE, comparator, serDe);
    KllHelper.checkM(m);
    KllHelper.checkK(k, m);
    this.levelsArr = new int[] {k, k}; // empty level zero: start == end == k
    this.readOnly = false;
    this.k = k;
    this.m = m;
    this.n = 0;
    this.minK = k;
    this.isLevelZeroSorted = false;
    this.minItem = null;
    this.maxItem = null;
    this.itemsArr = new Object[k];
  }

  /**
   * The Heapify constructor, which constructs an image of this sketch from
   * a Memory (or WritableMemory) object that was created by this sketch
   * and has a type T consistent with the given comparator and serDe.
   * Once the data from the given Memory has been transferred into this heap sketch,
   * the reference to the Memory object is no longer retained.
   * @param srcMem the Source Memory image that contains data.
   * @param comparator the comparator for this sketch and given Memory.
   * @param serDe the serializer / deserializer for this sketch and the given Memory.
   */
  KllHeapItemsSketch(
      final Memory srcMem,
      final Comparator<? super T> comparator,
      final ArrayOfItemsSerDe<T> serDe) {
    super(SketchStructure.UPDATABLE, comparator, serDe);
    // validate the Memory image and extract the preamble fields
    final KllMemoryValidate memVal = new KllMemoryValidate(srcMem, SketchType.ITEMS_SKETCH, serDe);
    this.k = memVal.k;
    this.m = memVal.m;
    this.levelsArr = memVal.levelsArr;
    this.readOnly = false;
    this.n = memVal.n;
    this.minK = memVal.minK;
    this.isLevelZeroSorted = memVal.level0SortedFlag;
    this.itemsArr = new Object[levelsArr[memVal.numLevels]]; //updatable size
    final SketchStructure memStruct = memVal.sketchStructure;
    if (memStruct == COMPACT_EMPTY) {
      this.minItem = null;
      this.maxItem = null;
      this.itemsArr = new Object[k];
    } else if (memStruct == COMPACT_SINGLE) {
      // single-item image: the one item follows the (shortened) preamble
      final int offset = N_LONG_ADR;
      final T item = serDe.deserializeFromMemory(srcMem, offset, 1)[0];
      this.minItem = item;
      this.maxItem = item;
      itemsArr[k - 1] = item; // place the single item at the top of level zero
    } else if (memStruct == COMPACT_FULL) {
      // full image layout: levels array, then minItem, maxItem, then the retained items
      int offset = DATA_START_ADR + memVal.numLevels * Integer.BYTES;
      this.minItem = serDe.deserializeFromMemory(srcMem, offset, 1)[0];
      offset += serDe.sizeOf(minItem);
      this.maxItem = serDe.deserializeFromMemory(srcMem, offset, 1)[0];
      offset += serDe.sizeOf(maxItem);
      final int numRetained = levelsArr[memVal.numLevels] - levelsArr[0];
      final Object[] retItems = serDe.deserializeFromMemory(srcMem, offset, numRetained);
      System.arraycopy(retItems, 0, itemsArr, levelsArr[0], numRetained);
    } else { //memStruct == UPDATABLE
      throw new SketchesArgumentException(UNSUPPORTED_MSG + "UPDATABLE");
    }
  }

  @Override
  public int getK() {
    return k;
  }

  @Override
  public T getMaxItem() {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    return maxItem;
  }

  @Override
  public T getMinItem() {
    if (isEmpty()) { throw new SketchesArgumentException(EMPTY_MSG); }
    return minItem;
  }

  @Override
  public long getN() {
    return n;
  }

  //restricted

  @Override
  int getM() {
    return m;
  }

  @Override
  int getMinK() {
    return minK;
  }

  // Serializes minItem followed by maxItem into a single byte array.
  @Override
  byte[] getMinMaxByteArr() {
    final byte[] minBytes = serDe.serializeToByteArray(minItem);
    final byte[] maxBytes = serDe.serializeToByteArray(maxItem);
    final byte[] minMaxBytes = new byte[minBytes.length + maxBytes.length];
    copyBytes(minBytes, 0, minMaxBytes, 0, minBytes.length);
    copyBytes(maxBytes, 0, minMaxBytes, minBytes.length, maxBytes.length);
    return minMaxBytes;
  }

  // Total serialized size of minItem plus maxItem, in bytes.
  @Override
  int getMinMaxSizeBytes() {
    final int minBytes = serDe.sizeOf(minItem);
    final int maxBytes = serDe.sizeOf(maxItem);
    return minBytes + maxBytes;
  }

  // Returns a typed copy of only the retained (populated) region of the items array.
  @Override
  T[] getRetainedItemsArray() {
    final int numRet = getNumRetained();
    final T[] outArr = (T[]) Array.newInstance(serDe.getClassOfT(), numRet);
    System.arraycopy(itemsArr, levelsArr[0], outArr, 0 , numRet);
    return outArr;
  }

  @Override
  byte[] getRetainedItemsByteArr() {
    final T[] retArr = getRetainedItemsArray();
    return serDe.serializeToByteArray(retArr);
  }

  @Override
  int getRetainedItemsSizeBytes() {
    return getRetainedItemsByteArr().length;
  }

  // Valid only when n == 1; the single item lives at the top of level zero.
  @Override
  T getSingleItem() {
    if (n != 1L) { throw new SketchesArgumentException(NOT_SINGLE_ITEM_MSG); }
    final T item = (T) itemsArr[k - 1];
    return item;
  }

  @Override
  byte[] getSingleItemByteArr() {
    return serDe.serializeToByteArray(getSingleItem());
  }

  @Override
  int getSingleItemSizeBytes() {
    return serDe.sizeOf(getSingleItem());
  }

  // Returns a typed copy of the entire backing array (including unpopulated slots).
  @Override
  T[] getTotalItemsArray() {
    if (n == 0) { return (T[]) Array.newInstance(serDe.getClassOfT(), k); }
    final T[] outArr = (T[]) Array.newInstance(serDe.getClassOfT(), itemsArr.length);
    System.arraycopy(itemsArr, 0, outArr, 0, itemsArr.length);
    return outArr;
  }

  // Heap sketch: no backing Memory, always null.
  @Override
  WritableMemory getWritableMemory() {
    return null;
  }

  @Override
  void incN() {
    n++;
  }

  @Override
  boolean isLevelZeroSorted() {
    return isLevelZeroSorted;
  }

  @Override
  void setLevelZeroSorted(final boolean sorted) {
    isLevelZeroSorted = sorted;
  }

  @Override
  void setMinK(final int minK) {
    this.minK = minK;
  }

  @Override
  void setN(final long n) {
    this.n = n;
  }

  @Override
  void setItemsArray(final Object[] itemsArr) {
    this.itemsArr = itemsArr;
  }

  @Override
  void setItemsArrayAt(final int index, final Object item) {
    this.itemsArr[index] = item;
  }

  @Override
  void setMaxItem(final Object item) {
    this.maxItem = (T) item;
  }

  @Override
  void setMinItem(final Object item) {
    this.minItem = (T) item;
  }
}
| 2,655 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/kll/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package is for the implementations of the sketch algorithm developed by Zohar Karnin, Kevin Lang,
* and Edo Liberty that is commonly referred to as the "KLL" sketch after the authors' last names.
*/
package org.apache.datasketches.kll;
| 2,656 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/ReservoirSize.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.Util;
/**
* This class provides a compact representation of reservoir size by encoding it into a
* fixed-point 16-bit value.
* <p>The value itself is a fractional power of 2, with 5 bits of exponent and 11 bits of
* mantissa. The exponent allows a choice of anywhere from 0-30, and there are 2048 possible
* reservoir size values within each octave. Because reservoir size must be an integer, this
* means that some sizes below 2048 may have multiple valid encodings.</p>
*
* <p>Reservoir sizes can be specified exactly to 4096, and may be off by up to 0.03% thereafter.
* The value returned is always at least as large as the requested size, but may be larger.
* </p>
*
* <p>NOTE: Numerical instability may cause an off-by-one error on reservoir size, causing a
* slight increase in storage over the optimal value.</p>
*
* @author Jon Malkin
*/
final class ReservoirSize {
  /** Number of bins per power of two. */
  static final int BINS_PER_OCTAVE = 2048;

  // Precomputed inverses so encode/decode can multiply instead of divide.
  private static final double INV_BINS_PER_OCTAVE = 1.0 / BINS_PER_OCTAVE;
  private static final double INV_LN_2 = 1.0 / Math.log(2.0);

  // Bit-field layout: 5-bit exponent in the high bits, 11-bit bin index below it.
  private static final int EXPONENT_MASK = 0x1F;
  private static final int EXPONENT_SHIFT = 11;
  private static final int INDEX_MASK = 0x07FF;
  private static final int OUTPUT_MASK = 0xFFFF;
  private static final int MAX_ABS_VALUE = 2146959360;
  private static final int MAX_ENC_VALUE = 0xF7FF; // p=30, i=2047

  private ReservoirSize() {}

  /**
   * Given target reservoir size k, computes the smallest representable reservoir size that can
   * hold k entries and returns it in a 16-bit fixed-point format as a <code>short</code>.
   *
   * @param k target reservoir size
   * @return reservoir size as 16-bit encoded value
   */
  public static short computeSize(final int k) {
    if (k < 1 || k > MAX_ABS_VALUE) {
      throw new SketchesArgumentException("Can only encode strictly positive sketch sizes "
          + "less than " + MAX_ABS_VALUE + ", found: " + k);
    }

    final int exp = Util.exactLog2OfInt(Util.floorPowerOf2(k), "computeSize: p");

    // Exact powers of 2 encode with a zero bin index; the floor()+1 computation
    // below would otherwise overshoot them by a bin, so handle them first.
    if (Util.isIntPowerOf2(k)) {
      return (short) (((exp & EXPONENT_MASK) << EXPONENT_SHIFT) & OUTPUT_MASK);
    }

    // Mantissa is a scalar in [1,2); k can be reconstructed as m * 2^exp.
    final double m = Math.pow(2.0, (Math.log(k) * INV_LN_2) - exp);

    // Bin index = ceil(m * BPO) - BPO, realized here as floor(...) + 1.
    // Typically lands in [0, BINS_PER_OCTAVE - 1], but see the overflow note below.
    final int binIdx = ((int) Math.floor(m * BINS_PER_OCTAVE) - BINS_PER_OCTAVE) + 1;

    // The implicit ceiling can roll the index up to a full octave. Example with
    // BPO = 2048: k = 32767 gives exp = 14, but decodeValue(p=14, i=2047) = 32756
    // which is < k, so we encode exp + 1 with a zero index instead.
    if (binIdx == BINS_PER_OCTAVE) {
      return (short) ((((exp + 1) & EXPONENT_MASK) << EXPONENT_SHIFT) & OUTPUT_MASK);
    }

    return (short) (((exp & EXPONENT_MASK) << EXPONENT_SHIFT) | ((binIdx & INDEX_MASK) & OUTPUT_MASK));
  }

  /**
   * Decodes the 16-bit reservoir size value into an int.
   *
   * @param encodedSize Encoded 16-bit value
   * @return int represented by <code>encodedSize</code>
   */
  public static int decodeValue(final short encodedSize) {
    // Treat the short as an unsigned 16-bit quantity.
    final int enc = encodedSize & 0xFFFF;
    if (enc > MAX_ENC_VALUE) {
      throw new SketchesArgumentException("Maximum valid encoded value is "
          + Integer.toHexString(MAX_ENC_VALUE) + ", found: " + enc);
    }
    final int exp = (enc >>> EXPONENT_SHIFT) & EXPONENT_MASK;
    final int binIdx = enc & INDEX_MASK;
    // Reconstruct: 2^exp * (1 + binIdx / BINS_PER_OCTAVE), truncated to int.
    return (int) ((1 << exp) * ((binIdx * INV_BINS_PER_OCTAVE) + 1.0));
  }
}
| 2,657 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/ReservoirLongsUnion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.sampling.PreambleUtil.EMPTY_FLAG_MASK;
import static org.apache.datasketches.sampling.PreambleUtil.FAMILY_BYTE;
import static org.apache.datasketches.sampling.PreambleUtil.SER_VER;
import static org.apache.datasketches.sampling.PreambleUtil.extractEncodedReservoirSize;
import static org.apache.datasketches.sampling.PreambleUtil.extractFlags;
import static org.apache.datasketches.sampling.PreambleUtil.extractMaxK;
import static org.apache.datasketches.sampling.PreambleUtil.extractPreLongs;
import static org.apache.datasketches.sampling.PreambleUtil.extractSerVer;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* Class to union reservoir samples of longs.
*
* <p>
* For efficiency reasons, the unioning process picks one of the two sketches to use as the base. As
* a result, we provide only a stateful union. Using the same approach for a merge would result in
* unpredictable side effects on the underlying sketches.
* </p>
*
* <p>
* A union object is created with a maximum value of <code>k</code>, represented using the ReservoirSize
* class. The unioning process may cause the actual number of samples to fall below that maximum
* value, but never to exceed it. The result of a union will be a reservoir where each item from the
* global input has a uniform probability of selection, but there are no claims about higher order
* statistics. For instance, in general all possible permutations of the global input are not
* equally likely.
* </p>
*
* @author Jon Malkin
* @author Kevin Lang
*/
public final class ReservoirLongsUnion {
  private ReservoirLongsSketch gadget_; // lazily created; holds the union's current state
  private final int maxK_;              // maximum reservoir capacity; fixed at construction

  /**
   * Empty constructor using ReservoirSize-encoded maxK value
   *
   * @param maxK Maximum allowed reservoir capacity for this union
   */
  private ReservoirLongsUnion(final int maxK) {
    maxK_ = maxK;
  }

  /**
   * Creates an empty Union with a maximum reservoir capacity of size k.
   *
   * @param maxK The maximum allowed reservoir capacity for any sketches in the union
   * @return A new ReservoirLongsUnion
   */
  public static ReservoirLongsUnion newInstance(final int maxK) {
    return new ReservoirLongsUnion(maxK);
  }

  /**
   * Instantiates a Union from Memory
   *
   * @param srcMem Memory object containing a serialized union
   * @return A ReservoirLongsUnion created from the provided Memory
   */
  public static ReservoirLongsUnion heapify(final Memory srcMem) {
    Family.RESERVOIR_UNION.checkFamilyID(srcMem.getByte(FAMILY_BYTE));

    final int numPreLongs = extractPreLongs(srcMem);
    final int serVer = extractSerVer(srcMem);
    final boolean isEmpty = (extractFlags(srcMem) & EMPTY_FLAG_MASK) != 0;
    int maxK = extractMaxK(srcMem);

    final boolean preLongsEqMin = (numPreLongs == Family.RESERVOIR_UNION.getMinPreLongs());
    final boolean preLongsEqMax = (numPreLongs == Family.RESERVOIR_UNION.getMaxPreLongs());

    if (!preLongsEqMin && !preLongsEqMax) {
      throw new SketchesArgumentException("Possible corruption: Non-empty union with only "
          + Family.RESERVOIR_UNION.getMinPreLongs() + " preLongs");
    }
    if (serVer != SER_VER) {
      // Ser ver 1 stored maxK as a 16-bit ReservoirSize-encoded value; decode it for
      // backward compatibility. Any other version mismatch is treated as corruption.
      if (serVer == 1) {
        final short encMaxK = extractEncodedReservoirSize(srcMem);
        maxK = ReservoirSize.decodeValue(encMaxK);
      } else {
        throw new SketchesArgumentException(
            "Possible Corruption: Ser Ver must be " + SER_VER + ": " + serVer);
      }
    }

    final ReservoirLongsUnion rlu = new ReservoirLongsUnion(maxK);

    if (!isEmpty) {
      // The serialized gadget sketch image immediately follows the preamble.
      final int preLongBytes = numPreLongs << 3;
      final Memory sketchMem =
          srcMem.region(preLongBytes, srcMem.getCapacity() - preLongBytes);
      rlu.update(sketchMem);
    }

    return rlu;
  }

  /**
   * Returns the maximum allowed reservoir capacity in this union. The current reservoir capacity
   * may be lower.
   *
   * @return The maximum allowed reservoir capacity in this union.
   */
  public int getMaxK() { return maxK_; }

  /**
   * Union the given sketch.
   * <p>
   * This method can be repeatedly called. If the given sketch is null it is interpreted as an empty
   * sketch.
   * </p>
   *
   * @param sketchIn The incoming sketch.
   */
  public void update(final ReservoirLongsSketch sketchIn) {
    if (sketchIn == null) {
      return;
    }

    // Downsample first if the incoming sketch's k exceeds this union's maxK.
    final ReservoirLongsSketch rls =
        (sketchIn.getK() <= maxK_ ? sketchIn : sketchIn.downsampledCopy(maxK_));

    // can modify the sketch if we downsampled, otherwise may need to copy it
    final boolean isModifiable = (sketchIn != rls);
    if (gadget_ == null) {
      createNewGadget(rls, isModifiable);
    } else {
      twoWayMergeInternal(rls, isModifiable);
    }
  }

  /**
   * Union the given Memory image of the sketch.
   *
   * <p>
   * This method can be repeatedly called. If the given sketch is null it is interpreted as an empty
   * sketch.
   * </p>
   *
   * @param mem Memory image of sketch to be merged
   */
  public void update(final Memory mem) {
    if (mem == null) {
      return;
    }

    // Heapifying always produces a private copy, so the result is freely modifiable.
    ReservoirLongsSketch rls = ReservoirLongsSketch.heapify(mem);
    rls = (rls.getK() <= maxK_ ? rls : rls.downsampledCopy(maxK_));

    if (gadget_ == null) {
      createNewGadget(rls, true);
    } else {
      twoWayMergeInternal(rls, true);
    }
  }

  /**
   * Present this union with a long.
   *
   * @param datum The given long datum.
   */
  public void update(final long datum) {
    // Lazily create the gadget on first single-item update.
    if (gadget_ == null) {
      gadget_ = ReservoirLongsSketch.newInstance(maxK_);
    }
    gadget_.update(datum);
  }

  /**
   * Resets this Union. MaxK remains intact, otherwise reverts back to its virgin state.
   */
  void reset() {
    // gadget_ is created lazily, so it may still be null if nothing has been
    // unioned yet; in that case the union is already in its virgin state.
    if (gadget_ != null) {
      gadget_.reset();
    }
  }

  /**
   * Returns a sketch representing the current state of the union.
   *
   * @return The result of any unions already processed, or null if no data has been presented.
   */
  public ReservoirLongsSketch getResult() {
    // Copy so that later union updates cannot mutate the returned sketch.
    return (gadget_ != null ? gadget_.copy() : null);
  }

  /**
   * Returns a human-readable summary of the sketch, without items.
   *
   * @return A string version of the sketch summary
   */
  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();

    final String thisSimpleName = this.getClass().getSimpleName();

    sb.append(LS);
    sb.append("### ").append(thisSimpleName).append(" SUMMARY: ").append(LS);
    sb.append("Max k: ").append(maxK_).append(LS);
    if (gadget_ == null) {
      sb.append("Gadget is null").append(LS);
    } else {
      sb.append("Gadget summary: ").append(gadget_.toString());
    }
    sb.append("### END UNION SUMMARY").append(LS);

    return sb.toString();
  }

  /**
   * Returns a byte array representation of this union
   *
   * @return a byte array representation of this union
   */
  // gadgetBytes will be null only if gadget_ == null AND empty == true
  public byte[] toByteArray() {
    final int preLongs, outBytes;
    final boolean empty = gadget_ == null;
    final byte[] gadgetBytes = (gadget_ != null ? gadget_.toByteArray() : null);

    if (empty) {
      preLongs = Family.RESERVOIR_UNION.getMinPreLongs();
      outBytes = 8;
    } else {
      preLongs = Family.RESERVOIR_UNION.getMaxPreLongs();
      outBytes = (preLongs << 3) + gadgetBytes.length; // longs, so we know the size
    }
    final byte[] outArr = new byte[outBytes];
    final WritableMemory mem = WritableMemory.writableWrap(outArr);

    // construct header
    PreambleUtil.insertPreLongs(mem, preLongs); // Byte 0
    PreambleUtil.insertSerVer(mem, SER_VER); // Byte 1
    PreambleUtil.insertFamilyID(mem, Family.RESERVOIR_UNION.getID()); // Byte 2
    if (empty) {
      PreambleUtil.insertFlags(mem, EMPTY_FLAG_MASK); // Byte 3
    } else {
      PreambleUtil.insertFlags(mem, 0);
    }
    PreambleUtil.insertMaxK(mem, maxK_); // Bytes 4-7

    // Append the serialized gadget after the preamble, if present.
    if (!empty) {
      final int preBytes = preLongs << 3;
      mem.putByteArray(preBytes, gadgetBytes, 0, gadgetBytes.length);
    }

    return outArr;
  }

  // Installs the first gadget, either by growing an exact-mode input into a maxK-sized
  // reservoir or by adopting (possibly copying) the input sketch directly.
  private void createNewGadget(final ReservoirLongsSketch sketchIn,
                               final boolean isModifiable) {
    if ((sketchIn.getK() < maxK_) && (sketchIn.getN() <= sketchIn.getK())) {
      // incoming sketch is in exact mode with sketch's k < maxK,
      // so we can create a gadget at size maxK and keep everything
      // NOTE: assumes twoWayMergeInternal first checks if sketchIn is in exact mode
      gadget_ = ReservoirLongsSketch.newInstance(maxK_);
      twoWayMergeInternal(sketchIn, isModifiable); // isModifiable could be fixed to false here
    } else {
      // use the input sketch as gadget, copying if needed
      gadget_ = (isModifiable ? sketchIn : sketchIn.copy());
    }
  }

  // We make a three-way classification of sketch states.
  // "uni" when (n < k); source of unit weights, can only accept unit weights
  // "mid" when (n == k); source of unit weights, can accept "light" general weights.
  // "gen" when (n > k); source of general weights, can accept "light" general weights.

  // source   target   status      update     notes
  // ----------------------------------------------------------------------------------------------
  // uni,mid  uni      okay        standard   target might transition to mid and gen
  // uni,mid  mid,gen  okay        standard   target might transition to gen
  // gen      uni      must swap   N/A
  // gen      mid,gen  maybe swap  weighted   N assumes fractional values during merge
  // ----------------------------------------------------------------------------------------------

  // Here is why in the (gen, gen) merge case, the items will be light enough in at least one
  // direction:
  // Obviously either (n_s/k_s <= n_t/k_t) OR (n_s/k_s >= n_t/k_t).
  // WLOG say its the former, then (n_s/k_s < n_t/(k_t - 1)) provided n_t > 0 and k_t > 1

  /**
   * This either merges sketchIn into gadget_ or gadget_ into sketchIn. If merging into sketchIn
   * with isModifiable set to false, copies elements from sketchIn first, leaving original
   * unchanged.
   *
   * @param sketchIn Sketch with new samples from which to draw
   * @param isModifiable Flag indicating whether sketchIn can be modified (e.g. if it was rebuild
   *        from Memory)
   */
  private void twoWayMergeInternal(final ReservoirLongsSketch sketchIn,
                                   final boolean isModifiable) {
    if (sketchIn.getN() <= sketchIn.getK()) {
      // source is in exact mode ("uni"/"mid"): items have unit weight
      twoWayMergeInternalStandard(sketchIn);
    } else if (gadget_.getN() < gadget_.getK()) {
      // merge into sketchIn, so swap first
      final ReservoirLongsSketch tmpSketch = gadget_;
      gadget_ = (isModifiable ? sketchIn : sketchIn.copy());
      twoWayMergeInternalStandard(tmpSketch);
    } else if (sketchIn.getImplicitSampleWeight() < (gadget_.getN()
        / ((double) (gadget_.getK() - 1)))) {
      // implicit weights in sketchIn are light enough to merge into gadget
      twoWayMergeInternalWeighted(sketchIn);
    } else {
      // Use next next line for an assert/exception?
      // gadget_.getImplicitSampleWeight() < sketchIn.getN() / ((double) (sketchIn.getK() - 1)))
      // implicit weights in gadget are light enough to merge into sketchIn, so swap first
      final ReservoirLongsSketch tmpSketch = gadget_;
      gadget_ = (isModifiable ? sketchIn : sketchIn.copy());
      twoWayMergeInternalWeighted(tmpSketch);
    }
  }

  // should be called ONLY by twoWayMergeInternal
  // Source is in exact mode, so every item carries unit weight and can be fed
  // through the gadget's standard update path.
  private void twoWayMergeInternalStandard(final ReservoirLongsSketch source) {
    assert (source.getN() <= source.getK());
    final int numInputSamples = source.getNumSamples();
    for (int i = 0; i < numInputSamples; ++i) {
      gadget_.update(source.getValueAtPosition(i));
    }
  }

  // should be called ONLY by twoWayMergeInternal
  // Source is in sampling mode: each of its k samples represents n/k input items,
  // so items are folded in with their implicit weight rather than unit weight.
  private void twoWayMergeInternalWeighted(final ReservoirLongsSketch source) {
    // gadget_ capable of accepting (light) general weights
    assert (gadget_.getN() >= gadget_.getK());

    final int numSourceSamples = source.getK();

    final double sourceItemWeight = (source.getN() / (double) numSourceSamples);
    final double rescaled_prob = gadget_.getK() * sourceItemWeight; // K * weight
    double targetTotal = gadget_.getN(); // assumes fractional values during merge

    final int tgtK = gadget_.getK();

    for (int i = 0; i < numSourceSamples; ++i) {
      // inlining the update procedure, using targetTotal for the fractional N values
      // similar to ReservoirLongsSketch.update()
      // p(keep_new_item) = (k * w) / newTotal
      // require p(keep_new_item) < 1.0, meaning strict lightness

      targetTotal += sourceItemWeight;

      final double rescaled_one = targetTotal;
      assert (rescaled_prob < rescaled_one); // Use an exception to enforce strict lightness?
      final double rescaled_flip = rescaled_one * SamplingUtil.rand().nextDouble();
      if (rescaled_flip < rescaled_prob) {
        // Intentionally NOT doing optimization to extract slot number from rescaled_flip.
        // Grabbing new random bits to ensure all slots in play
        final int slotNo = SamplingUtil.rand().nextInt(tgtK);
        gadget_.insertValueAtPosition(source.getValueAtPosition(i), slotNo);
      } // end of inlined weight update
    } // end of loop over source samples

    // targetTotal was fractional but should now be an integer again. Could validate with
    // low tolerance, but for now just round to check.
    final long checkN = (long) Math.floor(0.5 + targetTotal);
    gadget_.forceIncrementItemsSeen(source.getN());
    assert (checkN == gadget_.getN());
  }
}
| 2,658 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/VarOptItemsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.sampling.PreambleUtil.EMPTY_FLAG_MASK;
import static org.apache.datasketches.sampling.PreambleUtil.GADGET_FLAG_MASK;
import static org.apache.datasketches.sampling.PreambleUtil.SER_VER;
import static org.apache.datasketches.sampling.PreambleUtil.TOTAL_WEIGHT_R_DOUBLE;
import static org.apache.datasketches.sampling.PreambleUtil.VO_PRELONGS_EMPTY;
import static org.apache.datasketches.sampling.PreambleUtil.VO_PRELONGS_FULL;
import static org.apache.datasketches.sampling.PreambleUtil.VO_PRELONGS_WARMUP;
import static org.apache.datasketches.sampling.PreambleUtil.extractFamilyID;
import static org.apache.datasketches.sampling.PreambleUtil.extractFlags;
import static org.apache.datasketches.sampling.PreambleUtil.extractHRegionItemCount;
import static org.apache.datasketches.sampling.PreambleUtil.extractK;
import static org.apache.datasketches.sampling.PreambleUtil.extractN;
import static org.apache.datasketches.sampling.PreambleUtil.extractRRegionItemCount;
import static org.apache.datasketches.sampling.PreambleUtil.extractResizeFactor;
import static org.apache.datasketches.sampling.PreambleUtil.extractSerVer;
import static org.apache.datasketches.sampling.PreambleUtil.extractTotalRWeight;
import static org.apache.datasketches.sampling.PreambleUtil.getAndCheckPreLongs;
import static org.apache.datasketches.sampling.SamplingUtil.pseudoHypergeometricLBonP;
import static org.apache.datasketches.sampling.SamplingUtil.pseudoHypergeometricUBonP;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
import org.apache.datasketches.common.ArrayOfBooleansSerDe;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.ResizeFactor;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SketchesStateException;
import org.apache.datasketches.common.Util;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* This sketch provides a variance optimal sample over an input stream of weighted items. The
* sketch can be used to compute subset sums over predicates, producing estimates with optimal
* variance for a given sketch size.
*
* <p>Using this sketch with uniformly constant item weights (e.g. 1.0) will produce a standard
* reservoir sample over the steam.</p>
*
* @param <T> The type of object held in the sketch.
*
* @author Jon Malkin
* @author Kevin Lang
*/
public final class VarOptItemsSketch<T> {
/**
* The smallest sampling array allocated: 16
*/
private static final int MIN_LG_ARR_ITEMS = 4;
/**
* Default sampling size multiple when reallocating storage: 8
*/
private static final ResizeFactor DEFAULT_RESIZE_FACTOR = ResizeFactor.X8;
private static final ArrayOfBooleansSerDe MARK_SERDE = new ArrayOfBooleansSerDe();
private int k_; // max size of sketch, in items
private int currItemsAlloc_; // currently allocated array size
private final ResizeFactor rf_; // resize factor
private ArrayList<T> data_; // stored sampled items
private ArrayList<Double> weights_; // weights for sampled items
private long n_; // total number of items processed by the sketch
private int h_; // number of items in heap
private int m_; // number of items in middle region
private int r_; // number of items in reservoir-like area
private double totalWtR_; // total weight of items in reservoir-like area
// The next two fields are hidden from the user because they are part of the state of the
// unioning algorithm, NOT part of a varopt sketch, or even of a varopt "gadget" (our name for
// the potentially invalid sketch that is maintained by the unioning algorithm). It would make
// more sense logically for these fields to be declared in the unioning object (whose entire
// purpose is storing the state of the unioning algorithm) but for reasons of programming
// convenience we are currently declaring them here. However, that could change in the future.
// Following int is:
// 1. Zero (for a varopt sketch)
// 2. Count of marked items in H region, if part of a unioning algo's gadget
private int numMarksInH_;
// The following array is absent in a varopt sketch, and notionally present in a gadget
// [although it really belongs in the unioning object]. If the array were to be made explicit,
// some additional coding would need to be done to ensure that all of the necessary data motion
// occurs and is properly tracked.
private ArrayList<Boolean> marks_;
  // used to return a shallow copy of the sketch's samples to a VarOptItemsSamples, as arrays
  // with any null value stripped and the R region weight computed
  /** Simple holder pairing sampled items with their parallel array of weights. */
  class Result {
    T[] items;        // sampled items, with any gap/null entries removed
    double[] weights; // effective weight of the item at the same index in items
  }
private VarOptItemsSketch(final int k, final ResizeFactor rf) {
// required due to a theorem about lightness during merging
if ((k < 1) || (k > (Integer.MAX_VALUE - 1))) {
throw new SketchesArgumentException("k must be at least 1 and less than " + Integer.MAX_VALUE
+ ". Found: " + k);
}
k_ = k;
n_ = 0;
rf_ = rf;
h_ = 0;
m_ = 0;
r_ = 0;
totalWtR_ = 0;
numMarksInH_ = 0;
final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(k_), "VarOptItemsSketch");
final int initialLgSize =
SamplingUtil.startingSubMultiple(ceilingLgK, rf_.lg(), MIN_LG_ARR_ITEMS);
currItemsAlloc_ = SamplingUtil.getAdjustedSize(k_, 1 << initialLgSize);
if (currItemsAlloc_ == k_) {
++currItemsAlloc_;
}
data_ = new ArrayList<>(currItemsAlloc_);
weights_ = new ArrayList<>(currItemsAlloc_);
marks_ = null;
}
  /**
   * Full-state constructor used when rebuilding a sketch (e.g. from a union result or
   * a serialized image). Adopts the provided lists directly without copying; callers
   * must hand over ownership. Invariants are enforced with asserts only, since all
   * call sites are package-internal.
   */
  private VarOptItemsSketch(final ArrayList<T> dataList,
                            final ArrayList<Double> weightList,
                            final int k,
                            final long n,
                            final int currItemsAlloc,
                            final ResizeFactor rf,
                            final int hCount,
                            final int rCount,
                            final double totalWtR) {
    assert dataList != null;
    assert weightList != null;
    assert dataList.size() == weightList.size();
    assert currItemsAlloc >= dataList.size();
    assert k >= 2;
    assert n >= 0;
    assert hCount >= 0;
    assert rCount >= 0;
    // Either no R region (all items in H), or sampling mode where the list holds
    // k items plus the single gap slot between the H and R regions.
    assert ((rCount == 0) && (dataList.size() == hCount)) || ((rCount > 0) && (dataList.size() == (k + 1)));

    k_ = k;
    n_ = n;
    h_ = hCount;
    r_ = rCount;
    m_ = 0; // middle region is always empty outside of an in-progress update
    totalWtR_ = totalWtR;
    currItemsAlloc_ = currItemsAlloc;
    rf_ = rf;
    data_ = dataList;
    weights_ = weightList;

    numMarksInH_ = 0;
    marks_ = null; // gadget state, set separately by callers that need it
  }
/**
* Construct a varopt sampling sketch with up to k samples using the default resize factor (8).
*
* @param k Maximum size of sampling. Allocated size may be smaller until sketch fills.
* Unlike many sketches in this package, this value does <em>not</em> need to be a
* power of 2.
* @param <T> The type of object held in the sketch.
* @return A VarOptItemsSketch initialized with maximum size k and resize factor rf.
*/
public static <T> VarOptItemsSketch<T> newInstance(final int k) {
return new VarOptItemsSketch<>(k, DEFAULT_RESIZE_FACTOR);
}
/**
* Construct a varopt sampling sketch with up to k samples using the specified resize factor.
*
* @param k Maximum size of sampling. Allocated size may be smaller until sketch fills.
* Unlike many sketches in this package, this value does <em>not</em> need to be a
* power of 2. The maximum size is Integer.MAX_VALUE-1.
* @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
* @param <T> The type of object held in the sketch.
* @return A VarOptItemsSketch initialized with maximum size k and resize factor rf.
*/
public static <T> VarOptItemsSketch<T> newInstance(final int k, final ResizeFactor rf) {
return new VarOptItemsSketch<>(k, rf);
}
/**
* Construct a varopt sketch for use as a unioning gadget, meaning the array of marked elements
* is also initialized.
*
* @param k Maximum size of sampling. Allocated size may be smaller until sketch fills.
* Unlike many sketches in this package, this value does <em>not</em> need to be a
* power of 2.
* @param <T> The type of object held in the sketch.
* @return A VarOptItemsSketch initialized with maximum size k and a valid array of marks.
*/
static <T> VarOptItemsSketch<T> newInstanceAsGadget(final int k) {
final VarOptItemsSketch<T> sketch = new VarOptItemsSketch<>(k, DEFAULT_RESIZE_FACTOR);
sketch.marks_ = new ArrayList<>(sketch.currItemsAlloc_);
return sketch;
}
/**
* Construct a varopt sketch as the output of a union's getResult() method. Because this method
* is package-private, we do not perform checks on the input values.
*
* <p>Assumes dataList.size() is the correct allocated size but does not check.</p>
*
* @param <T> The type of object held in the sketch.
* @param dataList an ArrayList of data
* @param weightList an ArrayList of weights
* @param k Maximum size of sampling. Allocated size may be smaller until sketch fills.
* Unlike many sketches in this package, this value does <em>not</em> need to be a
* power of 2.
* @param n The current count of items seen by the sketch
* @param hCount the count of heavy items
* @param rCount the reservoir count of (non-heavy) items
* @param totalWtR the sum of the reservoir weights.
* @return A VarOptItemsSketch initialized with maximum size k and a valid array of marks.
*/
static <T> VarOptItemsSketch<T> newInstanceFromUnionResult(final ArrayList<T> dataList,
final ArrayList<Double> weightList,
final int k,
final long n,
final int hCount,
final int rCount,
final double totalWtR) {
final VarOptItemsSketch<T> sketch = new VarOptItemsSketch<>(dataList, weightList, k, n,
dataList.size(), DEFAULT_RESIZE_FACTOR, hCount, rCount, totalWtR);
sketch.convertToHeap();
return sketch;
}
  /**
   * Returns a sketch instance of this class from the given srcMem,
   * which must be a Memory representation of this sketch class.
   *
   * <p>The serialized layout read here is: preamble longs, then hCount doubles of H-region
   * weights, then (gadget only) a packed boolean array of marks, then the serialized items
   * (H region followed by R region). R-region weights are not serialized; they are implied
   * by the total R weight stored in the preamble.</p>
   *
   * @param <T> The type of item this sketch contains
   * @param srcMem a Memory representation of a sketch of this class.
   *        <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
   * @param serDe An instance of ArrayOfItemsSerDe
   * @return a sketch instance of this class
   */
  public static <T> VarOptItemsSketch<T> heapify(final Memory srcMem,
                                                 final ArrayOfItemsSerDe<T> serDe) {
    // --- Preamble extraction ---
    final int numPreLongs = getAndCheckPreLongs(srcMem);
    final ResizeFactor rf = ResizeFactor.getRF(extractResizeFactor(srcMem));
    final int serVer = extractSerVer(srcMem);
    final int familyId = extractFamilyID(srcMem);
    final int flags = extractFlags(srcMem);
    final boolean isEmpty = (flags & EMPTY_FLAG_MASK) != 0;
    final boolean isGadget = (flags & GADGET_FLAG_MASK) != 0;

    // Check values: preamble length must match the empty/warmup/full state
    if (isEmpty) {
      if (numPreLongs != VO_PRELONGS_EMPTY) {
        throw new SketchesArgumentException("Possible corruption: Must be " + VO_PRELONGS_EMPTY
            + " for an empty sketch. Found: " + numPreLongs);
      }
    } else {
      if ((numPreLongs != VO_PRELONGS_WARMUP)
          && (numPreLongs != VO_PRELONGS_FULL)) {
        throw new SketchesArgumentException("Possible corruption: Must be " + VO_PRELONGS_WARMUP
            + " or " + VO_PRELONGS_FULL + " for a non-empty sketch. Found: " + numPreLongs);
      }
    }
    if (serVer != SER_VER) {
      throw new SketchesArgumentException(
          "Possible Corruption: Ser Ver must be " + SER_VER + ": " + serVer);
    }
    final int reqFamilyId = Family.VAROPT.getID();
    if (familyId != reqFamilyId) {
      throw new SketchesArgumentException(
          "Possible Corruption: FamilyID must be " + reqFamilyId + ": " + familyId);
    }

    final int k = extractK(srcMem);
    if (k < 1) {
      throw new SketchesArgumentException("Possible Corruption: k must be at least 1: " + k);
    }

    if (isEmpty) {
      assert numPreLongs == Family.VAROPT.getMinPreLongs();
      return new VarOptItemsSketch<>(k, rf);
    }

    final long n = extractN(srcMem);
    if (n < 0) {
      throw new SketchesArgumentException("Possible Corruption: n cannot be negative: " + n);
    }

    // get rest of preamble
    final int hCount = extractHRegionItemCount(srcMem);
    final int rCount = extractRRegionItemCount(srcMem);

    if (hCount < 0) {
      throw new SketchesArgumentException("Possible Corruption: H region count cannot be "
          + "negative: " + hCount);
    }
    if (rCount < 0) {
      throw new SketchesArgumentException("Possible Corruption: R region count cannot be "
          + "negative: " + rCount);
    }

    // Total R weight is only present (and only meaningful) with the full preamble.
    double totalRWeight = 0.0;
    if (numPreLongs == Family.VAROPT.getMaxPreLongs()) {
      if (rCount > 0) {
        totalRWeight = extractTotalRWeight(srcMem);
      } else {
        throw new SketchesArgumentException(
            "Possible Corruption: "
                + Family.VAROPT.getMaxPreLongs() + " preLongs but no items in R region");
      }
    }

    final int preLongBytes = numPreLongs << 3;

    final int totalItems = hCount + rCount;
    int allocatedItems = k + 1; // default to full

    if (rCount == 0) {
      // Not in sampling mode, so determine size to allocate, using ceilingLog2(hCount) as minimum
      final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(k), "heapify");
      final int minLgSize = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(hCount), "heapify");
      final int initialLgSize = SamplingUtil.startingSubMultiple(ceilingLgK, rf.lg(),
          Math.max(minLgSize, MIN_LG_ARR_ITEMS));

      allocatedItems = SamplingUtil.getAdjustedSize(k, 1 << initialLgSize);
      if (allocatedItems == k) {
        ++allocatedItems;
      }
    }

    // allocate full-sized ArrayLists, but we store only hCount weights at any moment
    // Weights start right after the preamble (plus the R-weight double, if present).
    final long weightOffsetBytes = TOTAL_WEIGHT_R_DOUBLE + (rCount > 0 ? Double.BYTES : 0);
    final ArrayList<Double> weightList = new ArrayList<>(allocatedItems);
    final double[] wts = new double[allocatedItems];
    srcMem.getDoubleArray(weightOffsetBytes, wts, 0, hCount);

    // can't use Arrays.asList(wts) since double[] rather than Double[]
    for (int i = 0; i < hCount; ++ i) {
      if (wts[i] <= 0.0) {
      throw new SketchesArgumentException("Possible Corruption: "
          + "Non-positive weight in heapify(): " + wts[i]);
      }
      weightList.add(wts[i]);
    }

    // marks, if we have a gadget
    long markBytes = 0;
    int markCount = 0;
    ArrayList<Boolean> markList = null;
    if (isGadget) {
      final long markOffsetBytes = preLongBytes + ((long) hCount * Double.BYTES);
      markBytes = ArrayOfBooleansSerDe.computeBytesNeeded(hCount);
      markList = new ArrayList<>(allocatedItems);

      final ArrayOfBooleansSerDe booleansSerDe = new ArrayOfBooleansSerDe();
      final Boolean[] markArray = booleansSerDe.deserializeFromMemory(
          srcMem.region(markOffsetBytes, (hCount >>> 3) + 1), 0, hCount);

      for (Boolean mark : markArray) {
        if (mark) { ++markCount; }
      }

      markList.addAll(Arrays.asList(markArray));
    }

    // Items follow the weights (and marks, if any): H region first, then R region.
    final long offsetBytes = preLongBytes + ((long) hCount * Double.BYTES) + markBytes;
    final T[] data = serDe.deserializeFromMemory(
        srcMem.region(offsetBytes, srcMem.getCapacity() - offsetBytes), 0, totalItems);
    final List<T> wrappedData = Arrays.asList(data);
    final ArrayList<T> dataList = new ArrayList<>(allocatedItems);
    dataList.addAll(wrappedData.subList(0, hCount));

    // Load items in R as needed
    // In sampling mode, rebuild the in-memory layout: a gap slot between H and R,
    // with R-region weights represented by the sentinel -1.0.
    if (rCount > 0) {
      weightList.add(-1.0); // the gap
      if (isGadget) { markList.add(false); } // the gap
      for (int i = 0; i < rCount; ++i) {
        weightList.add(-1.0);
        if (isGadget) { markList.add(false); }
      }

      dataList.add(null); // the gap
      dataList.addAll(wrappedData.subList(hCount, totalItems));
    }

    final VarOptItemsSketch<T> sketch =
            new VarOptItemsSketch<>(dataList, weightList, k, n,
                    allocatedItems, rf, hCount, rCount, totalRWeight);

    if (isGadget) {
      sketch.marks_ = markList;
      sketch.numMarksInH_ = markCount;
    }

    return sketch;
  }
/**
 * Returns the sketch's configured value of <i>k</i>, the maximum number of samples this
 * sketch will retain. The number of items currently held may be smaller.
 *
 * @return k, the maximum number of samples in the sketch
 */
public int getK() {
  return k_;
}
/**
 * Returns the total number of items this sketch has been presented with from the
 * input stream.
 *
 * @return n, the number of stream items the sketch has seen
 */
public long getN() {
  return n_;
}
/**
 * Returns the current number of items in the sketch, which may be smaller than the
 * sketch capacity.
 *
 * @return the number of items currently in the sketch
 */
public int getNumSamples() {
  final int itemsStored = h_ + r_;
  return (itemsStored < k_) ? itemsStored : k_;
}
/**
 * Gets a result iterator object over this sketch's (item, weight) samples.
 *
 * @return An object with an iterator over the results
 */
public VarOptItemsSamples<T> getSketchSamples() {
  return new VarOptItemsSamples<>(this);
}
/**
 * Randomly decide whether or not to include an item in the sample set.
 *
 * @param item an item of the set being sampled from
 * @param weight a strictly positive weight associated with the item
 */
public void update(final T item, final double weight) {
  // delegate to the internal update; mark=false since marks are used only during unions
  update(item, weight, false);
}
/**
 * Resets this sketch to the empty state, but retains the original value of k.
 */
public void reset() {
  // Recompute the initial array allocation from k and the resize factor, mirroring the
  // sizing used at construction/heapify time.
  final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(k_), "VarOptItemsSketch");
  final int initialLgSize =
      SamplingUtil.startingSubMultiple(ceilingLgK, rf_.lg(), MIN_LG_ARR_ITEMS);
  currItemsAlloc_ = SamplingUtil.getAdjustedSize(k_, 1 << initialLgSize);
  if (currItemsAlloc_ == k_) {
    ++currItemsAlloc_; // ensure room for k+1 entries (the array holds a gap slot when full)
  }
  // fresh, empty storage; old lists are discarded
  data_ = new ArrayList<>(currItemsAlloc_);
  weights_ = new ArrayList<>(currItemsAlloc_);
  if (marks_ != null) {
    marks_ = new ArrayList<>(currItemsAlloc_); // keep gadget status, clear the marks
  }
  // zero all counters and the reservoir weight
  n_ = 0;
  h_ = 0;
  m_ = 0;
  r_ = 0;
  numMarksInH_ = 0;
  totalWtR_ = 0.0;
}
/**
 * Returns a human-readable summary of the sketch.
 *
 * @return A string version of the sketch summary
 */
@Override
public String toString() {
  final StringBuilder sb = new StringBuilder();
  final String thisSimpleName = this.getClass().getSimpleName();
  // summary lists configuration (k, resize factor) and current state (h, r, weights, size)
  sb.append(LS);
  sb.append("### ").append(thisSimpleName).append(" SUMMARY: ").append(LS);
  sb.append(" k : ").append(k_).append(LS);
  sb.append(" h : ").append(h_).append(LS);
  sb.append(" r : ").append(r_).append(LS);
  sb.append(" weight_r : ").append(totalWtR_).append(LS);
  sb.append(" Current size : ").append(currItemsAlloc_).append(LS);
  sb.append(" Resize factor: ").append(rf_).append(LS);
  sb.append("### END SKETCH SUMMARY").append(LS);
  return sb.toString();
}
/**
 * Returns a human readable string of the preamble of a byte array image of a VarOptItemsSketch.
 *
 * @param byteArr the given byte array
 * @return a human readable string of the preamble of a byte array image of a VarOptItemsSketch.
 */
public static String toString(final byte[] byteArr) {
  // pure delegation: all preamble decoding lives in PreambleUtil
  return PreambleUtil.preambleToString(byteArr);
}
/**
 * Returns a human readable string of the preamble of a Memory image of a VarOptItemsSketch.
 *
 * @param mem the given Memory
 * @return a human readable string of the preamble of a Memory image of a VarOptItemsSketch.
 */
public static String toString(final Memory mem) {
  // pure delegation: all preamble decoding lives in PreambleUtil
  return PreambleUtil.preambleToString(mem);
}
/**
 * Returns a byte array representation of this sketch. May fail for polymorphic item types.
 *
 * @param serDe An instance of ArrayOfItemsSerDe
 * @return a byte array representation of this sketch
 */
public byte[] toByteArray(final ArrayOfItemsSerDe<? super T> serDe) {
  if ((h_ == 0) && (r_ == 0)) {
    // empty sketch: serDe is never invoked, so a null class is acceptable
    return toByteArray(serDe, null);
  }
  // pick any stored item's class; slot 0 is valid unless H is empty (then it is the gap)
  final int validIndex = (h_ == 0 ? 1 : 0);
  return toByteArray(serDe, data_.get(validIndex).getClass());
}
/**
 * Returns a byte array representation of this sketch. Copies contents into an array of the
 * specified class for serialization to allow for polymorphic types.
 *
 * @param serDe An instance of ArrayOfItemsSerDe
 * @param clazz The class represented by <T>; may be null only when the sketch is empty
 * @return a byte array representation of this sketch
 */
// bytes will be null only if empty == true
public byte[] toByteArray(final ArrayOfItemsSerDe<? super T> serDe, final Class<?> clazz) {
  final int preLongs, numMarkBytes, outBytes;
  final boolean empty = (r_ == 0) && (h_ == 0);
  byte[] itemBytes = null; // for serialized items from serDe
  // gadget flag preserves union-intermediate state across serialization
  int flags = marks_ == null ? 0 : GADGET_FLAG_MASK;
  if (empty) {
    preLongs = Family.VAROPT.getMinPreLongs();
    outBytes = Family.VAROPT.getMinPreLongs() << 3; // only contains the minimum header info
    flags |= EMPTY_FLAG_MASK;
  } else {
    // warmup (exact) mode uses a shorter preamble with no total-R-weight field
    preLongs = (r_ == 0 ? PreambleUtil.VO_PRELONGS_WARMUP : Family.VAROPT.getMaxPreLongs());
    itemBytes = serDe.serializeToByteArray(getDataSamples(clazz));
    numMarkBytes = marks_ == null ? 0 : ArrayOfBooleansSerDe.computeBytesNeeded(h_);
    outBytes = (preLongs << 3) + (h_ * Double.BYTES) + numMarkBytes + itemBytes.length;
  }
  final byte[] outArr = new byte[outBytes];
  final WritableMemory mem = WritableMemory.writableWrap(outArr);
  // build first preLong
  PreambleUtil.insertPreLongs(mem, preLongs); // Byte 0
  PreambleUtil.insertLgResizeFactor(mem, rf_.lg());
  PreambleUtil.insertSerVer(mem, SER_VER); // Byte 1
  PreambleUtil.insertFamilyID(mem, Family.VAROPT.getID()); // Byte 2
  PreambleUtil.insertFlags(mem, flags); // Byte 3
  PreambleUtil.insertK(mem, k_); // Bytes 4-7
  if (!empty) {
    PreambleUtil.insertN(mem, n_); // Bytes 8-15
    PreambleUtil.insertHRegionItemCount(mem, h_); // Bytes 16-19
    PreambleUtil.insertRRegionItemCount(mem, r_); // Bytes 20-23
    if (r_ > 0) {
      PreambleUtil.insertTotalRWeight(mem, totalWtR_); // Bytes 24-31
    }
    // write the first h_ weights; only H-region items carry explicit weights
    int offset = preLongs << 3;
    for (int i = 0; i < h_; ++i) {
      mem.putDouble(offset, weights_.get(i));
      offset += Double.BYTES;
    }
    // write the first h_ marks, iff we have a gadget
    if (marks_ != null) {
      final byte[] markBytes;
      markBytes = MARK_SERDE.serializeToByteArray(marks_.subList(0, h_).toArray(new Boolean[0]));
      mem.putByteArray(offset, markBytes, 0, markBytes.length);
      offset += markBytes.length;
    }
    // write the sample items, using offset from earlier
    mem.putByteArray(offset, itemBytes, 0, itemBytes.length);
  }
  return outArr;
}
/**
 * Computes an estimated subset sum from the entire stream for objects matching a given
 * predicate. Provides a lower bound, estimate, and upper bound using a target of 2 standard
 * deviations.
 *
 * <p>This is technically a heuristic method, and tries to err on the conservative side.</p>
 *
 * @param predicate A predicate to use when identifying items.
 * @return A summary object containing the estimate, upper and lower bounds, and the total
 * sketch weight.
 */
public SampleSubsetSummary estimateSubsetSum(final Predicate<T> predicate) {
  if (n_ == 0) {
    // no items seen: everything is exactly zero
    return new SampleSubsetSummary(0.0, 0.0, 0.0, 0.0);
  }
  double totalWtH = 0.0;
  double hTrueWeight = 0.0;
  int idx = 0;
  // exact pass over the H ("heavy") region: each item carries its true weight
  for (; idx < h_; ++idx) {
    final double wt = weights_.get(idx);
    totalWtH += wt;
    if (predicate.test(data_.get(idx))) {
      hTrueWeight += wt;
    }
  }
  // if only heavy items, we have an exact answer
  if (r_ == 0) {
    return new SampleSubsetSummary(hTrueWeight, hTrueWeight, hTrueWeight, hTrueWeight);
  }
  final long numSampled = n_ - h_;
  assert numSampled > 0;
  final double effectiveSamplingRate = r_ / (double) numSampled;
  assert effectiveSamplingRate >= 0.0;
  assert effectiveSamplingRate <= 1.0;
  // count matching items in the R (reservoir) region; idx currently sits at the gap slot
  int rTrueCount = 0;
  ++idx; // skip the gap
  for (; idx < (k_ + 1); ++idx) {
    if (predicate.test(data_.get(idx))) {
      ++rTrueCount;
    }
  }
  // bound the true fraction of R's weight via pseudo-hypergeometric bounds on p
  final double lbTrueFraction = pseudoHypergeometricLBonP(r_, rTrueCount, effectiveSamplingRate);
  final double estimatedTrueFraction = (1.0 * rTrueCount) / r_;
  final double ubTrueFraction = pseudoHypergeometricUBonP(r_, rTrueCount, effectiveSamplingRate);
  return new SampleSubsetSummary(
      hTrueWeight + (totalWtR_ * lbTrueFraction),
      hTrueWeight + (totalWtR_ * estimatedTrueFraction),
      hTrueWeight + (totalWtR_ * ubTrueFraction),
      totalWtH + totalWtR_);
}
/**
 * Returns a VarOptItemsSketch.Result structure containing the items and weights in separate
 * lists. The returned list lengths may be smaller than the total capacity.
 *
 * @return A Result object containing items and weights, or null if the sketch is empty.
 */
Result getSamplesAsArrays() {
  if ((h_ + r_) == 0) {
    return null; // no samples stored
  }
  // pick the class of any valid stored item; slot 0 is the gap when H is empty
  final int idx = (h_ == 0) ? 1 : 0;
  return getSamplesAsArrays(data_.get(idx).getClass());
}
/**
 * Creates a copy of the sketch, optionally discarding any information about marks that would
 * indicate the class's use as a union gadget as opposed to a valid sketch.
 *
 * @param asSketch If true, copies as a sketch; if false, copies as a union gadget
 * @param adjustedN Target value of n for the resulting sketch. Ignored if negative.
 * @return A copy of the sketch.
 */
VarOptItemsSketch<T> copyAndSetN(final boolean asSketch, final long adjustedN) {
  final VarOptItemsSketch<T> copy = new VarOptItemsSketch<>(data_, weights_, k_, n_,
      currItemsAlloc_, rf_, h_, r_, totalWtR_);
  if (!asSketch) {
    // retain gadget state: the mark list and its H-region count survive the copy
    copy.marks_ = marks_;
    copy.numMarksInH_ = numMarksInH_;
  }
  if (adjustedN >= 0) {
    copy.n_ = adjustedN;
  }
  return copy;
}
/**
 * Strips the mark array from the object, making what had been a gadget indistinguishable from
 * a sketch. Avoids an extra copy.
 */
void stripMarks() {
  assert marks_ != null;
  marks_ = null;
  numMarksInH_ = 0;
}
/**
 * Returns a VarOptItemsSketch.Result structure containing the items and weights in separate
 * lists. The returned list lengths may be smaller than the total capacity.
 *
 * <p>This method allocates an array of class <em>clazz</em>, which must either match or
 * be parent of T. This method should be used when objects in the array are all instances of T
 * but are not necessarily instances of the base class.</p>
 *
 * @param clazz A class to which the items are cast before returning
 * @return A Result object containing items and weights, or null if the sketch is empty.
 */
@SuppressWarnings("unchecked")
Result getSamplesAsArrays(final Class<?> clazz) {
  if ((r_ + h_) == 0) {
    return null;
  }
  // are Array.asList(data_.subList()) copies better?
  final int numSamples = getNumSamples();
  final T[] prunedItems = (T[]) Array.newInstance(clazz, numSamples);
  final double[] prunedWeights = new double[numSamples];
  int j = 0;
  // shared weight for all R-region items (tau); NaN-safe here only because r_ > 0 when used
  final double rWeight = totalWtR_ / r_;
  for (int i = 0; j < numSamples; ++i) {
    final T item = data_.get(i);
    if (item != null) { // null marks the gap slot between H and R; skip it
      prunedItems[j] = item;
      // H items carry positive explicit weights; R items have a negative sentinel,
      // so substitute the common reservoir weight
      prunedWeights[j] = (weights_.get(i) > 0 ? weights_.get(i) : rWeight);
      ++j;
    }
  }
  final Result output = new Result();
  output.items = prunedItems;
  output.weights = prunedWeights;
  return output;
}
// package-private getters
// Relies on ArrayList for bounds checking; callers must be prepared for a null
// (the gap slot in the middle of the list).
T getItem(final int idx) {
  return data_.get(idx);
}
// Relies on ArrayList for bounds checking; callers must be prepared for a negative
// value (either the gap sentinel or an R-region item's placeholder weight).
double getWeight(final int idx) {
  return weights_.get(idx);
}
// Relies on ArrayList for bounds checking; assumes a gadget (marks_ non-null) and that the
// caller can handle the gap slot in the middle of the list.
boolean getMark(final int idx) {
  return marks_.get(idx);
}
// current count of items in the exact/heavy (H) region
int getHRegionCount() {
  return h_;
}
// current count of items in the reservoir (R) region
int getRRegionCount() {
  return r_;
}
// number of marked items currently in the H region (union bookkeeping)
int getNumMarksInH() {
  return numMarksInH_;
}
// Needed by result object and for unioning.
// tau is the common weight shared by reservoir items; undefined (NaN) while in exact mode.
double getTau() {
  return (r_ == 0) ? Double.NaN : (totalWtR_ / r_);
}
// total weight currently attributed to the R region
double getTotalWtR() {
  return totalWtR_;
}
// package-private setter, used to resolve gadget into sketch during union
void forceSetK(final int k) {
  assert k > 0; // k must stay strictly positive
  k_ = k;
}
/**
 * Internal implementation of update() which requires the user to know if an item is
 * marked as coming from the reservoir region of a sketch. The marks are used only in
 * merging.
 *
 * @param item an item of the set being sampled from; null items are silently ignored
 * @param weight a strictly positive weight associated with the item
 * @param mark true if an item comes from a sketch's reservoir region
 */
void update(final T item, final double weight, final boolean mark) {
  if (item == null) {
    return; // nulls are ignored (null is reserved for the internal gap slot)
  }
  if (weight <= 0.0) {
    throw new SketchesArgumentException("Item weights must be strictly positive: "
        + weight + ", for item " + item.toString());
  }
  ++n_;
  if (r_ == 0) {
    // exact mode: store the item directly until H would exceed k
    updateWarmupPhase(item, weight, mark);
  } else {
    // sketch is in estimation mode, so we can make the following check
    assert (h_ == 0) || (peekMin() >= getTau());
    // what tau would be if deletion candidates turn out to be R plus the new item
    // note: (r_ + 1) - 1 is intentional
    final double hypotheticalTau = (weight + totalWtR_) / ((r_ + 1) - 1);
    // is new item's turn to be considered for reservoir?
    final boolean condition1 = (h_ == 0) || (weight <= peekMin());
    // is new item light enough for reservoir?
    final boolean condition2 = weight < hypotheticalTau;
    if (condition1 && condition2) {
      updateLight(item, weight, mark);
    } else if (r_ == 1) {
      updateHeavyREq1(item, weight, mark);
    } else {
      updateHeavyGeneral(item, weight, mark);
    }
  }
}
/**
 * Decreases sketch's value of k by 1, updating stored values as needed.
 *
 * <p>Subject to certain pre-conditions, decreasing k causes tau to increase. This fact is used by
 * the unioning algorithm to force "marked" items out of H and into the reservoir region.</p>
 */
void decreaseKBy1() {
  if (k_ <= 1) {
    throw new SketchesStateException("Cannot decrease k below 1 in union");
  }
  if ((h_ == 0) && (r_ == 0)) {
    // exact mode, but no data yet; this reduction is somewhat gratuitous
    --k_;
  } else if ((h_ > 0) && (r_ == 0)) {
    // exact mode, but we have some data
    --k_;
    if (h_ > k_) {
      // H no longer fits; switch to estimation (reservoir) mode
      transitionFromWarmup();
    }
  } else if ((h_ > 0) && (r_ > 0)) {
    // reservoir mode, but we have some exact samples.
    // Our strategy will be to pull an item out of H (which we are allowed to do since it's
    // still just data), reduce k, and then re-insert the item
    // first, slide the R zone to the left by 1, temporarily filling the gap
    final int oldGapIdx = h_;
    final int oldFinalRIdx = (h_ + 1 + r_) - 1;
    assert oldFinalRIdx == k_;
    swapValues(oldFinalRIdx, oldGapIdx);
    // now we pull an item out of H; any item is ok, but if we grab the rightmost and then
    // reduce h_, the heap invariant will be preserved (and the gap will be restored), plus
    // the push() of the item that will probably happen later will be cheap.
    final int pulledIdx = h_ - 1;
    final T pulledItem = data_.get(pulledIdx);
    final double pulledWeight = weights_.get(pulledIdx);
    final boolean pulledMark = marks_.get(pulledIdx);
    if (pulledMark) { --numMarksInH_; }
    weights_.set(pulledIdx, -1.0); // to make bugs easier to spot
    --h_;
    --k_;
    --n_; // will be re-incremented with the update
    update(pulledItem, pulledWeight, pulledMark);
  } else if ((h_ == 0) && (r_ > 0)) {
    // pure reservoir mode, so can simply eject a randomly chosen sample from the reservoir
    assert r_ >= 2;
    final int rIdxToDelete = 1 + SamplingUtil.rand().nextInt(r_); // 1 for the gap
    final int rightmostRIdx = (1 + r_) - 1;
    swapValues(rIdxToDelete, rightmostRIdx);
    weights_.set(rightmostRIdx, -1.0); // sentinel: slot no longer holds a live sample
    --k_;
    --r_;
  }
}
/* In the "light" case the new item has weight <= old_tau, so
   would appear to the right of the R items in a hypothetical reverse-sorted
   list. It is easy to prove that it is light enough to be part of this
   round's downsampling */
private void updateLight(final T item, final double weight, final boolean mark) {
  assert r_ >= 1;
  assert (r_ + h_) == k_; // array is full: H + gap + R occupies k+1 slots
  final int mSlot = h_; // index of the gap, which becomes the M region
  // place the new item directly into the (single-slot) M region
  data_.set(mSlot, item);
  weights_.set(mSlot, weight);
  if (marks_ != null) { marks_.set(mSlot, mark); }
  ++m_;
  // candidate set is R plus the new item; grow it and downsample back to size-1 candidates
  growCandidateSet(totalWtR_ + weight, r_ + 1);
}
/* In the "heavy" case the new item has weight > old_tau, so would
   appear to the left of items in R in a hypothetical reverse-sorted list and
   might or might not be light enough be part of this round's downsampling.
   [After first splitting off the R=1 case] we greatly simplify the code by
   putting the new item into the H heap whether it needs to be there or not.
   In other words, it might go into the heap and then come right back out,
   but that should be okay because pseudo_heavy items cannot predominate
   in long streams unless (max wt) / (min wt) > o(exp(N)) */
private void updateHeavyGeneral(final T item, final double weight, final boolean mark) {
  assert m_ == 0;
  assert r_ >= 2;
  assert (r_ + h_) == k_;
  // put into H, although may come back out momentarily
  push(item, weight, mark);
  // candidate set starts as just R; growCandidateSet may pull light H items back out
  growCandidateSet(totalWtR_, r_);
}
/* The analysis of this case is similar to that of the general heavy case.
   The one small technical difference is that since R < 2, we must grab an M item
   to have a valid starting point for continue_by_growing_candidate_set () */
private void updateHeavyREq1(final T item, final double weight, final boolean mark) {
  assert m_ == 0;
  assert r_ == 1;
  assert (r_ + h_) == k_;
  push(item, weight, mark); // new item into H
  popMinToMRegion(); // pop lightest back into M
  // Any set of two items is downsample-able to one item,
  // so the two lightest items are a valid starting point for the following
  final int mSlot = k_ - 1; // array is k+1, 1 in R, so slot before is M
  growCandidateSet(weights_.get(mSlot) + totalWtR_, 2);
}
/* Exact-mode update: append the item to H, growing storage as needed; once H exceeds k,
   transition the sketch into estimation (reservoir) mode. */
private void updateWarmupPhase(final T item, final double wt, final boolean mark) {
  assert r_ == 0;
  assert m_ == 0;
  assert h_ <= k_;
  if (h_ >= currItemsAlloc_) {
    growDataArrays(); // expand backing lists per the resize factor
  }
  // store items as they come in, until full
  data_.add(h_, item);
  weights_.add(h_, wt);
  if (marks_ != null) { marks_.add(h_, mark); }
  ++h_;
  numMarksInH_ += mark ? 1 : 0;
  // check if need to heapify
  if (h_ > k_) {
    transitionFromWarmup();
  }
}
/* Converts the sketch from exact (warmup) mode to estimation mode: heapify H, move the two
   lightest items out, and seed the R region with the single lightest item. */
private void transitionFromWarmup() {
  // Move 2 lightest items from H to M
  // But the lighter really belongs in R, so update counts to reflect that
  convertToHeap();
  popMinToMRegion();
  popMinToMRegion();
  --m_;
  ++r_;
  assert h_ == (k_ - 1);
  assert m_ == 1;
  assert r_ == 1;
  // Update total weight in R then, having grabbed the value, overwrite in
  // weight_ array to help make bugs more obvious
  totalWtR_ = weights_.get(k_); // only one item, known location
  weights_.set(k_, -1.0);
  // The two lightest items are necessarily downsample-able to one item, and are therefore a
  // valid initial candidate set.
  growCandidateSet(weights_.get(k_ - 1) + totalWtR_, 2);
}
/* Validates the heap condition for the weight array */
/*
private void validateHeap() {
for (int j = h_ - 1; j >= 1; --j) {
final int p = ((j + 1) / 2) - 1;
assert weights_.get(p) <= weights_.get(j);
}
}
*/
/* Bottom-up heapification (by weight) of the first h_ entries of data_/weights_.
   In contrast to other parts of the library, this has nothing to do with on- or
   off-heap storage or the Memory package. */
private void convertToHeap() {
  if (h_ < 2) {
    return; // 0 or 1 items are trivially a heap
  }
  final int lastIdx = h_ - 1;
  // sift down every non-leaf node, starting from the last one and working toward the root
  for (int node = ((lastIdx + 1) / 2) - 1; node >= 0; --node) {
    restoreTowardsLeaves(node);
  }
  //validateHeap();
}
/* Sift-down: restores the min-heap invariant (on weights_) from slotIn toward the leaves,
   swapping with the lighter child until neither child is lighter. */
private void restoreTowardsLeaves(final int slotIn) {
  assert h_ > 0;
  final int lastSlot = h_ - 1;
  assert slotIn <= lastSlot;
  int slot = slotIn;
  int child = (2 * slotIn) + 1; // might be invalid, need to check
  while (child <= lastSlot) {
    final int child2 = child + 1; // might also be invalid
    if ((child2 <= lastSlot) && (weights_.get(child2) < weights_.get(child))) {
      // switch to other child if it's both valid and smaller
      child = child2;
    }
    if (weights_.get(slot) <= weights_.get(child)) {
      // invariant holds so we're done
      break;
    }
    // swap and continue
    swapValues(slot, child);
    slot = child;
    child = (2 * slot) + 1; // might be invalid, checked on next loop
  }
}
/* Sift-up: restores the min-heap invariant (on weights_) from slotIn toward the root,
   swapping with the parent while the parent is heavier. */
private void restoreTowardsRoot(final int slotIn) {
  int child = slotIn;
  int parent = ((child + 1) / 2) - 1; // meaningful only while child >= 1
  while ((child > 0) && (weights_.get(child) < weights_.get(parent))) {
    swapValues(child, parent);
    child = parent;
    parent = ((child + 1) / 2) - 1;
  }
}
/* Inserts (item, wt, mark) into the H min-heap: place in the first free slot, then sift up. */
private void push(final T item, final double wt, final boolean mark) {
  data_.set(h_, item);
  weights_.set(h_, wt);
  if (marks_ != null) {
    marks_.set(h_, mark);
    if (mark) { numMarksInH_ += 1; }
  }
  ++h_;
  // sift the new entry (at the old h_) up to its heap position
  restoreTowardsRoot(h_ - 1);
}
/* Returns the smallest weight in H — the root of the min-heap. Requires h_ > 0. */
private double peekMin() {
  assert h_ > 0;
  return weights_.get(0);
}
/* Moves the lightest H item into the M region: swap the heap root with the last H slot,
   shrink H, grow M, and restore the heap. Also fixes up the marked-in-H count. */
private void popMinToMRegion() {
  assert h_ > 0;
  assert (h_ + m_ + r_) == (k_ + 1);
  if (h_ == 1) {
    // just update bookkeeping: the single H item is already adjacent to M
    ++m_;
    --h_;
  } else {
    // main case
    final int tgt = h_ - 1; // last slot, will swap with root
    swapValues(0, tgt);
    ++m_;
    --h_;
    restoreTowardsLeaves(0);
  }
  // h_ was just decremented, so slot h_ now holds the popped item; if it was marked,
  // it no longer counts toward numMarksInH_
  if (isMarked(h_)) {
    --numMarksInH_;
  }
}
/* When entering here we should be in a well-characterized state where the
   new item has been placed in either h or m and we have a valid but not necessarily
   maximal sampling plan figured out. The array is completely full at this point.
   Everyone in h and m has an explicit weight. The candidates are right-justified
   and are either just the r set or the r set + exactly one m item. The number
   of cands is at least 2. We will now grow the candidate set as much as possible
   by pulling sufficiently light items from h to m.
*/
private void growCandidateSet(double wtCands, int numCands) {
  assert (h_ + m_ + r_) == (k_ + 1);
  assert numCands >= 2; // essential
  assert numCands == (m_ + r_); // essential
  assert (m_ == 0) || (m_ == 1);
  while (h_ > 0) {
    final double nextWt = peekMin();
    final double nextTotWt = wtCands + nextWt;
    // test for strict lightness of next prospect (denominator multiplied through)
    // ideally: (nextWt * (nextNumCands-1) < nextTotWt) but can just
    // use numCands directly
    if ((nextWt * numCands) < nextTotWt) {
      // prospect is light enough: absorb it into the candidate set and keep going
      wtCands = nextTotWt;
      ++numCands;
      popMinToMRegion(); // adjusts h_ and m_
    } else {
      break; // remaining H items are all too heavy to be candidates
    }
  }
  // reduce the (maximal) candidate set back down to numCands - 1 reservoir items
  downsampleCandidateSet(wtCands, numCands);
}
/* Returns a uniformly random slot index within the R region (which starts after H and M). */
private int pickRandomSlotInR() {
  assert r_ > 0;
  final int rStart = h_ + m_;
  return (r_ == 1) ? rStart : rStart + SamplingUtil.rand().nextInt(r_);
}
/* Chooses which candidate slot to delete during downsampling, weighted so that each
   candidate survives with probability proportional to its weight (R items share tau). */
private int chooseDeleteSlot(final double wtCand, final int numCand) {
  assert r_ > 0;
  if (m_ == 0) {
    // this happens if we insert a really heavy item
    return pickRandomSlotInR();
  } else if (m_ == 1) {
    // check if we keep the item in M or pick one from R
    // p(keep) = (numCand - 1) * wt_M / wt_cand
    final double wtMCand = weights_.get(h_); // slot of item in M is h_
    if ((wtCand * SamplingUtil.nextDoubleExcludeZero()) < ((numCand - 1) * wtMCand)) {
      return pickRandomSlotInR(); // keep item in M
    } else {
      return h_; // index of item in M
    }
  } else {
    // general case
    final int deleteSlot = chooseWeightedDeleteSlot(wtCand, numCand);
    final int firstRSlot = h_ + m_;
    if (deleteSlot == firstRSlot) {
      // sentinel meaning "delete out of R": pick the actual R slot uniformly
      return pickRandomSlotInR();
    } else {
      return deleteSlot;
    }
  }
}
/* Weighted selection over the M-region candidates: walks the M slots accumulating
   (numToKeep * weight) against a random threshold scaled by wtCand; returns the first M slot
   that crosses the threshold, or finalM + 1 as a sentinel meaning "delete out of R". */
private int chooseWeightedDeleteSlot(final double wtCand, final int numCand) {
  assert m_ >= 1;
  final int offset = h_; // M region starts immediately after H
  final int finalM = (offset + m_) - 1;
  final int numToKeep = numCand - 1;
  double leftSubtotal = 0.0;
  // random threshold in (0, wtCand]; negated so the comparison can accumulate upward
  double rightSubtotal = -1.0 * wtCand * SamplingUtil.nextDoubleExcludeZero();
  for (int i = offset; i <= finalM; ++i) {
    leftSubtotal += numToKeep * weights_.get(i);
    rightSubtotal += wtCand;
    if (leftSubtotal < rightSubtotal) {
      return i;
    }
  }
  // this slot tells caller that we need to delete out of R
  return finalM + 1;
}
/* Downsamples the candidate set (M + R) from numCands items to numCands - 1 reservoir items:
   deletes one weighted-random candidate, restores the gap at the leftmost candidate slot,
   and folds the remaining candidates into R with combined weight wtCands. */
private void downsampleCandidateSet(final double wtCands, final int numCands) {
  assert numCands >= 2;
  assert (h_ + numCands) == (k_ + 1);
  // need this before overwriting anything
  final int deleteSlot = chooseDeleteSlot(wtCands, numCands);
  final int leftmostCandSlot = h_;
  assert deleteSlot >= leftmostCandSlot;
  assert deleteSlot <= k_;
  // overwrite weights for items from M moving into R, to make bugs more obvious
  final int stopIdx = leftmostCandSlot + m_;
  for (int j = leftmostCandSlot; j < stopIdx; ++j) {
    weights_.set(j, -1.0);
  }
  // The next two lines work even when deleteSlot == leftmostCandSlot:
  // overwrite the deleted item with the leftmost candidate, then null out the gap
  data_.set(deleteSlot, data_.get(leftmostCandSlot));
  data_.set(leftmostCandSlot, null);
  m_ = 0;
  r_ = numCands - 1;
  totalWtR_ = wtCands;
}
/* Exchanges the data_, weights_, and (when present) marks_ entries at the two indices. */
private void swapValues(final int src, final int dst) {
  final T tmpItem = data_.get(src);
  final Double tmpWt = weights_.get(src);
  data_.set(src, data_.get(dst));
  data_.set(dst, tmpItem);
  weights_.set(src, weights_.get(dst));
  weights_.set(dst, tmpWt);
  if (marks_ != null) {
    final Boolean tmpMark = marks_.get(src);
    marks_.set(src, marks_.get(dst));
    marks_.set(dst, tmpMark);
  }
}
/* True iff this is a gadget (marks_ non-null) and the item at idx is marked. */
private boolean isMarked(final int idx) {
  return (marks_ != null) && marks_.get(idx);
}
/**
 * Returns a copy of the items (no weights) in the sketch as members of Class <em>clazz</em>.
 * The returned array length may be smaller than the total capacity.
 *
 * <p>This method allocates an array of class <em>clazz</em>, which must either match or
 * extend T. This method should be used when objects in the array are all instances of T but
 * are not necessarily instances of the base class.</p>
 *
 * @param clazz A class to which the items are cast before returning
 * @return A copy of the sample array
 */
@SuppressWarnings("unchecked")
private T[] getDataSamples(final Class<?> clazz) {
  assert (h_ + r_) > 0;
  // are 2 Array.asList(data_.subList()) copies better?
  final T[] samples = (T[]) Array.newInstance(clazz, getNumSamples());
  int out = 0;
  for (final T item : data_) {
    if (item != null) { // skip the null gap slot between H and R
      samples[out++] = item;
    }
  }
  return samples;
}
/**
 * Increases allocated sampling size by (adjusted) ResizeFactor and copies items from old
 * sampling. Only happens when buffer is not full, so don't need to worry about blindly copying
 * the array items.
 */
private void growDataArrays() {
  // multiply the allocation by the resize factor, capped by the adjusted target size
  currItemsAlloc_ = SamplingUtil.getAdjustedSize(k_, currItemsAlloc_ << rf_.lg());
  if (currItemsAlloc_ == k_) {
    ++currItemsAlloc_; // keep one extra slot for the gap when the sketch fills
  }
  data_.ensureCapacity(currItemsAlloc_);
  weights_.ensureCapacity(currItemsAlloc_);
  if (marks_ != null) {
    marks_.ensureCapacity(currItemsAlloc_);
  }
}
}
| 2,659 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/SampleSubsetSummary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
/**
 * A simple immutable value object to capture the results of a subset sum query on a
 * sampling sketch.
 *
 * @author Jon Malkin
 */
public class SampleSubsetSummary {
  // All bounds refer to the estimated subset sum; fields are set once at construction.
  private final double lowerBound;
  private final double estimate;
  private final double upperBound;
  private final double totalSketchWeight;

  /**
   * Package-private constructor used by the sampling sketches to report query results.
   *
   * @param lowerBound lower bound on the subset sum
   * @param estimate point estimate of the subset sum
   * @param upperBound upper bound on the subset sum
   * @param totalSketchWeight total weight held by the sketch
   */
  SampleSubsetSummary(final double lowerBound,
                      final double estimate,
                      final double upperBound,
                      final double totalSketchWeight) {
    this.lowerBound = lowerBound;
    this.estimate = estimate;
    this.upperBound = upperBound;
    this.totalSketchWeight = totalSketchWeight;
  }

  /**
   * @return the Lower Bound
   */
  public double getLowerBound() {
    return lowerBound;
  }

  /**
   * @return the total sketch weight
   */
  public double getTotalSketchWeight() {
    return totalSketchWeight;
  }

  /**
   * @return the Upper Bound
   */
  public double getUpperBound() {
    return upperBound;
  }

  /**
   * @return the subset sum estimate
   */
  public double getEstimate() {
    return estimate;
  }
}
| 2,660 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/ReservoirItemsUnion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.sampling.PreambleUtil.EMPTY_FLAG_MASK;
import static org.apache.datasketches.sampling.PreambleUtil.FAMILY_BYTE;
import static org.apache.datasketches.sampling.PreambleUtil.SER_VER;
import static org.apache.datasketches.sampling.PreambleUtil.extractEncodedReservoirSize;
import static org.apache.datasketches.sampling.PreambleUtil.extractFlags;
import static org.apache.datasketches.sampling.PreambleUtil.extractMaxK;
import static org.apache.datasketches.sampling.PreambleUtil.extractPreLongs;
import static org.apache.datasketches.sampling.PreambleUtil.extractSerVer;
import java.util.ArrayList;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.ResizeFactor;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* Class to union reservoir samples of generic items.
*
* <p>For efficiency reasons, the unioning process picks one of the two sketches to use as the
* base. As a result, we provide only a stateful union. Using the same approach for a merge would
* result in unpredictable side effects on the underlying sketches.</p>
*
* <p>A union object is created with a maximum value of <code>k</code>, represented using the
* ReservoirSize class. The unioning process may cause the actual number of samples to fall below
* that maximum value, but never to exceed it. The result of a union will be a reservoir where
* each item from the global input has a uniform probability of selection, but there are no
* claims about higher order statistics. For instance, in general all possible permutations of
* the global input are not equally likely.</p>
*
* <p>If taking the union of two reservoirs of different sizes, the output sample will contain no more
* than MIN(k_1, k_2) samples.</p>
*
* @param <T> The specific Java type for this sketch
*
* @author Jon Malkin
* @author Kevin Lang
*/
public final class ReservoirItemsUnion<T> {
private ReservoirItemsSketch<T> gadget_;
private final int maxK_;
/**
 * Empty constructor using ReservoirSize-encoded maxK value
 *
 * <p>No gadget sketch is allocated here; it is created lazily on the first update.</p>
 *
 * @param maxK Maximum allowed reservoir capacity for this union
 */
private ReservoirItemsUnion(final int maxK) {
  maxK_ = maxK;
}
/**
 * Creates an empty Union with a maximum reservoir capacity of size k.
 *
 * @param <T> The type of item this sketch contains
 * @param maxK The maximum allowed reservoir capacity for any sketches in the union
 * @return A new ReservoirItemsUnion
 */
public static <T> ReservoirItemsUnion<T> newInstance(final int maxK) {
  // Simple static factory; all sampling state is allocated lazily on first update().
  final ReservoirItemsUnion<T> union = new ReservoirItemsUnion<>(maxK);
  return union;
}
/**
 * Instantiates a Union from Memory
 *
 * @param <T> The type of item this sketch contains
 * @param srcMem Memory object containing a serialized union
 * @param serDe An instance of ArrayOfItemsSerDe
 * @return A ReservoirItemsUnion created from the provided Memory
 * @throws SketchesArgumentException if the preamble long count or serialization
 *         version is not recognized
 */
public static <T> ReservoirItemsUnion<T> heapify(final Memory srcMem,
    final ArrayOfItemsSerDe<T> serDe) {
  Family.RESERVOIR_UNION.checkFamilyID(srcMem.getByte(FAMILY_BYTE));
  final int numPreLongs = extractPreLongs(srcMem);
  final int serVer = extractSerVer(srcMem);
  final boolean isEmpty = (extractFlags(srcMem) & EMPTY_FLAG_MASK) != 0;
  int maxK = extractMaxK(srcMem);
  final boolean preLongsEqMin = (numPreLongs == Family.RESERVOIR_UNION.getMinPreLongs());
  final boolean preLongsEqMax = (numPreLongs == Family.RESERVOIR_UNION.getMaxPreLongs());
  if (!preLongsEqMin && !preLongsEqMax) {
    // Bug fix: the original message concatenated the count directly against "preLongs"
    // (producing e.g. "only 1preLongs") and did not report the value actually found.
    throw new SketchesArgumentException("Possible corruption: union preLongs must be "
        + Family.RESERVOIR_UNION.getMinPreLongs() + " or "
        + Family.RESERVOIR_UNION.getMaxPreLongs() + ", found: " + numPreLongs);
  }
  if (serVer != SER_VER) {
    if (serVer == 1) {
      // Serialization version 1 stored maxK in a compact encoded form; decode it.
      final short encMaxK = extractEncodedReservoirSize(srcMem);
      maxK = ReservoirSize.decodeValue(encMaxK);
    } else {
      throw new SketchesArgumentException(
          "Possible Corruption: Ser Ver must be " + SER_VER + ": " + serVer);
    }
  }
  final ReservoirItemsUnion<T> riu = new ReservoirItemsUnion<>(maxK);
  if (!isEmpty) {
    // The serialized gadget sketch immediately follows the preamble longs.
    final int preLongBytes = numPreLongs << 3;
    final Memory sketchMem =
        srcMem.region(preLongBytes, srcMem.getCapacity() - preLongBytes);
    riu.update(sketchMem, serDe);
  }
  return riu;
}
/**
 * Returns the maximum allowed reservoir capacity in this union. The current reservoir capacity
 * may be lower.
 *
 * @return The maximum allowed reservoir capacity in this union.
 */
public int getMaxK() { return maxK_; }
/**
 * Union the given sketch. This method can be repeatedly called. If the given sketch is null it is
 * interpreted as an empty sketch.
 *
 * @param sketchIn The incoming sketch.
 */
public void update(final ReservoirItemsSketch<T> sketchIn) {
  if (sketchIn == null) {
    return; // null is treated as an empty sketch: nothing to merge
  }
  // Downsample only when the incoming sketch's k exceeds this union's limit.
  final boolean downsampled = sketchIn.getK() > maxK_;
  final ReservoirItemsSketch<T> ris =
      downsampled ? sketchIn.downsampledCopy(maxK_) : sketchIn;
  // A downsampled copy is private to this union and safe to mutate; otherwise ris
  // aliases the caller's sketch and must be copied before any modification.
  final boolean isModifiable = downsampled;
  if (gadget_ == null) {
    createNewGadget(ris, isModifiable);
  } else {
    twoWayMergeInternal(ris, isModifiable);
  }
}
/**
 * Union the given Memory image of the sketch.
 *
 *<p>This method can be repeatedly called. If the given sketch is null it is interpreted as an
 * empty sketch.</p>
 *
 * @param mem Memory image of sketch to be merged
 * @param serDe An instance of ArrayOfItemsSerDe
 */
public void update(final Memory mem, final ArrayOfItemsSerDe<T> serDe) {
  if (mem == null) {
    return; // a null Memory is treated as an empty sketch
  }
  final ReservoirItemsSketch<T> heapified = ReservoirItemsSketch.heapify(mem, serDe);
  final ReservoirItemsSketch<T> ris =
      (heapified.getK() > maxK_) ? heapified.downsampledCopy(maxK_) : heapified;
  // Either way the sketch is a private heap copy, so it is always safe to modify.
  if (gadget_ == null) {
    createNewGadget(ris, true);
  } else {
    twoWayMergeInternal(ris, true);
  }
}
/**
 * Present this union with a single item to be added to the union.
 *
 * @param datum The given datum of type T.
 */
public void update(final T datum) {
  if (datum == null) {
    return; // nulls are silently ignored
  }
  if (gadget_ == null) {
    // Lazily allocate the gadget sketch on first use.
    gadget_ = ReservoirItemsSketch.newInstance(maxK_);
  }
  gadget_.update(datum);
}
/**
 * Present this union with raw elements of a sketch. Useful when operating in a distributed
 * environment like Pig Latin scripts, where an explicit SerDe may be overly complicated but
 * keeping raw values is simple. Values are <em>not</em> copied and the input array may be
 * modified.
 *
 * @param n Total items seen
 * @param k Reservoir size
 * @param input Reservoir samples; a null input is interpreted as an empty sketch (no-op)
 */
public void update(final long n, final int k, final ArrayList<T> input) {
  if (input == null) {
    // Consistency fix: the other update() overloads document and treat a null input as an
    // empty sketch; previously a null list fell through to the sketch constructor and threw.
    return;
  }
  ReservoirItemsSketch<T> ris = ReservoirItemsSketch.newInstance(input, n,
      ResizeFactor.X8, k); // forcing a resize factor
  ris = (ris.getK() <= maxK_ ? ris : ris.downsampledCopy(maxK_));
  if (gadget_ == null) {
    createNewGadget(ris, true);
  } else {
    twoWayMergeInternal(ris, true);
  }
}
/**
 * Resets this Union. MaxK remains intact, otherwise reverts back to its virgin state.
 */
void reset() {
  // Bug fix: previously this threw NullPointerException when called on a virgin union,
  // since gadget_ is only allocated on the first update(). A null gadget is already
  // the virgin state, so there is nothing to do.
  if (gadget_ != null) {
    gadget_.reset();
  }
}
/**
 * Returns a sketch representing the current state of the union.
 *
 * @return The result of any unions already processed, or null if no data has been presented.
 */
public ReservoirItemsSketch<T> getResult() {
  if (gadget_ == null) {
    return null;
  }
  // Return a copy so the caller cannot mutate the union's internal state.
  return gadget_.copy();
}
/**
 * Returns a byte array representation of this union
 *
 * @param serDe An instance of ArrayOfItemsSerDe
 * @return a byte array representation of this union
 */
public byte[] toByteArray(final ArrayOfItemsSerDe<T> serDe) {
  // Delegate to the class-aware variant, deriving the item class from the first stored
  // sample when one exists; a null class is acceptable for an empty gadget.
  final boolean hasSamples = (gadget_ != null) && (gadget_.getNumSamples() > 0);
  final Class<?> clazz = hasSamples ? gadget_.getValueAtPosition(0).getClass() : null;
  return toByteArray(serDe, clazz);
}
/**
 * Returns a human-readable summary of the sketch, without items.
 *
 * @return A string version of the sketch summary
 */
@Override
public String toString() {
  final StringBuilder sb = new StringBuilder();
  final String thisSimpleName = this.getClass().getSimpleName();
  // NOTE(review): LS appears to be a line-separator constant brought in by a static import
  // not visible in this chunk (likely Util.LS) — confirm.
  sb.append(LS);
  sb.append("### ").append(thisSimpleName).append(" SUMMARY: ").append(LS);
  sb.append(" Max k: ").append(maxK_).append(LS);
  if (gadget_ == null) {
    sb.append(" Gadget is null").append(LS);
  } else {
    sb.append(" Gadget summary: ").append(gadget_.toString());
  }
  sb.append("### END UNION SUMMARY").append(LS);
  return sb.toString();
}
/**
 * Returns a byte array representation of this union. This method should be used when the array
 * elements are subclasses of a common base class.
 *
 * @param serDe An instance of ArrayOfItemsSerDe
 * @param clazz A class to which the items are cast before serialization
 * @return a byte array representation of this union
 */
// gadgetBytes will be null only if gadget_ == null AND empty == true
public byte[] toByteArray(final ArrayOfItemsSerDe<T> serDe, final Class<?> clazz) {
  final int preLongs, outBytes;
  final boolean empty = gadget_ == null;
  final byte[] gadgetBytes = (gadget_ != null ? gadget_.toByteArray(serDe, clazz) : null);
  if (empty) {
    preLongs = Family.RESERVOIR_UNION.getMinPreLongs();
    outBytes = 8; // empty image is a single preamble long
  } else {
    preLongs = Family.RESERVOIR_UNION.getMaxPreLongs();
    outBytes = (preLongs << 3) + gadgetBytes.length; // for longs, we know the size
  }
  final byte[] outArr = new byte[outBytes];
  final WritableMemory mem = WritableMemory.writableWrap(outArr);
  // build preLong
  PreambleUtil.insertPreLongs(mem, preLongs); // Byte 0
  PreambleUtil.insertSerVer(mem, SER_VER); // Byte 1
  PreambleUtil.insertFamilyID(mem, Family.RESERVOIR_UNION.getID()); // Byte 2
  if (empty) {
    PreambleUtil.insertFlags(mem, EMPTY_FLAG_MASK);
  } else {
    PreambleUtil.insertFlags(mem, 0); // Byte 3
  }
  PreambleUtil.insertMaxK(mem, maxK_); // Bytes 4-5
  if (!empty) {
    // Serialized gadget sketch follows the preamble.
    final int preBytes = preLongs << 3;
    mem.putByteArray(preBytes, gadgetBytes, 0, gadgetBytes.length);
  }
  return outArr;
}
// Installs the first gadget sketch: either a fresh maxK-capacity reservoir fed from sketchIn
// (when sketchIn is exact-mode with k < maxK, so no samples need be discarded), or sketchIn
// itself (copied unless it is already private to this union).
private void createNewGadget(final ReservoirItemsSketch<T> sketchIn,
    final boolean isModifiable) {
  if ((sketchIn.getK() < maxK_) && (sketchIn.getN() <= sketchIn.getK())) {
    // incoming sketch is in exact mode with sketch's k < maxK,
    // so we can create a gadget at size maxK and keep everything
    //Assumes twoWayMergeInternal first checks if sketchIn is in exact mode
    gadget_ = ReservoirItemsSketch.newInstance(maxK_);
    twoWayMergeInternal(sketchIn, isModifiable); // isModifiable could be fixed to false here
  } else {
    // use the input sketch as gadget, copying if needed
    gadget_ = (isModifiable ? sketchIn : sketchIn.copy());
  }
}
// We make a three-way classification of sketch states.
// "uni" when (n < k); source of unit weights, can only accept unit weights
// "mid" when (n == k); source of unit weights, can accept "light" general weights.
// "gen" when (n > k); source of general weights, can accept "light" general weights.
//
// source    target    status      update    notes
// ----------------------------------------------------------------------------------------------
// uni,mid   uni       okay        standard  target might transition to mid and gen
// uni,mid   mid,gen   okay        standard  target might transition to gen
// gen       uni       must swap   N/A
// gen       mid,gen   maybe swap  weighted  N assumes fractional values during merge
// ----------------------------------------------------------------------------------------------
// Here is why in the (gen, gen) merge case, the items will be light enough in at least one
// direction:
// Obviously either (n_s/k_s <= n_t/k_t) OR (n_s/k_s >= n_t/k_t).
// WLOG say it's the former, then (n_s/k_s < n_t/(k_t - 1)) provided n_t > 0 and k_t > 1
/**
 * This either merges sketchIn into gadget_ or gadget_ into sketchIn. If merging into sketchIn
 * with isModifiable set to false, copies elements from sketchIn first, leaving original
 * unchanged.
 *
 * @param sketchIn Sketch with new samples from which to draw
 * @param isModifiable Flag indicating whether sketchIn can be modified (e.g. if it was rebuilt
 * from Memory)
 */
private void twoWayMergeInternal(final ReservoirItemsSketch<T> sketchIn,
    final boolean isModifiable) {
  if (sketchIn.getN() <= sketchIn.getK()) {
    // source is exact-mode ("uni"/"mid"): every source sample carries unit weight
    twoWayMergeInternalStandard(sketchIn);
  } else if (gadget_.getN() < gadget_.getK()) {
    // source is "gen" but target is "uni": must merge the other direction
    // merge into sketchIn, so swap first
    final ReservoirItemsSketch<T> tmpSketch = gadget_;
    gadget_ = (isModifiable ? sketchIn : sketchIn.copy());
    twoWayMergeInternalStandard(tmpSketch);
  } else if (sketchIn.getImplicitSampleWeight() < (gadget_.getN()
      / ((double) (gadget_.getK() - 1)))) {
    // implicit weights in sketchIn are light enough to merge into gadget
    twoWayMergeInternalWeighted(sketchIn);
  } else {
    // Use next line as an assert/exception?
    // gadget_.getImplicitSampleWeight() < sketchIn.getN() / ((double) (sketchIn.getK() - 1))) {
    // implicit weights in gadget are light enough to merge into sketchIn
    // merge into sketchIn, so swap first
    final ReservoirItemsSketch<T> tmpSketch = gadget_;
    gadget_ = (isModifiable ? sketchIn : sketchIn.copy());
    twoWayMergeInternalWeighted(tmpSketch);
  }
}
// should be called ONLY by twoWayMergeInternal
// Source is in exact mode: each of its samples has unit weight, so feeding them
// through the gadget's ordinary update() is statistically correct.
private void twoWayMergeInternalStandard(final ReservoirItemsSketch<T> source) {
  assert (source.getN() <= source.getK());
  final int sampleCount = source.getNumSamples();
  int idx = 0;
  while (idx < sampleCount) {
    gadget_.update(source.getValueAtPosition(idx));
    ++idx;
  }
}
// should be called ONLY by twoWayMergeInternal
// Merges a sampling-mode source into the gadget by treating each source sample as carrying
// the source's implicit per-item weight (n/k), with a fractional running total for n.
private void twoWayMergeInternalWeighted(final ReservoirItemsSketch<T> source) {
  // gadget_ capable of accepting (light) general weights
  assert (gadget_.getN() >= gadget_.getK());
  final int numSourceSamples = source.getK();
  // Every retained sample in a sampling-mode reservoir has the same implicit weight n/k.
  final double sourceItemWeight = (source.getN() / (double) numSourceSamples);
  final double rescaled_prob = gadget_.getK() * sourceItemWeight; // K * weight
  double targetTotal = gadget_.getN(); // assumes fractional values during merge
  final int tgtK = gadget_.getK();
  for (int i = 0; i < numSourceSamples; ++i) {
    // inlining the update procedure, using targetTotal for the fractional N values
    // similar to ReservoirLongsSketch.update()
    // p(keep_new_item) = (k * w) / newTotal
    // require p(keep_new_item) < 1.0, meaning strict lightness
    targetTotal += sourceItemWeight;
    final double rescaled_one = targetTotal;
    assert (rescaled_prob < rescaled_one); // Use an exception to enforce strict lightness?
    final double rescaled_flip = rescaled_one * SamplingUtil.rand().nextDouble();
    if (rescaled_flip < rescaled_prob) {
      // Intentionally NOT doing optimization to extract slot number from rescaled_flip.
      // Grabbing new random bits to ensure all slots in play
      final int slotNo = SamplingUtil.rand().nextInt(tgtK);
      gadget_.insertValueAtPosition(source.getValueAtPosition(i), slotNo);
    } // end of inlined weight update
  } // end of loop over source samples
  // targetTotal was fractional but should now be an integer again. Could validate with low
  // tolerance, but for now just round to check.
  final long checkN = (long) Math.floor(0.5 + targetTotal);
  gadget_.forceIncrementItemsSeen(source.getN());
  assert (checkN == gadget_.getN());
}
}
| 2,661 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/VarOptItemsSamples.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Objects;
/**
* This class provides access to the samples contained in a VarOptItemsSketch. It provides two
* mechanisms for access:
* <ul>
 * <li>An <code>Iterator</code> over <code>WeightedSample</code> objects which can be used to
* access both the items and weights in the sample, and which avoids copying data from the
* sketch.</li>
* <li>Getter methods to obtain items or weights as arrays, or individual items. These
* methods create a (shallow) copy of data from the sketch on the first call to any get
* method.</li>
* </ul>
*
* <p>If using getters with a sketch storing heterogeneous items from a polymorphic base class, you
* must call <code>setClass()</code> prior to calling one of the getter methods. This is not
* necessary if using the iterator.</p>
*
* <p>The class also implements <code>Iterable</code> to allow the use of forEach loops for
* convenience.</p>
*
* @param <T> an item of type T
*
* @author Jon Malkin
*/
public class VarOptItemsSamples<T> implements Iterable<VarOptItemsSamples<T>.WeightedSample> {
final VarOptItemsSketch<T> sketch_;
VarOptItemsSketch<T>.Result sampleLists;
final long n_;
final int h_;
final double rWeight_;
/**
 * A convenience class to allow easy iterator access to a VarOpt sample.
 */
//@SuppressWarnings("synthetic-access")
public final class WeightedSample {
  // Index into the sketch's backing sample array for this sample.
  private final int idx_;
  // Override weight used only when resolving a union gadget; NaN means "no adjustment".
  private double adjustedWeight_;

  WeightedSample(final int i) {
    idx_ = i;
    adjustedWeight_ = Double.NaN; // sentinel: use the sketch's weight
  }

  WeightedSample(final int i, final double adjustedWeight) {
    idx_ = i;
    adjustedWeight_ = adjustedWeight;
  }

  /**
   * Accesses the iterator's current object
   * @return An item from the sketch's data sample
   */
  public T getItem() {
    return sketch_.getItem(idx_);
  }

  /**
   * Accesses the iterator's current weight value
   * @return A weight from the sketch's data sample
   */
  public double getWeight() {
    if (idx_ > h_) {
      // R-region sample: weight is tau (rWeight_) unless explicitly adjusted.
      return Double.isNaN(adjustedWeight_) ? rWeight_ : adjustedWeight_;
    } else {
      // H-region sample: each item stores its own exact weight in the sketch.
      return sketch_.getWeight(idx_);
    }
  }

  // only used in resolving union gadget
  boolean getMark() { return sketch_.getMark(idx_); }
}
/**
 * The standard iterator
 *
 * <p>Walks the H (exact-weight) region, skips the gap slot at index h_, then walks the
 * R (sampled) region. Fails fast with ConcurrentModificationException if the sketch's
 * n changes during iteration.</p>
 */
//@SuppressWarnings("synthetic-access")
public class VarOptItemsIterator implements Iterator<WeightedSample> {
  int currIdx_;  // next index to return
  int finalIdx_; // inclusive final index

  VarOptItemsIterator() {
    // When there is no H region (h_ == 0), index 0 is the gap slot, so start at 1.
    currIdx_ = h_ == 0 ? 1 : 0;
    final int k = sketch_.getK();
    finalIdx_ = (int) (n_ <= k ? n_ - 1 : k); // -1 since finalIdx_ is inclusive
  }

  // package private iterator to crawl only H or only R region values
  VarOptItemsIterator(final boolean useRRegion) {
    if (useRRegion) {
      currIdx_ = h_ + 1; // to handle the gap
      finalIdx_ = sketch_.getNumSamples(); // no +1 since inclusive
    } else {
      currIdx_ = 0;
      finalIdx_ = h_ - 1; // need to stop before h_ since inclusive
    }
  }

  @Override
  public boolean hasNext() {
    // If sketch is in exact mode, we'll have a next item as long as index < k.
    // If in sampling mode, the last index is k (array length k+1) but there will always be at
    // least one item in R, so no need to check if the last element is null.
    return currIdx_ <= finalIdx_;
  }

  @Override
  public WeightedSample next() {
    // Detect sketch updates made after this iterator was created.
    if (n_ != sketch_.getN()) {
      throw new ConcurrentModificationException();
    } else if (currIdx_ > finalIdx_) {
      throw new NoSuchElementException();
    }
    // grab current index, apply logic to update currIdx_ for the next call
    final int tgt = currIdx_;
    ++currIdx_;
    // Skip the gap slot at h_ (only present when the sketch is in sampling mode, h_ != n_).
    if ((currIdx_ == h_) && (h_ != n_)) {
      ++currIdx_;
    }
    return new WeightedSample(tgt);
  }
}
// R-region-only iterator that, on the final sample, substitutes an adjusted weight so the
// emitted weights sum exactly to the sketch's total R-region weight (avoids rounding drift).
//@SuppressWarnings("synthetic-access")
class WeightCorrectingRRegionIterator extends VarOptItemsIterator {
  private double cumWeight = 0.0; // sum of weights emitted so far

  WeightCorrectingRRegionIterator() {
    super(true); // crawl only the R region
  }

  @Override
  public WeightedSample next() {
    if (n_ != sketch_.getN()) {
      throw new ConcurrentModificationException();
    } else if (currIdx_ > finalIdx_) {
      throw new NoSuchElementException();
    }
    // grab current index, apply logic to update currIdx_ for the next call
    final int tgt = currIdx_;
    ++currIdx_;
    // only covers R region, no need to check for gap
    final WeightedSample sample;
    if (tgt == finalIdx_) {
      // Final sample: emit exactly the remaining weight instead of tau.
      sample = new WeightedSample(tgt, sketch_.getTotalWtR() - cumWeight);
    } else {
      sample = new WeightedSample(tgt);
      cumWeight += rWeight_;
    }
    return sample;
  }
}
// Snapshots n, h, and tau at construction time; iterators use the snapshot of n
// to detect concurrent sketch modification.
VarOptItemsSamples(final VarOptItemsSketch<T> sketch) {
  sketch_ = Objects.requireNonNull(sketch, "sketch must not be null");
  n_ = sketch.getN();
  h_ = sketch.getHRegionCount();
  rWeight_ = sketch.getTau();
}
@Override
public Iterator<WeightedSample> iterator() {
  return new VarOptItemsIterator();
}

// Package-private: iterates only the H (exact-weight) region.
Iterator<WeightedSample> getHIterator() { return new VarOptItemsIterator(false); }

// Package-private: iterates only the R (sampled) region.
Iterator<WeightedSample> getRIterator() { return new VarOptItemsIterator(true); }

// Package-private: R-region iterator whose final weight is corrected to the exact total.
Iterator<WeightedSample> getWeightCorrRIter() { return new WeightCorrectingRRegionIterator(); }
/**
 * Specifies the class to use when copying the item array from the sketch. This method is
 * required if the sketch stores heterogeneous item types of some base class, for instance a
 * sketch over <code>Number</code>s.
 *
 * @param clazz The class to use when creating the item array result
 */
public void setClass(final Class<?> clazz) {
  // Arrays are materialized at most once; subsequent calls are no-ops.
  if (sampleLists != null) {
    return;
  }
  sampleLists = sketch_.getSamplesAsArrays(clazz);
}
/**
 * Copies items and weights from the sketch, if necessary, and returns the number of samples
 * held. The result will be 0 for an empty sketch.
 *
 * @return The number of items and weights in the sketch
 */
public int getNumSamples() {
  loadArrays();
  return (sampleLists == null || sampleLists.weights == null ? 0 : sampleLists.weights.length);
}
/**
 * Returns a shallow copy of the array of sample items contained in the sketch. If this is the
 * first getter call, copies data arrays from the sketch.
 * @return The array of sample items, or null if the sketch is empty
 */
public T[] items() {
  loadArrays();
  return (sampleLists == null ? null : sampleLists.items);
}
/**
 * Returns a single item from the samples contained in the sketch. Does not perform bounds
 * checking on the input. If this is the first getter call, copies data arrays from the sketch.
 * @param i An index into the list of samples
 * @return The sample at array position <code>i</code>, or null if the sketch holds no items
 */
public T items(final int i) {
  loadArrays();
  if ((sampleLists == null) || (sampleLists.items == null)) {
    return null;
  }
  return sampleLists.items[i];
}
/**
 * Returns a copy of the array of weights contained in the sketch. If this is the first
 * getter call, copies data arrays from the sketch.
 * @return The array of sample weights, or null if the sketch is empty
 */
public double[] weights() {
  loadArrays();
  return (sampleLists == null ? null : sampleLists.weights);
}
/**
 * Returns a single weight from the samples contained in the sketch. Does not perform bounds
 * checking on the input. If this is the first getter call, copies data arrays from the sketch.
 * @param i An index into the list of weights
 * @return The weight at array position <code>i</code>, or NaN if the sketch holds no weights
 */
public double weights(final int i) {
  loadArrays();
  if ((sampleLists == null) || (sampleLists.weights == null)) {
    return Double.NaN;
  }
  return sampleLists.weights[i];
}
// Lazily (shallow-)copies the sketch's items and weights into sampleLists on first access.
private void loadArrays() {
  if (sampleLists == null) {
    sampleLists = sketch_.getSamplesAsArrays();
  }
}
}
| 2,662 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/ReservoirItemsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.sampling.PreambleUtil.EMPTY_FLAG_MASK;
import static org.apache.datasketches.sampling.PreambleUtil.FAMILY_BYTE;
import static org.apache.datasketches.sampling.PreambleUtil.SER_VER;
import static org.apache.datasketches.sampling.PreambleUtil.extractEncodedReservoirSize;
import static org.apache.datasketches.sampling.PreambleUtil.extractFlags;
import static org.apache.datasketches.sampling.PreambleUtil.extractK;
import static org.apache.datasketches.sampling.PreambleUtil.extractN;
import static org.apache.datasketches.sampling.PreambleUtil.extractPreLongs;
import static org.apache.datasketches.sampling.PreambleUtil.extractResizeFactor;
import static org.apache.datasketches.sampling.PreambleUtil.extractSerVer;
import static org.apache.datasketches.sampling.SamplingUtil.pseudoHypergeometricLBonP;
import static org.apache.datasketches.sampling.SamplingUtil.pseudoHypergeometricUBonP;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.function.Predicate;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.ResizeFactor;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SketchesStateException;
import org.apache.datasketches.common.Util;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* This sketch provides a reservoir sample over an input stream of items. The sketch contains a
* uniform random sample of unweighted items from the stream.
*
* @param <T> The type of object held in the reservoir.
*
* @author Jon Malkin
* @author Kevin Lang
*/
public final class ReservoirItemsSketch<T> {
/**
* The smallest sampling array allocated: 16
*/
private static final int MIN_LG_ARR_ITEMS = 4;
/**
* Using 48 bits to capture number of items seen, so sketch cannot process more after this
* many items capacity
*/
private static final long MAX_ITEMS_SEEN = 0xFFFFFFFFFFFFL;
/**
* Default sampling size multiple when reallocating storage: 8
*/
private static final ResizeFactor DEFAULT_RESIZE_FACTOR = ResizeFactor.X8;
private final int reservoirSize_; // max size of reservoir
private int currItemsAlloc_; // currently allocated array size
private long itemsSeen_; // number of items presented to sketch
private final ResizeFactor rf_; // resize factor
private ArrayList<T> data_; // stored sampled items
/**
 * Creates an empty reservoir with the given maximum capacity and resize factor.
 *
 * @param k Maximum reservoir capacity; must be at least 2
 * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
 * @throws SketchesArgumentException if k is less than 2
 */
private ReservoirItemsSketch(final int k, final ResizeFactor rf) {
  // required due to a theorem about lightness during merging
  if (k < 2) {
    throw new SketchesArgumentException("k must be at least 2");
  }
  reservoirSize_ = k;
  rf_ = rf;
  itemsSeen_ = 0;
  // Start with a sub-multiple of k (at least 2^MIN_LG_ARR_ITEMS); the array grows by rf
  // as items arrive, up to the full reservoir size.
  final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(reservoirSize_), "reservoirSize_");
  final int initialLgSize =
      SamplingUtil.startingSubMultiple(ceilingLgK, rf_.lg(), MIN_LG_ARR_ITEMS);
  currItemsAlloc_ = SamplingUtil.getAdjustedSize(reservoirSize_, 1 << initialLgSize);
  data_ = new ArrayList<>(currItemsAlloc_);
}
/**
 * Creates a fully-populated sketch. Used internally to avoid extraneous array allocation
 * when deserializing.
 * Uses the size of the items array as the initial array allocation.
 *
 * @param data Reservoir items as an {@code ArrayList<T>}
 * @param itemsSeen Number of items presented to the sketch so far
 * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
 * @param k Maximum size of reservoir
 * @throws SketchesArgumentException if the arguments are mutually inconsistent
 */
private ReservoirItemsSketch(final ArrayList<T> data, final long itemsSeen,
    final ResizeFactor rf, final int k) {
  if (data == null) {
    throw new SketchesArgumentException("Instantiating sketch with null reservoir");
  }
  if (k < 2) {
    throw new SketchesArgumentException("Cannot instantiate sketch with reservoir size less than 2");
  }
  if (k < data.size()) {
    throw new SketchesArgumentException("Instantiating sketch with max size less than array length: "
        + k + " max size, array of length " + data.size());
  }
  // Sample count must be consistent with n: full when n >= k, exactly n when under-full.
  if (((itemsSeen >= k) && (data.size() < k))
      || ((itemsSeen < k) && (data.size() < itemsSeen))) {
    throw new SketchesArgumentException("Instantiating sketch with too few samples. Items seen: "
        + itemsSeen + ", max reservoir size: " + k
        + ", items array length: " + data.size());
  }
  // Should we compute target current allocation to validate?
  reservoirSize_ = k;
  currItemsAlloc_ = data.size();
  itemsSeen_ = itemsSeen;
  rf_ = rf;
  data_ = data;
}
/**
 * Fast constructor for fully-specified sketch with no encoded/decoding size and no
 * validation. Used with copy().
 *
 * @param k Maximum reservoir capacity
 * @param currItemsAlloc Current array size (assumed equal to items.length)
 * @param itemsSeen Total items seen by this sketch
 * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
 * @param data Data ArrayList backing the reservoir, will <em>not</em> be copied
 */
private ReservoirItemsSketch(final int k, final int currItemsAlloc, final long itemsSeen,
    final ResizeFactor rf, final ArrayList<T> data) {
  // No validation here: callers (copy paths) guarantee the arguments are consistent.
  this.reservoirSize_ = k;
  this.currItemsAlloc_ = currItemsAlloc;
  this.itemsSeen_ = itemsSeen;
  this.rf_ = rf;
  this.data_ = data;
}
/**
 * Construct a mergeable sampling sketch with up to k samples using the default resize
 * factor (8).
 *
 * @param k Maximum size of sampling. Allocated size may be smaller until reservoir fills.
 *          Unlike many sketches in this package, this value does <em>not</em> need to be a
 *          power of 2.
 * @param <T> The type of object held in the reservoir.
 * @return A ReservoirItemsSketch initialized with maximum size k and the default resize factor.
 */
public static <T> ReservoirItemsSketch<T> newInstance(final int k) {
  return new ReservoirItemsSketch<>(k, DEFAULT_RESIZE_FACTOR);
}
/**
 * Construct a mergeable sampling sketch with up to k samples using a specified resize factor.
 *
 * @param k Maximum size of sampling. Allocated size may be smaller until reservoir fills.
 *          Unlike many sketches in this package, this value does <em>not</em> need to be a
 *          power of 2.
 * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
 * @param <T> The type of object held in the reservoir.
 * @return A ReservoirItemsSketch initialized with maximum size k and resize factor rf.
 */
public static <T> ReservoirItemsSketch<T> newInstance(final int k, final ResizeFactor rf) {
  return new ReservoirItemsSketch<>(k, rf);
}
/**
 * Thin wrapper around private constructor
 *
 * @param <T> data type
 * @param data Reservoir items as {@code ArrayList<T>}
 * @param itemsSeen Number of items presented to the sketch so far
 * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
 * @param k Maximum reservoir capacity (a plain int; the compact ser-ver-1 encoding is
 *          decoded before reaching this method)
 * @return New sketch built with the provided inputs
 */
static <T> ReservoirItemsSketch<T> newInstance(final ArrayList<T> data, final long itemsSeen,
    final ResizeFactor rf, final int k) {
  return new ReservoirItemsSketch<>(data, itemsSeen, rf, k);
}
/**
 * Returns a sketch instance of this class from the given srcMem,
 * which must be a Memory representation of this sketch class.
 *
 * @param <T> The type of item this sketch contains
 * @param srcMem a Memory representation of a sketch of this class.
 *               <a href="{@docRoot}/resources/dictionary.html#mem">See Memory</a>
 * @param serDe An instance of ArrayOfItemsSerDe
 * @return a sketch instance of this class
 * @throws SketchesArgumentException if the image is corrupt or the ser ver is unsupported
 */
public static <T> ReservoirItemsSketch<T> heapify(final Memory srcMem,
    final ArrayOfItemsSerDe<T> serDe) {
  Family.RESERVOIR.checkFamilyID(srcMem.getByte(FAMILY_BYTE));
  final int numPreLongs = extractPreLongs(srcMem);
  final ResizeFactor rf = ResizeFactor.getRF(extractResizeFactor(srcMem));
  final int serVer = extractSerVer(srcMem);
  final boolean isEmpty = (extractFlags(srcMem) & EMPTY_FLAG_MASK) != 0;
  final long itemsSeen = (isEmpty ? 0 : extractN(srcMem));
  int k = extractK(srcMem);
  // Check values
  final boolean preLongsEqMin = (numPreLongs == Family.RESERVOIR.getMinPreLongs());
  final boolean preLongsEqMax = (numPreLongs == Family.RESERVOIR.getMaxPreLongs());
  if (!preLongsEqMin && !preLongsEqMax) {
    throw new SketchesArgumentException(
        "Possible corruption: Non-empty sketch with only "
            + Family.RESERVOIR.getMinPreLongs() + " preLong(s)");
  }
  if (serVer != SER_VER) {
    if (serVer == 1) {
      // Ser ver 1 stored k in a compact encoded short; decode to the plain int form.
      final short encK = extractEncodedReservoirSize(srcMem);
      k = ReservoirSize.decodeValue(encK);
    } else {
      throw new SketchesArgumentException(
          "Possible Corruption: Ser Ver must be " + SER_VER + ": " + serVer);
    }
  }
  if (isEmpty) {
    return new ReservoirItemsSketch<>(k, rf);
  }
  final int preLongBytes = numPreLongs << 3;
  int allocatedItems = k; // default to full reservoir
  if (itemsSeen < k) {
    // under-full so determine size to allocate, using ceilingLog2(totalSeen) as minimum
    // casts to int are safe since under-full
    final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(k), "heapify");
    final int minLgSize = Util.exactLog2OfInt(Util.ceilingIntPowerOf2((int) itemsSeen), "heapify");
    final int initialLgSize = SamplingUtil.startingSubMultiple(ceilingLgK, rf.lg(),
        Math.max(minLgSize, MIN_LG_ARR_ITEMS));
    allocatedItems = SamplingUtil.getAdjustedSize(k, 1 << initialLgSize);
  }
  // Only min(k, n) items are serialized; deserialize them and wrap in an ArrayList.
  final int itemsToRead = (int) Math.min(k, itemsSeen);
  final T[] data = serDe.deserializeFromMemory(srcMem, preLongBytes, itemsToRead);
  final ArrayList<T> dataList = new ArrayList<>(Arrays.asList(data));
  final ReservoirItemsSketch<T> ris = new ReservoirItemsSketch<>(dataList, itemsSeen, rf, k);
  // Pre-size the backing list to the computed allocation without altering its contents.
  ris.data_.ensureCapacity(allocatedItems);
  ris.currItemsAlloc_ = allocatedItems;
  return ris;
}
/**
 * Returns the sketch's value of <i>k</i>, the maximum number of samples stored in the
 * reservoir. The current number of items in the sketch may be lower.
 *
 * @return k, the maximum number of samples in the reservoir
 */
public int getK() {
  return reservoirSize_;
}

/**
 * Returns the number of items processed from the input stream
 *
 * @return n, the number of stream items the sketch has seen
 */
public long getN() {
  return itemsSeen_;
}

/**
 * Returns the current number of items in the reservoir, which may be smaller than the
 * reservoir capacity.
 *
 * @return the number of items currently in the reservoir
 */
public int getNumSamples() {
  // min(k, n): the reservoir holds every item until it fills at n == k
  return (int) Math.min(reservoirSize_, itemsSeen_);
}
/**
* Randomly decide whether or not to include an item in the sample set.
*
* @param item a unit-weight (equivalently, unweighted) item of the set being sampled from
*/
public void update(final T item) {
if (itemsSeen_ == MAX_ITEMS_SEEN) {
throw new SketchesStateException("Sketch has exceeded capacity for total items seen: "
+ MAX_ITEMS_SEEN);
}
if (item == null) {
return;
}
if (itemsSeen_ < reservoirSize_) { // initial phase, take the first reservoirSize_ items
if (itemsSeen_ >= currItemsAlloc_) {
growReservoir();
}
assert itemsSeen_ < currItemsAlloc_;
// we'll randomize replacement positions, so in-order should be valid for now
data_.add(item);
++itemsSeen_;
} else { // code for steady state where we sample randomly
++itemsSeen_;
// prob(keep_item) < k / n = reservoirSize_ / itemsSeen_
// so multiply to get: keep if rand * itemsSeen_ < reservoirSize_
if ((SamplingUtil.rand().nextDouble() * itemsSeen_) < reservoirSize_) {
final int newSlot = SamplingUtil.rand().nextInt(reservoirSize_);
data_.set(newSlot, item);
}
}
}
/**
* Resets this sketch to the empty state, but retains the original value of k.
*/
public void reset() {
final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(reservoirSize_), "ReservoirItemsSketch");
final int initialLgSize =
SamplingUtil.startingSubMultiple(ceilingLgK, rf_.lg(), MIN_LG_ARR_ITEMS);
currItemsAlloc_ = SamplingUtil.getAdjustedSize(reservoirSize_, 1 << initialLgSize);
data_ = new ArrayList<>(currItemsAlloc_);
itemsSeen_ = 0;
}
/**
* Returns a copy of the items in the reservoir, or null if empty. The returned array length
* may be smaller than the reservoir capacity.
*
* <p>In order to allocate an array of generic type T, uses the class of the first item in
* the array. This method method may throw an <code>ArrayAssignmentException</code> if the
* reservoir stores instances of a polymorphic base class.</p>
*
* @return A copy of the reservoir array
*/
@SuppressWarnings("unchecked")
public T[] getSamples() {
if (itemsSeen_ == 0) {
return null;
}
final Class<?> clazz = data_.get(0).getClass();
return data_.toArray((T[]) Array.newInstance(clazz, 0));
}
/**
* Returns a copy of the items in the reservoir as members of Class <em>clazz</em>, or null
* if empty. The returned array length may be smaller than the reservoir capacity.
*
* <p>This method allocates an array of class <em>clazz</em>, which must either match or
* extend T. This method should be used when objects in the array are all instances of T but
* are not necessarily instances of the base class.</p>
*
* @param clazz A class to which the items are cast before returning
* @return A copy of the reservoir array
*/
@SuppressWarnings("unchecked")
public T[] getSamples(final Class<?> clazz) {
if (itemsSeen_ == 0) {
return null;
}
return data_.toArray((T[]) Array.newInstance(clazz, 0));
}
  /**
   * Returns the actual List backing the reservoir. <em>Any changes to this List will corrupt
   * the reservoir sample.</em>
   *
   * <p>This method should be used only when making a copy of the returned samples, to avoid
   * an extraneous array copy.</p>
   *
   * @return The raw array backing this reservoir.
   */
  ArrayList<T> getRawSamplesAsList() {
    return data_; // package-private on purpose: exposes internal state without copying
  }
/**
* Returns a human-readable summary of the sketch, without items.
*
* @return A string version of the sketch summary
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
final String thisSimpleName = this.getClass().getSimpleName();
sb.append(LS);
sb.append("### ").append(thisSimpleName).append(" SUMMARY: ").append(LS);
sb.append(" k : ").append(reservoirSize_).append(LS);
sb.append(" n : ").append(itemsSeen_).append(LS);
sb.append(" Current size : ").append(currItemsAlloc_).append(LS);
sb.append(" Resize factor: ").append(rf_).append(LS);
sb.append("### END SKETCH SUMMARY").append(LS);
return sb.toString();
}
  /**
   * Returns a human readable string of the preamble of a byte array image of a ReservoirItemsSketch.
   * @param byteArr the given byte array
   * @return a human readable string of the preamble of a byte array image of a ReservoirItemsSketch.
   */
  public static String toString(final byte[] byteArr) {
    return PreambleUtil.preambleToString(byteArr); // formats only the preamble; items are not deserialized
  }
  /**
   * Returns a human readable string of the preamble of a Memory image of a ReservoirItemsSketch.
   * @param mem the given Memory
   * @return a human readable string of the preamble of a Memory image of a ReservoirItemsSketch.
   */
  public static String toString(final Memory mem) {
    return PreambleUtil.preambleToString(mem); // formats only the preamble; items are not deserialized
  }
/**
* Returns a byte array representation of this sketch. May fail for polymorphic item types.
*
* @param serDe An instance of ArrayOfItemsSerDe
* @return a byte array representation of this sketch
*/
public byte[] toByteArray(final ArrayOfItemsSerDe<? super T> serDe) {
if (itemsSeen_ == 0) {
// null class is ok since empty -- no need to call serDe
return toByteArray(serDe, null);
} else {
return toByteArray(serDe, data_.get(0).getClass());
}
}
  /**
   * Returns a byte array representation of this sketch. Copies contents into an array of the
   * specified class for serialization to allow for polymorphic types.
   *
   * @param serDe An instance of ArrayOfItemsSerDe
   * @param clazz The class represented by <T>
   * @return a byte array representation of this sketch
   */
  // bytes will be null only if empty == true
  public byte[] toByteArray(final ArrayOfItemsSerDe<? super T> serDe, final Class<?> clazz) {
    final int preLongs, outBytes;
    final boolean empty = itemsSeen_ == 0;
    byte[] bytes = null; // for serialized items from serDe

    if (empty) {
      preLongs = 1;   // empty image is a single preamble long
      outBytes = 8;
    } else {
      preLongs = Family.RESERVOIR.getMaxPreLongs();
      bytes = serDe.serializeToByteArray(getSamples(clazz));
      outBytes = (preLongs << 3) + bytes.length; // preamble bytes + serialized item bytes
    }
    final byte[] outArr = new byte[outBytes];
    final WritableMemory mem = WritableMemory.writableWrap(outArr);

    // Common header elements
    PreambleUtil.insertPreLongs(mem, preLongs);                  // Byte 0
    PreambleUtil.insertLgResizeFactor(mem, rf_.lg());
    PreambleUtil.insertSerVer(mem, SER_VER);                     // Byte 1
    PreambleUtil.insertFamilyID(mem, Family.RESERVOIR.getID());  // Byte 2
    if (empty) {
      PreambleUtil.insertFlags(mem, EMPTY_FLAG_MASK);            // Byte 3
    } else {
      PreambleUtil.insertFlags(mem, 0);
    }
    PreambleUtil.insertK(mem, reservoirSize_);                   // Bytes 4-7

    // conditional elements: item count and payload only exist for non-empty sketches
    if (!empty) {
      PreambleUtil.insertN(mem, itemsSeen_);

      // insert the bytearray of serialized samples, offset by the preamble size
      final int preBytes = preLongs << 3;
      mem.putByteArray(preBytes, bytes, 0, bytes.length);
    }

    return outArr;
  }
/**
* Computes an estimated subset sum from the entire stream for objects matching a given
* predicate. Provides a lower bound, estimate, and upper bound using a target of 2 standard
* deviations.
*
* <p>This is technically a heuristic method, and tries to err on the conservative side.</p>
*
* @param predicate A predicate to use when identifying items.
* @return A summary object containing the estimate, upper and lower bounds, and the total
* sketch weight.
*/
public SampleSubsetSummary estimateSubsetSum(final Predicate<T> predicate) {
if (itemsSeen_ == 0) {
return new SampleSubsetSummary(0.0, 0.0, 0.0, 0.0);
}
final int numSamples = getNumSamples();
final double samplingRate = numSamples / (double) itemsSeen_;
assert samplingRate >= 0.0;
assert samplingRate <= 1.0;
int trueCount = 0;
for (int i = 0; i < numSamples; ++i) {
if (predicate.test(data_.get(i))) {
++trueCount;
}
}
// if in exact mode, we can return an exact answer
if (itemsSeen_ <= reservoirSize_) {
return new SampleSubsetSummary(trueCount, trueCount, trueCount, numSamples);
}
final double lbTrueFraction = pseudoHypergeometricLBonP(numSamples, trueCount, samplingRate);
final double estimatedTrueFraction = (1.0 * trueCount) / numSamples;
final double ubTrueFraction = pseudoHypergeometricUBonP(numSamples, trueCount, samplingRate);
return new SampleSubsetSummary(
itemsSeen_ * lbTrueFraction,
itemsSeen_ * estimatedTrueFraction,
itemsSeen_ * ubTrueFraction,
itemsSeen_);
}
double getImplicitSampleWeight() {
if (itemsSeen_ < reservoirSize_) {
return 1.0;
} else {
return ((1.0 * itemsSeen_) / reservoirSize_);
}
}
/**
* Useful during union operations to avoid copying the items array around if only updating a
* few points.
*
* @param pos The position from which to retrieve the element
* @return The value in the reservoir at position <code>pos</code>
*/
T getValueAtPosition(final int pos) {
if (itemsSeen_ == 0) {
throw new SketchesArgumentException("Requested element from empty reservoir.");
} else if ((pos < 0) || (pos >= getNumSamples())) {
throw new SketchesArgumentException("Requested position must be between 0 and "
+ getNumSamples() + ", " + "inclusive. Received: " + pos);
}
return data_.get(pos);
}
/**
* Useful during union operation to force-insert a value into the union gadget. Does
* <em>NOT</em> increment count of items seen.
*
* @param value The entry to store in the reservoir
* @param pos The position at which to store the entry
*/
void insertValueAtPosition(final T value, final int pos) {
if ((pos < 0) || (pos >= getNumSamples())) {
throw new SketchesArgumentException("Insert position must be between 0 and "
+ getNumSamples() + ", " + "inclusive. Received: " + pos);
}
data_.set(pos, value);
}
/**
* Used during union operations to update count of items seen. Does <em>NOT</em> check sign,
* but will throw an exception if the final result exceeds the maximum possible items seen
* value.
*
* @param inc The value added
*/
void forceIncrementItemsSeen(final long inc) {
itemsSeen_ += inc;
if (itemsSeen_ > MAX_ITEMS_SEEN) {
throw new SketchesStateException("Sketch has exceeded capacity for total items seen. "
+ "Limit: " + MAX_ITEMS_SEEN + ", found: " + itemsSeen_);
}
}
/**
* Used during union operations to ensure we do not overwrite an existing reservoir. Creates a
* shallow copy of the reservoir.
*
* @return A copy of the current sketch
*/
@SuppressWarnings("unchecked")
ReservoirItemsSketch<T> copy() {
return new ReservoirItemsSketch<>(reservoirSize_, currItemsAlloc_,
itemsSeen_, rf_, (ArrayList<T>) data_.clone());
}
// Note: the downsampling approach may appear strange but avoids several edge cases
// Q1: Why not just permute samples and then take the first "newK" of them?
// A1: We're assuming the sketch source is read-only
// Q2: Why not copy the source sketch, permute samples, then truncate the sample array and
// reduce k?
// A2: That would involve allocating memory proportional to the old k. Even if only a
// temporary violation of maxK, we're avoiding violating it at all.
ReservoirItemsSketch<T> downsampledCopy(final int maxK) {
final ReservoirItemsSketch<T> ris = new ReservoirItemsSketch<>(maxK, rf_);
for (final T item : getSamples()) {
// Pretending old implicit weights are all 1. Not true in general, but they're all
// equal so update should work properly as long as we update itemsSeen_ at the end.
ris.update(item);
}
// need to adjust number seen to get correct new implicit weights
if (ris.getN() < itemsSeen_) {
ris.forceIncrementItemsSeen(itemsSeen_ - ris.getN());
}
return ris;
}
/**
* Increases allocated sampling size by (adjusted) ResizeFactor and copies items from old
* sampling.
*/
private void growReservoir() {
currItemsAlloc_ = SamplingUtil.getAdjustedSize(reservoirSize_, currItemsAlloc_ << rf_.lg());
data_.ensureCapacity(currItemsAlloc_);
}
}
| 2,663 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/ReservoirLongsSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.sampling.PreambleUtil.EMPTY_FLAG_MASK;
import static org.apache.datasketches.sampling.PreambleUtil.FAMILY_BYTE;
import static org.apache.datasketches.sampling.PreambleUtil.SER_VER;
import static org.apache.datasketches.sampling.PreambleUtil.extractEncodedReservoirSize;
import static org.apache.datasketches.sampling.PreambleUtil.extractFlags;
import static org.apache.datasketches.sampling.PreambleUtil.extractK;
import static org.apache.datasketches.sampling.PreambleUtil.extractN;
import static org.apache.datasketches.sampling.PreambleUtil.extractPreLongs;
import static org.apache.datasketches.sampling.PreambleUtil.extractResizeFactor;
import static org.apache.datasketches.sampling.PreambleUtil.extractSerVer;
import static org.apache.datasketches.sampling.SamplingUtil.pseudoHypergeometricLBonP;
import static org.apache.datasketches.sampling.SamplingUtil.pseudoHypergeometricUBonP;
import java.util.Arrays;
import java.util.function.Predicate;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.ResizeFactor;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.common.SketchesStateException;
import org.apache.datasketches.common.Util;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
 * This sketch provides a reservoir sample over an input stream of <code>long</code>s. The sketch
 * contains a uniform random sample of items from the stream.
 *
 * @author Jon Malkin
 * @author Kevin Lang
 */
public final class ReservoirLongsSketch {

  /**
   * The smallest sampling array allocated: 16
   */
  private static final int MIN_LG_ARR_LONGS = 4;

  /**
   * Using 48 bits to capture number of items seen, so sketch cannot process more after this many
   * items capacity
   */
  private static final long MAX_ITEMS_SEEN = 0xFFFFFFFFFFFFL;

  /**
   * Default sampling size multiple when reallocating storage: 8
   */
  private static final ResizeFactor DEFAULT_RESIZE_FACTOR = ResizeFactor.X8;

  private final int reservoirSize_;      // max size of sampling
  private int currItemsAlloc_;           // currently allocated array size
  private long itemsSeen_;               // number of items presented to sketch
  private final ResizeFactor rf_;        // resize factor
  private long[] data_;                  // stored sampling items

  /**
   * The basic constructor for building an empty sketch.
   *
   * @param k Target maximum reservoir size
   * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
   */
  private ReservoirLongsSketch(final int k, final ResizeFactor rf) {
    // required due to a theorem about lightness during merging
    if (k < 2) {
      throw new SketchesArgumentException("k must be at least 2");
    }

    reservoirSize_ = k;
    rf_ = rf;

    itemsSeen_ = 0;

    final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(reservoirSize_), "ReservoirLongsSketch");
    final int initialLgSize =
        SamplingUtil.startingSubMultiple(ceilingLgK, rf_.lg(), MIN_LG_ARR_LONGS);

    currItemsAlloc_ = SamplingUtil.getAdjustedSize(reservoirSize_, 1 << initialLgSize);
    // Java zero-initializes new arrays, so no explicit fill is required.
    data_ = new long[currItemsAlloc_];
  }

  /**
   * Creates a fully-populated sketch. Used internally to avoid extraneous array allocation when
   * deserializing. Uses size of items array to as initial array allocation.
   *
   * @param data Reservoir items as long[]
   * @param itemsSeen Number of items presented to the sketch so far
   * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
   * @param k Maximum reservoir size
   */
  private ReservoirLongsSketch(final long[] data, final long itemsSeen, final ResizeFactor rf,
                               final int k) {
    if (data == null) {
      throw new SketchesArgumentException("Instantiating sketch with null reservoir");
    }
    if (k < 2) {
      throw new SketchesArgumentException(
          "Cannot instantiate sketch with reservoir size less than 2");
    }
    if (k < data.length) {
      throw new SketchesArgumentException(
          "Instantiating sketch with max size less than array length: " + k
              + " max size, array of length " + data.length);
    }
    if (((itemsSeen >= k) && (data.length < k))
        || ((itemsSeen < k) && (data.length < itemsSeen))) {
      throw new SketchesArgumentException("Instantiating sketch with too few samples. "
          + "Items seen: " + itemsSeen + ", max reservoir size: " + k + ", "
          + "items array length: " + data.length);
    }

    // Take ownership of the provided array without copying.
    reservoirSize_ = k;
    currItemsAlloc_ = data.length;
    itemsSeen_ = itemsSeen;
    rf_ = rf;
    data_ = data;
  }

  /**
   * Fast constructor for full-specified sketch with no encoded/decoding size and no validation.
   * Used with copy().
   *
   * @param k Maximum reservoir capacity
   * @param currItemsAlloc Current array size (assumed equal to items.length)
   * @param itemsSeen Total items seen by this sketch
   * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
   * @param data Data array backing the reservoir, will <em>not</em> be copied
   */
  private ReservoirLongsSketch(final int k, final int currItemsAlloc,
                               final long itemsSeen, final ResizeFactor rf, final long[] data) {
    reservoirSize_ = k;
    currItemsAlloc_ = currItemsAlloc;
    itemsSeen_ = itemsSeen;
    rf_ = rf;
    data_ = data;
  }

  /**
   * Construct a mergeable reservoir sampling sketch with up to k samples using the default resize
   * factor (8).
   *
   * @param k Maximum size of sampling. Allocated size may be smaller until sampling fills. Unlike
   *          many sketches in this package, this value does <em>not</em> need to be a power of 2.
   * @return A ReservoirLongsSketch initialized with maximum size k and the default resize factor.
   */
  public static ReservoirLongsSketch newInstance(final int k) {
    return new ReservoirLongsSketch(k, DEFAULT_RESIZE_FACTOR);
  }

  /**
   * Construct a mergeable reservoir sampling sketch with up to k samples using the default resize
   * factor (8).
   *
   * @param k Maximum size of sampling. Allocated size may be smaller until sampling fills. Unlike
   *          many sketches in this package, this value does <em>not</em> need to be a power of 2.
   * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
   * @return A ReservoirLongsSketch initialized with maximum size k and ResizeFactor rf.
   */
  public static ReservoirLongsSketch newInstance(final int k, final ResizeFactor rf) {
    return new ReservoirLongsSketch(k, rf);
  }

  /**
   * Returns a sketch instance of this class from the given srcMem, which must be a Memory
   * representation of this sketch class.
   *
   * @param srcMem a Memory representation of a sketch of this class. <a href=
   *               "{@docRoot}/resources/dictionary.html#mem">See Memory</a>
   * @return a sketch instance of this class
   */
  public static ReservoirLongsSketch heapify(final Memory srcMem) {
    Family.RESERVOIR.checkFamilyID(srcMem.getByte(FAMILY_BYTE));

    final int numPreLongs = extractPreLongs(srcMem);
    final ResizeFactor rf = ResizeFactor.getRF(extractResizeFactor(srcMem));
    final int serVer = extractSerVer(srcMem);
    final boolean isEmpty = (extractFlags(srcMem) & EMPTY_FLAG_MASK) != 0;
    final long itemsSeen = (isEmpty ? 0 : extractN(srcMem));
    int k = extractK(srcMem);

    // Check values
    final boolean preLongsEqMin = (numPreLongs == Family.RESERVOIR.getMinPreLongs());
    final boolean preLongsEqMax = (numPreLongs == Family.RESERVOIR.getMaxPreLongs());

    if (!preLongsEqMin && !preLongsEqMax) {
      // Fixed message: a space was missing before "preLongs"; wording now matches
      // ReservoirItemsSketch.
      throw new SketchesArgumentException("Possible corruption: Non-empty sketch with only "
          + Family.RESERVOIR.getMinPreLongs() + " preLong(s)");
    }
    if (serVer != SER_VER) {
      if (serVer == 1) {
        // Ser ver 1 stored k in an encoded short; decode for backward compatibility.
        final short encK = extractEncodedReservoirSize(srcMem);
        k = ReservoirSize.decodeValue(encK);
      } else {
        throw new SketchesArgumentException(
            "Possible Corruption: Ser Ver must be " + SER_VER + ": " + serVer);
      }
    }

    if (isEmpty) {
      return new ReservoirLongsSketch(k, rf);
    }

    final int preLongBytes = numPreLongs << 3;
    final int numSketchLongs = (int) Math.min(itemsSeen, k);
    int allocatedSize = k; // default to full reservoir
    if (itemsSeen < k) {
      // under-full so determine size to allocate, using ceilingLog2(totalSeen) as minimum
      // casts to int are safe since under-full
      final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(k), "k");
      final int minLgSize = Util.exactLog2OfInt(Util.ceilingIntPowerOf2((int) itemsSeen), "heapify");
      final int initialLgSize = SamplingUtil.startingSubMultiple(ceilingLgK, rf.lg(),
          Math.max(minLgSize, MIN_LG_ARR_LONGS));

      allocatedSize = SamplingUtil.getAdjustedSize(k, 1 << initialLgSize);
    }
    final long[] data = new long[allocatedSize];
    srcMem.getLongArray(preLongBytes, data, 0, numSketchLongs);

    return new ReservoirLongsSketch(data, itemsSeen, rf, k);
  }

  /**
   * Thin wrapper around private constructor
   *
   * @param data Reservoir items as long[]
   * @param itemsSeen Number of items presented to the sketch so far
   * @param rf <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
   * @param k Maximum reservoir size
   * @return New sketch built with the provided inputs
   */
  static ReservoirLongsSketch getInstance(final long[] data, final long itemsSeen,
                                          final ResizeFactor rf, final int k) {
    return new ReservoirLongsSketch(data, itemsSeen, rf, k);
  }

  /**
   * Returns the sketch's value of <i>k</i>, the maximum number of samples stored in the reservoir.
   * The current number of items in the sketch may be lower.
   *
   * @return k, the maximum number of samples in the reservoir
   */
  public int getK() {
    return reservoirSize_;
  }

  /**
   * Returns the number of items processed from the input stream
   *
   * @return n, the number of stream items the sketch has seen
   */
  public long getN() {
    return itemsSeen_;
  }

  /**
   * Returns the current number of items in the reservoir, which may be smaller than the reservoir
   * capacity.
   *
   * @return the number of items currently in the reservoir
   */
  public int getNumSamples() {
    return (int) Math.min(reservoirSize_, itemsSeen_);
  }

  /**
   * Returns a copy of the items in the reservoir. The returned array length may be smaller than the
   * reservoir capacity.
   *
   * @return A copy of the reservoir array, or null if the sketch is empty
   */
  public long[] getSamples() {
    if (itemsSeen_ == 0) {
      return null;
    }

    final int numSamples = (int) Math.min(reservoirSize_, itemsSeen_);
    return Arrays.copyOf(data_, numSamples);
  }

  /**
   * Randomly decide whether or not to include an item in the sample set.
   *
   * @param item a unit-weight (equivalently, unweighted) item of the set being sampled from
   */
  public void update(final long item) {
    if (itemsSeen_ == MAX_ITEMS_SEEN) {
      throw new SketchesStateException(
          "Sketch has exceeded capacity for total items seen: " + MAX_ITEMS_SEEN);
    }

    if (itemsSeen_ < reservoirSize_) { // initial phase, take the first reservoirSize_ items
      if (itemsSeen_ >= currItemsAlloc_) {
        growReservoir();
      }
      assert itemsSeen_ < currItemsAlloc_;
      // we'll randomize replacement positions, so in-order should be valid for now
      data_[(int) itemsSeen_] = item; // since less than reservoir size, cast is safe
      ++itemsSeen_;
    } else { // code for steady state where we sample randomly
      ++itemsSeen_;

      // prob(keep_item) = k / n = reservoirSize_ / itemsSeen_
      // so multiply to get: keep if rand * itemsSeen_ < reservoirSize_
      if ((SamplingUtil.rand().nextDouble() * itemsSeen_) < reservoirSize_) {
        final int newSlot = SamplingUtil.rand().nextInt(reservoirSize_);
        data_[newSlot] = item;
      }
    }
  }

  /**
   * Resets this sketch to the empty state, but retains the original value of k.
   */
  public void reset() {
    final int ceilingLgK = Util.exactLog2OfInt(Util.ceilingIntPowerOf2(reservoirSize_),
        "ReservoirLongsSketch");
    final int initialLgSize =
        SamplingUtil.startingSubMultiple(ceilingLgK, rf_.lg(), MIN_LG_ARR_LONGS);

    currItemsAlloc_ = SamplingUtil.getAdjustedSize(reservoirSize_, 1 << initialLgSize);
    data_ = new long[currItemsAlloc_];
    itemsSeen_ = 0;
  }

  /**
   * Returns a human-readable summary of the sketch, without items.
   *
   * @return A string version of the sketch summary
   */
  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();

    final String thisSimpleName = this.getClass().getSimpleName();

    sb.append(LS);
    sb.append("### ").append(thisSimpleName).append(" SUMMARY: ").append(LS);
    sb.append("   k            : ").append(reservoirSize_).append(LS);
    sb.append("   n            : ").append(itemsSeen_).append(LS);
    sb.append("   Current size : ").append(currItemsAlloc_).append(LS);
    sb.append("   Resize factor: ").append(rf_).append(LS);
    sb.append("### END SKETCH SUMMARY").append(LS);

    return sb.toString();
  }

  /**
   * Returns a human readable string of the preamble of a byte array image of a ReservoirLongsSketch.
   * @param byteArr the given byte array
   * @return a human readable string of the preamble of a byte array image of a ReservoirLongsSketch.
   */
  public static String toString(final byte[] byteArr) {
    return PreambleUtil.preambleToString(byteArr);
  }

  /**
   * Returns a human readable string of the preamble of a Memory image of a ReservoirLongsSketch.
   * @param mem the given Memory
   * @return a human readable string of the preamble of a Memory image of a ReservoirLongsSketch.
   */
  public static String toString(final Memory mem) {
    return PreambleUtil.preambleToString(mem);
  }

  /**
   * Returns a byte array representation of this sketch
   *
   * @return a byte array representation of this sketch
   */
  public byte[] toByteArray() {
    final int preLongs, outBytes;
    final boolean empty = itemsSeen_ == 0;
    final int numItems = (int) Math.min(reservoirSize_, itemsSeen_);

    if (empty) {
      preLongs = 1;
      outBytes = 8;
    } else {
      preLongs = Family.RESERVOIR.getMaxPreLongs();
      outBytes = (preLongs + numItems) << 3; // for longs, we know the size
    }
    final byte[] outArr = new byte[outBytes];
    final WritableMemory mem = WritableMemory.writableWrap(outArr);

    // build first preLong
    PreambleUtil.insertPreLongs(mem, preLongs);                  // Byte 0
    PreambleUtil.insertLgResizeFactor(mem, rf_.lg());
    PreambleUtil.insertSerVer(mem, SER_VER);                     // Byte 1
    PreambleUtil.insertFamilyID(mem, Family.RESERVOIR.getID());  // Byte 2
    if (empty) {
      PreambleUtil.insertFlags(mem, EMPTY_FLAG_MASK);            // Byte 3
    } else {
      PreambleUtil.insertFlags(mem, 0);
    }
    PreambleUtil.insertK(mem, reservoirSize_);                   // Bytes 4-7

    if (!empty) {
      // second preLong, only if non-empty
      PreambleUtil.insertN(mem, itemsSeen_);

      // insert the serialized samples, offset by the preamble size
      final int preBytes = preLongs << 3;
      mem.putLongArray(preBytes, data_, 0, numItems);
    }

    return outArr;
  }

  /**
   * Computes an estimated subset sum from the entire stream for objects matching a given
   * predicate. Provides a lower bound, estimate, and upper bound using a target of 2 standard
   * deviations.
   *
   * <p>This is technically a heuristic method, and tries to err on the conservative side.</p>
   *
   * @param predicate A predicate to use when identifying items.
   * @return A summary object containing the estimate, upper and lower bounds, and the total
   *         sketch weight.
   */
  public SampleSubsetSummary estimateSubsetSum(final Predicate<Long> predicate) {
    if (itemsSeen_ == 0) {
      return new SampleSubsetSummary(0.0, 0.0, 0.0, 0.0);
    }

    final int numSamples = getNumSamples();
    final double samplingRate = numSamples / (double) itemsSeen_;
    assert samplingRate >= 0.0;
    assert samplingRate <= 1.0;

    int predTrueCount = 0;
    for (int i = 0; i < numSamples; ++i) {
      if (predicate.test(data_[i])) {
        ++predTrueCount;
      }
    }

    // if in exact mode, we can return an exact answer
    if (itemsSeen_ <= reservoirSize_) {
      return new SampleSubsetSummary(predTrueCount, predTrueCount, predTrueCount, numSamples);
    }

    final double lbTrueFraction = pseudoHypergeometricLBonP(numSamples, predTrueCount, samplingRate);
    final double estimatedTrueFraction = (1.0 * predTrueCount) / numSamples;
    final double ubTrueFraction = pseudoHypergeometricUBonP(numSamples, predTrueCount, samplingRate);
    return new SampleSubsetSummary(
        itemsSeen_ * lbTrueFraction,
        itemsSeen_ * estimatedTrueFraction,
        itemsSeen_ * ubTrueFraction,
        itemsSeen_);
  }

  // Implicit per-sample weight: 1.0 while under-full, otherwise n / k.
  double getImplicitSampleWeight() {
    if (itemsSeen_ < reservoirSize_) {
      return 1.0;
    } else {
      return ((1.0 * itemsSeen_) / reservoirSize_);
    }
  }

  /**
   * Useful during union operations to avoid copying the items array around if only updating a few
   * points.
   *
   * @param pos The position from which to retrieve the element
   * @return The value in the reservoir at position <code>pos</code>
   */
  long getValueAtPosition(final int pos) {
    if (itemsSeen_ == 0) {
      throw new SketchesArgumentException("Requested element from empty reservoir.");
    } else if ((pos < 0) || (pos >= getNumSamples())) {
      throw new SketchesArgumentException("Requested position must be between 0 and "
          + (getNumSamples() - 1) + ", inclusive. Received: " + pos);
    }

    return data_[pos];
  }

  /**
   * Useful during union operation to force-insert a value into the union gadget. Does <em>NOT</em>
   * increment count of items seen. Cannot insert beyond current number of samples; if reservoir is
   * not full, use update().
   *
   * @param value The entry to store in the reservoir
   * @param pos The position at which to store the entry
   */
  void insertValueAtPosition(final long value, final int pos) {
    if ((pos < 0) || (pos >= getNumSamples())) {
      // Valid indices are 0..getNumSamples()-1; report that bound (the previous message
      // incorrectly named getNumSamples() as an inclusive upper bound).
      throw new SketchesArgumentException("Insert position must be between 0 and "
          + (getNumSamples() - 1) + ", inclusive. Received: " + pos);
    }

    data_[pos] = value;
  }

  /**
   * Used during union operations to update count of items seen. Does <em>NOT</em> check sign, but
   * will throw an exception if the final result exceeds the maximum possible items seen value.
   *
   * @param inc The value added
   */
  void forceIncrementItemsSeen(final long inc) {
    itemsSeen_ += inc;

    if (itemsSeen_ > MAX_ITEMS_SEEN) {
      throw new SketchesStateException("Sketch has exceeded capacity for total items seen. "
          + "Limit: " + MAX_ITEMS_SEEN + ", found: " + itemsSeen_);
    }
  }

  // Creates a copy of this sketch; the backing array is copied, the counters are shared values.
  ReservoirLongsSketch copy() {
    final long[] dataCopy = Arrays.copyOf(data_, currItemsAlloc_);
    return new ReservoirLongsSketch(reservoirSize_, currItemsAlloc_, itemsSeen_, rf_, dataCopy);
  }

  // Note: the downsampling approach may appear strange but avoids several edge cases
  //   Q1: Why not just permute samples and then take the first "newK" of them?
  //   A1: We're assuming the sketch source is read-only
  //   Q2: Why not copy the source sketch, permute samples, then truncate the sample array and
  //       reduce k?
  //   A2: That would involve allocating memory proportional to the old k. Even if only a
  //       temporary violation of maxK, we're avoiding violating it at all.
  ReservoirLongsSketch downsampledCopy(final int maxK) {
    final ReservoirLongsSketch rls = new ReservoirLongsSketch(maxK, rf_);
    for (final long l : getSamples()) {
      // Pretending old implicit weights are all 1. Not true in general, but they're all
      // equal so update should work properly as long as we update itemsSeen_ at the end.
      rls.update(l);
    }

    // need to adjust number seen to get correct new implicit weights
    if (rls.getN() < itemsSeen_) {
      rls.forceIncrementItemsSeen(itemsSeen_ - rls.getN());
    }

    return rls;
  }

  /**
   * Increases allocated sampling size by (adjusted) ResizeFactor and copies items from old sampling.
   */
  private void growReservoir() {
    currItemsAlloc_ = SamplingUtil.getAdjustedSize(reservoirSize_, currItemsAlloc_ * rf_.getValue());
    data_ = Arrays.copyOf(data_, currItemsAlloc_);
  }
}
| 2,664 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/VarOptItemsUnion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.sampling.PreambleUtil.EMPTY_FLAG_MASK;
import static org.apache.datasketches.sampling.PreambleUtil.FAMILY_BYTE;
import static org.apache.datasketches.sampling.PreambleUtil.SER_VER;
import static org.apache.datasketches.sampling.PreambleUtil.extractFlags;
import static org.apache.datasketches.sampling.PreambleUtil.extractMaxK;
import static org.apache.datasketches.sampling.PreambleUtil.extractN;
import static org.apache.datasketches.sampling.PreambleUtil.extractOuterTauDenominator;
import static org.apache.datasketches.sampling.PreambleUtil.extractOuterTauNumerator;
import static org.apache.datasketches.sampling.PreambleUtil.extractPreLongs;
import static org.apache.datasketches.sampling.PreambleUtil.extractSerVer;
import static org.apache.datasketches.sampling.VarOptItemsSketch.newInstanceFromUnionResult;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.datasketches.common.ArrayOfItemsSerDe;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* Provides a unioning operation over varopt sketches. This union allows the sample size k to float,
* possibly increasing or decreasing as warranted by the available data.
*
* @author Jon Malkin
* @author Kevin Lang
* @param <T> Type of items
*/
public final class VarOptItemsUnion<T> {
private VarOptItemsSketch<T> gadget_;
private final int maxK_;
private long n_; // cumulative over all input sketches
// outer tau is the largest tau of any input sketch
private double outerTauNumer; // total weight of all input R-zones where tau = outerTau
// total cardinality of the same R-zones, or zero if no input sketch was in estimation mode
private long outerTauDenom;
/*
IMPORTANT NOTE: the "gadget" in the union object appears to be a varopt sketch,
but in fact is NOT because it doesn't satisfy the mathematical definition
of a varopt sketch of the concatenated input streams. Therefore it could be different
from a true varopt sketch with that value of K, in which case it could easily provide
worse estimation accuracy for subset-sum queries.
This should not surprise you; the approximation guarantees of varopt sketches
do not apply to things that merely resemble varopt sketches.
However, even though the gadget is not a varopt sketch, the result
of the unioning process IS a varopt sketch. It is constructed by a
somewhat complicated "resolution" process which determines the largest K
that a valid varopt sketch could have given the available information,
then constructs a varopt sketch of that size and returns it.
However, the gadget itself is not touched during the resolution process,
and additional sketches could subsequently be merged into the union,
at which point a varopt result could again be requested.
*/
/*
Explanation of "marked items" in the union's gadget:
   The boolean value "true" in a pair indicates that the item
came from an input sketch's R zone, so it is already the result of sampling.
Therefore it must not wind up in the H zone of the final result, because
that would imply that the item is "exact".
However, it is okay for a marked item to hang out in the gadget's H zone for a while.
And once the item has moved to the gadget's R zone, the mark is never checked again,
so no effort is made to ensure that its value is preserved or even makes sense.
*/
/*
Note: if the computer could perform exact real-valued arithmetic, the union could finalize
its result by reducing k until inner_tau > outer_tau. [Due to the vagaries of floating point
arithmetic, we won't attempt to detect and specially handle the inner_tau = outer_tau special
case.]
   In fact, we won't even look at tau while reducing k. Instead the logic will be based
on the more robust integer quantity num_marks_in_h_ in the gadget. It is conceivable that due
to round-off error we could end up with inner_tau slightly less than outer_tau, but that should
be fairly harmless since we will have achieved our goal of getting the marked items out of H.
Also, you might be wondering why we are bothering to maintain the numerator and denominator
separately instead of just having a single variable outer_tau. This allows us (in certain
cases) to add an input's entire R-zone weight into the result sketch, as opposed to subdividing
it then adding it back up. That would be a source of numerical inaccuracy. And even
more importantly, this design choice allows us to exactly re-construct the input sketch
when there is only one of them.
*/
/**
* Empty constructor
*
* @param maxK Maximum allowed reservoir capacity for this union
*/
private VarOptItemsUnion(final int maxK) {
maxK_ = maxK;
n_ = 0;
outerTauNumer = 0.0;
outerTauDenom = 0;
gadget_ = VarOptItemsSketch.newInstanceAsGadget(maxK);
}
/**
* Creates an empty Union with a maximum capacity of size k.
*
* @param <T> The type of item this union contains
* @param maxK The maximum allowed capacity of the unioned result
* @return A new VarOptItemsUnion
*/
public static <T> VarOptItemsUnion<T> newInstance(final int maxK) {
return new VarOptItemsUnion<>(maxK);
}
/**
* Instantiates a Union from Memory
*
* @param <T> The type of item this sketch contains
* @param srcMem Memory object containing a serialized union
* @param serDe An instance of ArrayOfItemsSerDe
* @return A VarOptItemsUnion created from the provided Memory
*/
public static <T> VarOptItemsUnion<T> heapify(final Memory srcMem,
final ArrayOfItemsSerDe<T> serDe) {
Family.VAROPT_UNION.checkFamilyID(srcMem.getByte(FAMILY_BYTE));
long n = 0;
double outerTauNum = 0.0;
long outerTauDenom = 0;
final int numPreLongs = extractPreLongs(srcMem);
final int serVer = extractSerVer(srcMem);
final boolean isEmpty = (extractFlags(srcMem) & EMPTY_FLAG_MASK) != 0;
final int maxK = extractMaxK(srcMem);
if (!isEmpty) {
n = extractN(srcMem);
outerTauNum = extractOuterTauNumerator(srcMem);
outerTauDenom = extractOuterTauDenominator(srcMem);
}
if (serVer != SER_VER) {
throw new SketchesArgumentException(
"Possible Corruption: Ser Ver must be " + SER_VER + ": " + serVer);
}
final boolean preLongsEqMin = (numPreLongs == Family.VAROPT_UNION.getMinPreLongs());
final boolean preLongsEqMax = (numPreLongs == Family.VAROPT_UNION.getMaxPreLongs());
if (!preLongsEqMin && !preLongsEqMax) {
throw new SketchesArgumentException("Possible corruption: Non-empty union with only "
+ Family.VAROPT_UNION.getMinPreLongs() + "preLongs");
}
final VarOptItemsUnion<T> viu = new VarOptItemsUnion<>(maxK);
if (isEmpty) {
viu.gadget_ = VarOptItemsSketch.newInstanceAsGadget(maxK);
} else {
viu.n_ = n;
viu.outerTauNumer = outerTauNum;
viu.outerTauDenom = outerTauDenom;
final int preLongBytes = numPreLongs << 3;
final Memory sketchMem = srcMem.region(preLongBytes, srcMem.getCapacity() - preLongBytes);
viu.gadget_ = VarOptItemsSketch.heapify(sketchMem, serDe);
}
return viu;
}
/**
* Union the given sketch.
*
*<p>This method can be repeatedly called.</p>
*
* @param sketchIn The sketch to be merged
*/
public void update(final VarOptItemsSketch<T> sketchIn) {
if (sketchIn != null) {
mergeInto(sketchIn);
}
}
/**
* Union the given Memory image of the sketch.
*
*<p>This method can be repeatedly called.</p>
*
* @param mem Memory image of sketch to be merged
* @param serDe An instance of ArrayOfItemsSerDe
*/
public void update(final Memory mem, final ArrayOfItemsSerDe<T> serDe) {
if (mem != null) {
final VarOptItemsSketch<T> vis = VarOptItemsSketch.heapify(mem, serDe);
mergeInto(vis);
}
}
/**
* Union a reservoir sketch. The reservoir sample is treated as if all items were added with a
* weight of 1.0.
*
* @param reservoirIn The reservoir sketch to be merged
*/
public void update(final ReservoirItemsSketch<T> reservoirIn) {
if (reservoirIn != null) {
mergeReservoirInto(reservoirIn);
}
}
/**
* Gets the varopt sketch resulting from the union of any input sketches.
*
* @return A varopt sketch
*/
public VarOptItemsSketch<T> getResult() {
// If no marked items in H, gadget is already valid mathematically. We can return what is
// basically just a copy of the gadget.
if (gadget_.getNumMarksInH() == 0) {
return simpleGadgetCoercer();
} else {
// At this point, we know that marked items are present in H. So:
// 1. Result will necessarily be in estimation mode
// 2. Marked items currently in H need to be absorbed into reservoir (R)
final VarOptItemsSketch<T> tmp = detectAndHandleSubcaseOfPseudoExact();
if (tmp != null) {
// sub-case detected and handled, so return the result
return tmp;
} else {
// continue with main logic
return migrateMarkedItemsByDecreasingK();
}
}
}
/**
* Resets this sketch to the empty state, but retains the original value of max k.
*/
public void reset() {
gadget_.reset();
n_ = 0;
outerTauNumer = 0.0;
outerTauDenom = 0;
}
/**
* Returns a human-readable summary of the sketch, without items.
*
* @return A string version of the sketch summary
*/
@Override
public String toString() {
assert gadget_ != null;
final StringBuilder sb = new StringBuilder();
final String thisSimpleName = this.getClass().getSimpleName();
sb.append(LS)
.append("### ").append(thisSimpleName).append(" SUMMARY: ").append(LS)
.append(" Max k: ").append(maxK_).append(LS)
.append(" Gadget summary: ").append(gadget_.toString())
.append("### END UNION SUMMARY").append(LS);
return sb.toString();
}
/**
* Returns a byte array representation of this union
*
* @param serDe An instance of ArrayOfItemsSerDe
* @return a byte array representation of this union
*/
public byte[] toByteArray(final ArrayOfItemsSerDe<T> serDe) {
assert gadget_ != null;
if (gadget_.getNumSamples() == 0) {
return toByteArray(serDe, null);
} else {
return toByteArray(serDe, gadget_.getItem(0).getClass());
}
}
/**
* Returns a byte array representation of this union. This method should be used when the array
* elements are subclasses of a common base class.
*
* @param serDe An instance of ArrayOfItemsSerDe
* @param clazz A class to which the items are cast before serialization
* @return a byte array representation of this union
*/
// gadgetBytes will be null only if gadget_ == null AND empty == true
public byte[] toByteArray(final ArrayOfItemsSerDe<T> serDe, final Class<?> clazz) {
final int preLongs, outBytes;
final boolean empty = gadget_.getNumSamples() == 0;
final byte[] gadgetBytes = (empty ? null : gadget_.toByteArray(serDe, clazz));
if (empty) {
preLongs = Family.VAROPT_UNION.getMinPreLongs();
outBytes = 8;
} else {
preLongs = Family.VAROPT_UNION.getMaxPreLongs();
outBytes = (preLongs << 3) + gadgetBytes.length; // for longs, we know the size
}
final byte[] outArr = new byte[outBytes];
final WritableMemory mem = WritableMemory.writableWrap(outArr);
// build preLong
PreambleUtil.insertPreLongs(mem, preLongs); // Byte 0
PreambleUtil.insertSerVer(mem, SER_VER); // Byte 1
PreambleUtil.insertFamilyID(mem, Family.VAROPT_UNION.getID()); // Byte 2
if (empty) {
PreambleUtil.insertFlags(mem, EMPTY_FLAG_MASK);
} else {
PreambleUtil.insertFlags(mem, 0); // Byte 3
}
PreambleUtil.insertMaxK(mem, maxK_); // Bytes 4-7
if (!empty) {
PreambleUtil.insertN(mem, n_); // Bytes 8-15
PreambleUtil.insertOuterTauNumerator(mem, outerTauNumer); // Bytes 16-23
PreambleUtil.insertOuterTauDenominator(mem, outerTauDenom); // Bytes 24-31
final int preBytes = preLongs << 3;
mem.putByteArray(preBytes, gadgetBytes, 0, gadgetBytes.length);
}
return outArr;
}
// package-private for testing
double getOuterTau() {
if (outerTauDenom == 0) {
return 0.0;
} else {
return outerTauNumer / outerTauDenom;
}
}
private void mergeInto(final VarOptItemsSketch<T> sketch) {
final long sketchN = sketch.getN();
if (sketchN == 0) {
return;
}
n_ += sketchN;
final VarOptItemsSamples<T> sketchSamples = sketch.getSketchSamples();
// insert H region items
Iterator<VarOptItemsSamples<T>.WeightedSample> sketchIterator;
sketchIterator = sketchSamples.getHIterator();
while (sketchIterator.hasNext()) {
final VarOptItemsSamples<T>.WeightedSample ws = sketchIterator.next();
gadget_.update(ws.getItem(), ws.getWeight(), false);
}
// insert R region items
sketchIterator = sketchSamples.getWeightCorrRIter();
while (sketchIterator.hasNext()) {
final VarOptItemsSamples<T>.WeightedSample ws = sketchIterator.next();
gadget_.update(ws.getItem(), ws.getWeight(), true);
}
// resolve tau
if (sketch.getRRegionCount() > 0) {
final double sketchTau = sketch.getTau();
final double outerTau = getOuterTau();
if (outerTauDenom == 0) {
// detect first estimation mode sketch and grab its tau
outerTauNumer = sketch.getTotalWtR();
outerTauDenom = sketch.getRRegionCount();
} else if (sketchTau > outerTau) {
// switch to a bigger value of outerTau
outerTauNumer = sketch.getTotalWtR();
outerTauDenom = sketch.getRRegionCount();
} else if (sketchTau == outerTau) {
// Ok if previous equality test isn't quite perfect. Mistakes in either direction should
// be fairly benign.
// Without conceptually changing outerTau, update number and denominator. In particular,
// add the total weight of the incoming reservoir to the running total.
outerTauNumer += sketch.getTotalWtR();
outerTauDenom += sketch.getRRegionCount();
}
// do nothing if sketch's tau is smaller than outerTau
}
}
/**
* Used to merge a reservoir sample into varopt, assuming the reservoir was built with items
* of weight 1.0. Logic is very similar to mergeInto() for a sketch with no heavy items.
* @param reservoir Reservoir sketch to merge into this union
*/
private void mergeReservoirInto(final ReservoirItemsSketch<T> reservoir) {
final long reservoirN = reservoir.getN();
if (reservoirN == 0) {
return;
}
n_ += reservoirN;
final int reservoirK = reservoir.getK();
if (reservoir.getN() <= reservoirK) {
// exact mode, so just insert and be done
for (T item : reservoir.getRawSamplesAsList()) {
gadget_.update(item, 1.0, false);
}
} else {
// sampling mode. We'll replicate a weight-correcting iterator
final double reservoirTau = reservoir.getImplicitSampleWeight();
double cumWeight = 0.0;
final ArrayList<T> samples = reservoir.getRawSamplesAsList();
for (int i = 0; i < (reservoirK - 1); ++i) {
gadget_.update(samples.get(i), reservoirTau, true);
cumWeight += reservoirTau;
}
// correct for any numerical discrepancies with the last item
gadget_.update(samples.get(reservoirK - 1), reservoir.getN() - cumWeight, true);
// resolve tau
final double outerTau = getOuterTau();
if (outerTauDenom == 0) {
// detect first estimation mode sketch and grab its tau
outerTauNumer = reservoirN;
outerTauDenom = reservoirK;
} else if (reservoirTau > outerTau) {
// switch to a bigger value of outerTau
outerTauNumer = reservoirN;
outerTauDenom = reservoirK;
} else if (reservoirTau == outerTau) {
// Ok if previous equality test isn't quite perfect. Mistakes in either direction should
// be fairly benign.
// Without conceptually changing outerTau, update number and denominator. In particular,
// add the total weight of the incoming reservoir to the running total.
outerTauNumer += reservoirN;
outerTauDenom += reservoirK;
}
// do nothing if reservoir "tau" is no smaller than outerTau
}
}
  /**
   * When there are no marked items in H, the gadget is mathematically equivalent to a valid
   * varopt sketch. This method simply returns a copy (without preserving marks).
   *
   * @return A shallow copy of the gadget as valid varopt sketch
   */
  private VarOptItemsSketch<T> simpleGadgetCoercer() {
    assert gadget_.getNumMarksInH() == 0;
    // NOTE(review): first arg presumably selects "copy as a valid (non-gadget) sketch";
    // confirm against VarOptItemsSketch.copyAndSetN
    return gadget_.copyAndSetN(true, n_);
  }
  /**
   * This coercer directly transfers marked items from the gadget's H into the result's R.
   * Deciding whether that is a valid thing to do is the responsibility of the caller. Currently,
   * this is only used for a subcase of pseudo-exact, but later it might be used by other
   * subcases as well.
   *
   * @return A sketch derived from the gadget, with marked items moved to the reservoir
   */
  private VarOptItemsSketch<T> markMovingGadgetCoercer() {
    final int resultK = gadget_.getHRegionCount() + gadget_.getRRegionCount();
    int resultH = 0;
    int resultR = 0;
    int nextRPos = resultK; // = (resultK+1)-1, to fill R region from back to front
    // data/weights are sized resultK+1 to leave one "gap" slot between H and R regions
    final ArrayList<T> data = new ArrayList<>(resultK + 1);
    final ArrayList<Double> weights = new ArrayList<>(resultK + 1);
    // Need arrays filled to use set() and be able to fill from end forward.
    // Ideally would create as arrays but trying to avoid forcing user to pass a Class<?>
    for (int i = 0; i < (resultK + 1); ++i) {
      data.add(null);
      weights.add(null);
    }
    final VarOptItemsSamples<T> sketchSamples = gadget_.getSketchSamples();
    // insert R region items, ignoring weights
    // Currently (May 2017) this next block is unreachable; this coercer is used only in the
    // pseudo-exact case in which case there are no items natively in R, only marked items in H
    // that will be moved into R as part of the coercion process.
    Iterator<VarOptItemsSamples<T>.WeightedSample> sketchIterator;
    sketchIterator = sketchSamples.getRIterator();
    while (sketchIterator.hasNext()) {
      final VarOptItemsSamples<T>.WeightedSample ws = sketchIterator.next();
      data.set(nextRPos, ws.getItem());
      weights.set(nextRPos, -1.0); // -1.0 placeholder: R items share one implicit weight
      ++resultR;
      --nextRPos;
    }
    double transferredWeight = 0;
    // insert H region items: unmarked items stay in H (front), marked items move to R (back)
    sketchIterator = sketchSamples.getHIterator();
    while (sketchIterator.hasNext()) {
      final VarOptItemsSamples<T>.WeightedSample ws = sketchIterator.next();
      if (ws.getMark()) {
        data.set(nextRPos, ws.getItem());
        weights.set(nextRPos, -1.0);
        transferredWeight += ws.getWeight();
        ++resultR;
        --nextRPos;
      } else {
        data.set(resultH, ws.getItem());
        weights.set(resultH, ws.getWeight());
        ++resultH;
      }
    }
    assert (resultH + resultR) == resultK;
    // total weight moved out of H must equal the tracked outer-tau numerator
    assert Math.abs(transferredWeight - outerTauNumer) < 1e-10;
    final double resultRWeight = gadget_.getTotalWtR() + transferredWeight;
    final long resultN = n_;
    // explicitly set values for the gap
    data.set(resultH, null);
    weights.set(resultH, -1.0);
    // create sketch with the new values
    return newInstanceFromUnionResult(data, weights, resultK, resultN, resultH, resultR, resultRWeight);
  }
private VarOptItemsSketch<T> detectAndHandleSubcaseOfPseudoExact() {
// gadget is seemingly exact
final boolean condition1 = gadget_.getRRegionCount() == 0;
// but there are marked items in H, so only _pseudo_ exact
final boolean condition2 = gadget_.getNumMarksInH() > 0;
// if gadget is pseudo-exact and the number of marks equals outerTauDenom, then we can deduce
// from the bookkeeping logic of mergeInto() that all estimation mode input sketches must
// have had the same tau, so we can throw all of the marked items into a common reservoir.
final boolean condition3 = gadget_.getNumMarksInH() == outerTauDenom;
if (!(condition1 && condition2 && condition3)) {
return null;
} else {
// explicitly enforce rule that items in H should not be lighter than the sketch's tau
final boolean antiCondition4 = thereExistUnmarkedHItemsLighterThanTarget(gadget_.getTau());
if (antiCondition4) {
return null;
} else {
// conditions 1 through 4 hold
return markMovingGadgetCoercer();
}
}
}
// this is a condition checked in detectAndHandleSubcaseOfPseudoExact()
private boolean thereExistUnmarkedHItemsLighterThanTarget(final double threshold) {
for (int i = 0; i < gadget_.getHRegionCount(); ++i) {
if ((gadget_.getWeight(i) < threshold) && !gadget_.getMark(i)) {
return true;
}
}
return false;
}
  // this is basically a continuation of getResult()
  // Repeatedly shrinks k on a copy of the gadget until every marked item has been pushed
  // out of H and into the reservoir, producing a valid varopt sketch.
  private VarOptItemsSketch<T> migrateMarkedItemsByDecreasingK() {
    final VarOptItemsSketch<T> gcopy = gadget_.copyAndSetN(false, n_);
    final int rCount = gcopy.getRRegionCount();
    final int hCount = gcopy.getHRegionCount();
    final int k = gcopy.getK();
    assert gcopy.getNumMarksInH() > 0; // ensured by caller
    // either full (of samples), or in pseudo-exact mode, or both
    assert (rCount == 0) || (k == (hCount + rCount));
    // if non-full and pseudo-exact, change k so that gcopy is full
    if ((rCount == 0) && (hCount < k)) {
      gcopy.forceSetK(hCount);
    }
    // Now k equals the number of samples, so reducing k will increase tau.
    // Also, we know that there are at least 2 samples because 0 or 1 would have been handled
    // by the earlier logic in getResult()
    assert gcopy.getK() >= 2;
    gcopy.decreaseKBy1();
    // gcopy is now in estimation mode, just like the final result must be (due to marked items)
    assert gcopy.getRRegionCount() > 0;
    assert gcopy.getTau() > 0.0;
    // keep reducing k until all marked items have been absorbed into the reservoir
    while (gcopy.getNumMarksInH() > 0) {
      assert gcopy.getK() >= 2; // because h_ and r_ are both at least 1
      gcopy.decreaseKBy1();
    }
    // marks are no longer meaningful once everything marked lives in R
    gcopy.stripMarks();
    return gcopy;
  }
}
| 2,665 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/PreambleUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.common.Util.zeroPad;
import java.nio.ByteOrder;
import java.util.Locale;
import org.apache.datasketches.common.Family;
import org.apache.datasketches.common.ResizeFactor;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
//@formatter:off
/**
* This class defines the preamble items structure and provides basic utilities for some of the key
* fields.
*
* <p>
* MAP: Low significance bytes of this <i>long</i> items structure are on the right. However, the
* multi-byte integers (<i>int</i> and <i>long</i>) are stored in native byte order. The
* <i>byte</i> values are treated as unsigned.</p>
*
* <p><strong>Sketch:</strong> The count of items seen is limited to 48 bits (~256 trillion) even
* though there are adjacent unused preamble bits. The acceptance probability for an item is a
* double in the range [0,1), limiting us to 53 bits of randomness due to details of the IEEE
* floating point format. To ensure meaningful probabilities as the items seen count approaches
* capacity, we intentionally use slightly fewer bits.</p>
*
* <p>An empty reservoir sampling sketch only requires 8 bytes. A non-empty sampling sketch
* requires 16 bytes of preamble.</p>
*
* <pre>
* Long || Start Byte Adr:
* Adr:
* || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* 0 ||--------Reservoir Size (K)---------| Flags | FamID | SerVer | Preamble_Longs |
*
* || 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 |
* 1 ||------------------------------Items Seen Count (N)---------------------------------|
* </pre>
*
* <p><strong>Union:</strong> The reservoir union has fewer internal parameters to track and uses
* a slightly different preamble structure. The maximum reservoir size intentionally occupies the
* same byte range as the reservoir size in the sketch preamble, allowing the same methods to be
* used for reading and writing the values. The varopt union takes advantage of the same format.
* The items in the union are stored in a reservoir sketch-compatible format after the union
* preamble.
* </p>
*
* <p>An empty union only requires 8 bytes. A non-empty union requires 8 bytes of preamble.</p>
*
* <pre>
* Long || Start Byte Adr:
* Adr:
* || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* 0 ||---------Max Res. Size (K)---------| Flags | FamID | SerVer | Preamble_Longs |
* </pre>
*
* <p><strong>VarOpt:</strong> A VarOpt sketch has a more complex internal items structure and
* requires a larger preamble. Values serving a similar purpose in both reservoir and varopt sampling
* share the same byte ranges, allowing method re-use where practical.</p>
*
* <p>An empty varopt sample requires 8 bytes. A non-empty sketch requires 16 bytes of preamble
* for an under-full sample and otherwise 32 bytes of preamble.</p>
*
* <pre>
* Long || Start Byte Adr:
* Adr:
* || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* 0 ||--------Reservoir Size (K)---------| Flags | FamID | SerVer | Preamble_Longs |
*
* || 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 |
* 1 ||------------------------------Items Seen Count (N)---------------------------------|
*
* || 23 | 22 | 21 | 20 | 19 | 18 | 17 | 16 |
* 2 ||---------Item Count in R-----------|-----------Item Count in H---------------------|
*
* || 31 | 30 | 29 | 28 | 27 | 26 | 25 | 24 |
* 3 ||--------------------------------Total Weight in R----------------------------------|
* </pre>
*
* <p><strong>VarOpt Union:</strong> VarOpt unions also store more information than a reservoir
 * sketch. As before, we keep values with similar or the same meaning in corresponding locations
 * across sketch and union formats. The items in the union are stored in a varopt sketch-compatible
* format after the union preamble.</p>
*
* <p>An empty union only requires 8 bytes. A non-empty union requires 32 bytes of preamble.</p>
*
* <pre>
* Long || Start Byte Adr:
* Adr:
* || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* 0 ||---------Max Res. Size (K)---------| Flags | FamID | SerVer | Preamble_Longs |
*
* || 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 |
* 1 ||------------------------------Items Seen Count (N)---------------------------------|
*
* || 23 | 22 | 21 | 20 | 19 | 18 | 17 | 16 |
* 2 ||---------------------------Outer Tau Numerator (double)----------------------------|
*
* || 31 | 30 | 29 | 28 | 27 | 26 | 25 | 24 |
* 3 ||---------------------------Outer Tau Denominator (long)----------------------------|
* </pre>
*
* @author Jon Malkin
* @author Lee Rhodes
*/
final class PreambleUtil {
  private PreambleUtil() {} // static utility class: prevent instantiation
// ###### DO NOT MESS WITH THIS FROM HERE ...
// Preamble byte Addresses
static final int PREAMBLE_LONGS_BYTE = 0; // Only low 6 bits used
static final int LG_RESIZE_FACTOR_BIT = 6; // upper 2 bits. Not used by compact or direct.
static final int SER_VER_BYTE = 1;
static final int FAMILY_BYTE = 2;
static final int FLAGS_BYTE = 3;
static final int RESERVOIR_SIZE_SHORT = 4; // used in ser_ver 1
static final int RESERVOIR_SIZE_INT = 4;
static final int SERDE_ID_SHORT = 6; // used in ser_ver 1
static final int ITEMS_SEEN_LONG = 8;
static final int MAX_K_SIZE_INT = 4; // used in Union only
static final int OUTER_TAU_NUM_DOUBLE = 16; // used in Varopt Union only
static final int OUTER_TAU_DENOM_LONG = 24; // used in Varopt Union only
// constants and addresses used in varopt
static final int ITEM_COUNT_H_INT = 16;
static final int ITEM_COUNT_R_INT = 20;
static final int TOTAL_WEIGHT_R_DOUBLE = 24;
static final int VO_PRELONGS_EMPTY = Family.VAROPT.getMinPreLongs();
static final int VO_PRELONGS_WARMUP = 3; // Doesn't match min or max prelongs in Family
static final int VO_PRELONGS_FULL = Family.VAROPT.getMaxPreLongs();
// flag bit masks
//static final int BIG_ENDIAN_FLAG_MASK = 1;
//static final int READ_ONLY_FLAG_MASK = 2;
static final int EMPTY_FLAG_MASK = 4;
static final int GADGET_FLAG_MASK = 128;
//Other constants
static final int SER_VER = 2;
static final boolean NATIVE_ORDER_IS_BIG_ENDIAN =
(ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN);
// STRINGS
/**
* Returns a human readable string summary of the preamble state of the given byte array.
* Used primarily in testing.
*
* @param byteArr the given byte array.
* @return the summary preamble string.
*/
static String preambleToString(final byte[] byteArr) {
final Memory mem = Memory.wrap(byteArr);
return preambleToString(mem);
}
/**
* Returns a human readable string summary of the preamble state of the given Memory.
* Note: other than making sure that the given Memory size is large
* enough for just the preamble, this does not do much value checking of the contents of the
* preamble as this is primarily a tool for debugging the preamble visually.
*
* @param mem the given Memory.
* @return the summary preamble string.
*/
static String preambleToString(final Memory mem) {
final int preLongs = getAndCheckPreLongs(mem); // make sure we can get the assumed preamble
final Family family = Family.idToFamily(mem.getByte(FAMILY_BYTE));
switch (family) {
case RESERVOIR:
case VAROPT:
return sketchPreambleToString(mem, family, preLongs);
case RESERVOIR_UNION:
case VAROPT_UNION:
return unionPreambleToString(mem, family, preLongs);
default:
throw new SketchesArgumentException("Inspecting preamble with Sampling family's "
+ "PreambleUtil with object of family " + family.getFamilyName());
}
}
  // Formats the preamble of a RESERVOIR or VAROPT sketch image for human inspection.
  private static String sketchPreambleToString(final Memory mem,
                                               final Family family,
                                               final int preLongs) {
    final ResizeFactor rf = ResizeFactor.getRF(extractResizeFactor(mem));
    final int serVer = extractSerVer(mem);
    // Flags
    final int flags = extractFlags(mem);
    final String flagsStr = zeroPad(Integer.toBinaryString(flags), 8) + ", " + (flags);
    //final boolean bigEndian = (flags & BIG_ENDIAN_FLAG_MASK) > 0;
    //final String nativeOrder = ByteOrder.nativeOrder().toString();
    //final boolean readOnly = (flags & READ_ONLY_FLAG_MASK) > 0;
    final boolean isEmpty = (flags & EMPTY_FLAG_MASK) > 0;
    final boolean isGadget = (flags & GADGET_FLAG_MASK) > 0;
    // ser_ver 1 used a 16-bit encoded reservoir size; later versions store k as a plain int
    final int k;
    if (serVer == 1) {
      final short encK = extractEncodedReservoirSize(mem);
      k = ReservoirSize.decodeValue(encK);
    } else {
      k = extractK(mem);
    }
    long n = 0;
    if (!isEmpty) {
      n = extractN(mem);
    }
    final long dataBytes = mem.getCapacity() - (preLongs << 3);
    final StringBuilder sb = new StringBuilder();
    // NOTE(review): the opening header also reads "### END ..." — looks like a copy-paste
    // of the closing line; confirm whether "### " was intended here.
    sb.append(LS)
      .append("### END ")
      .append(family.getFamilyName().toUpperCase(Locale.US))
      .append(" PREAMBLE SUMMARY").append(LS)
      .append("Byte 0: Preamble Longs : ").append(preLongs).append(LS)
      .append("Byte 0: ResizeFactor : ").append(rf.toString()).append(LS)
      .append("Byte 1: Serialization Version: ").append(serVer).append(LS)
      .append("Byte 2: Family : ").append(family.toString()).append(LS)
      .append("Byte 3: Flags Field : ").append(flagsStr).append(LS)
      //.append(" BIG_ENDIAN_STORAGE : ").append(bigEndian).append(LS)
      //.append(" (Native Byte Order) : ").append(nativeOrder).append(LS)
      //.append(" READ_ONLY : ").append(readOnly).append(LS)
      .append(" EMPTY : ").append(isEmpty).append(LS);
    if (family == Family.VAROPT) {
      sb.append(" GADGET : ").append(isGadget).append(LS);
    }
    sb.append("Bytes 4-7: Sketch Size (k) : ").append(k).append(LS);
    if (!isEmpty) {
      sb.append("Bytes 8-15: Items Seen (n) : ").append(n).append(LS);
    }
    if ((family == Family.VAROPT) && !isEmpty) {
      final int hCount = extractHRegionItemCount(mem);
      final int rCount = extractRRegionItemCount(mem);
      final double totalRWeight = extractTotalRWeight(mem);
      sb.append("Bytes 16-19: H region count : ").append(hCount).append(LS)
        .append("Bytes 20-23: R region count : ").append(rCount).append(LS);
      if (rCount > 0) {
        sb.append("Bytes 24-31: R region weight : ").append(totalRWeight).append(LS);
      }
    }
    sb.append("TOTAL Sketch Bytes : ").append(mem.getCapacity()).append(LS)
      .append(" Preamble Bytes : ").append(preLongs << 3).append(LS)
      .append(" Data Bytes : ").append(dataBytes).append(LS)
      .append("### END ")
      .append(family.getFamilyName().toUpperCase(Locale.US))
      .append(" PREAMBLE SUMMARY").append(LS);
    return sb.toString();
  }
  // Formats the preamble of a RESERVOIR_UNION or VAROPT_UNION image for human inspection.
  private static String unionPreambleToString(final Memory mem,
                                              final Family family,
                                              final int preLongs) {
    final ResizeFactor rf = ResizeFactor.getRF(extractResizeFactor(mem));
    final int serVer = extractSerVer(mem);
    // Flags
    final int flags = extractFlags(mem);
    final String flagsStr = zeroPad(Integer.toBinaryString(flags), 8) + ", " + (flags);
    //final boolean bigEndian = (flags & BIG_ENDIAN_FLAG_MASK) > 0;
    //final String nativeOrder = ByteOrder.nativeOrder().toString();
    //final boolean readOnly = (flags & READ_ONLY_FLAG_MASK) > 0;
    final boolean isEmpty = (flags & EMPTY_FLAG_MASK) > 0;
    // ser_ver 1 used a 16-bit encoded reservoir size; later versions store k as a plain int
    final int k;
    if (serVer == 1) {
      final short encK = extractEncodedReservoirSize(mem);
      k = ReservoirSize.decodeValue(encK);
    } else {
      k = extractK(mem);
    }
    final long dataBytes = mem.getCapacity() - (preLongs << 3);
    // NOTE(review): the opening header also reads "### END ..." — confirm intent.
    return LS
            + "### END " + family.getFamilyName().toUpperCase(Locale.US) + " PREAMBLE SUMMARY" + LS
            + "Byte 0: Preamble Longs : " + preLongs + LS
            + "Byte 0: ResizeFactor : " + rf.toString() + LS
            + "Byte 1: Serialization Version : " + serVer + LS
            + "Byte 2: Family : " + family.toString() + LS
            + "Byte 3: Flags Field : " + flagsStr + LS
            //+ " BIG_ENDIAN_STORAGE : " + bigEndian + LS
            //+ " (Native Byte Order) : " + nativeOrder + LS
            //+ " READ_ONLY : " + readOnly + LS
            + " EMPTY : " + isEmpty + LS
            + "Bytes 4-7: Max Sketch Size (maxK): " + k + LS
            + "TOTAL Sketch Bytes : " + mem.getCapacity() + LS
            + " Preamble Bytes : " + (preLongs << 3) + LS
            + " Sketch Bytes : " + dataBytes + LS
            + "### END " + family.getFamilyName().toUpperCase(Locale.US) + " PREAMBLE SUMMARY" + LS;
  }
// Extraction methods
// Each accessor reads a single preamble field at its fixed byte offset (the *_BYTE /
// *_SHORT / *_INT / *_LONG / *_DOUBLE constants are declared elsewhere in this file).
// Masks such as 0xFF widen signed byte reads to their unsigned values.
/** Low 6 bits of byte 0: the preamble length in longs. */
static int extractPreLongs(final Memory mem) {
return mem.getByte(PREAMBLE_LONGS_BYTE) & 0x3F;
}
/** Top 2 bits of byte 0: the lg(ResizeFactor) code. */
static int extractResizeFactor(final Memory mem) {
return (mem.getByte(PREAMBLE_LONGS_BYTE) >>> LG_RESIZE_FACTOR_BIT) & 0x3;
}
/** Byte 1: serialization version, unsigned. */
static int extractSerVer(final Memory mem) {
return mem.getByte(SER_VER_BYTE) & 0xFF;
}
/** Byte 2: family ID, unsigned. */
static int extractFamilyID(final Memory mem) {
return mem.getByte(FAMILY_BYTE) & 0xFF;
}
/** Byte 3: flags bit field, unsigned. */
static int extractFlags(final Memory mem) {
return mem.getByte(FLAGS_BYTE) & 0xFF;
}
/** Ser-ver 1 only: the encoded 16-bit reservoir size (see ReservoirSize.decodeValue). */
static short extractEncodedReservoirSize(final Memory mem) {
return mem.getShort(RESERVOIR_SIZE_SHORT);
}
/** Bytes 4-7: sketch size k (ser-ver 2+). */
static int extractK(final Memory mem) {
return mem.getInt(RESERVOIR_SIZE_INT);
}
/** Union images store maxK in the same slot as k. */
static int extractMaxK(final Memory mem) {
return extractK(mem);
}
/** Bytes 8-15: total items seen, n. */
static long extractN(final Memory mem) {
return mem.getLong(ITEMS_SEEN_LONG);
}
/** VarOpt only: item count of the H (heavy) region. */
static int extractHRegionItemCount(final Memory mem) {
return mem.getInt(ITEM_COUNT_H_INT);
}
/** VarOpt only: item count of the R (reservoir) region. */
static int extractRRegionItemCount(final Memory mem) {
return mem.getInt(ITEM_COUNT_R_INT);
}
/** VarOpt only: total weight of the R region. */
static double extractTotalRWeight(final Memory mem) {
return mem.getDouble(TOTAL_WEIGHT_R_DOUBLE);
}
/** VarOpt union only: outer tau numerator. */
static double extractOuterTauNumerator(final Memory mem) {
return mem.getDouble(OUTER_TAU_NUM_DOUBLE);
}
/** VarOpt union only: outer tau denominator. */
static long extractOuterTauDenominator(final Memory mem) {
return mem.getLong(OUTER_TAU_DENOM_LONG);
}
// Insertion methods
// Mirror images of the extraction methods above: each writes one preamble field at its
// fixed offset. The two byte-0 writers use read-modify-write so the other bit field in
// that byte is preserved.
/** Writes preLongs into the low 6 bits of byte 0, preserving the resize-factor bits. */
static void insertPreLongs(final WritableMemory wmem, final int preLongs) {
final int curByte = wmem.getByte(PREAMBLE_LONGS_BYTE);
final int mask = 0x3F;
final byte newByte = (byte) ((preLongs & mask) | (~mask & curByte));
wmem.putByte(PREAMBLE_LONGS_BYTE, newByte);
}
/** Writes the lg(ResizeFactor) code into the top bits of byte 0, preserving preLongs. */
static void insertLgResizeFactor(final WritableMemory wmem, final int rf) {
final int curByte = wmem.getByte(PREAMBLE_LONGS_BYTE);
final int shift = LG_RESIZE_FACTOR_BIT; // shift in bits
final int mask = 3;
final byte newByte = (byte) (((rf & mask) << shift) | (~(mask << shift) & curByte));
wmem.putByte(PREAMBLE_LONGS_BYTE, newByte);
}
/** Byte 1: serialization version. */
static void insertSerVer(final WritableMemory wmem, final int serVer) {
wmem.putByte(SER_VER_BYTE, (byte) serVer);
}
/** Byte 2: family ID. */
static void insertFamilyID(final WritableMemory wmem, final int famId) {
wmem.putByte(FAMILY_BYTE, (byte) famId);
}
/** Byte 3: flags bit field. */
static void insertFlags(final WritableMemory wmem, final int flags) {
wmem.putByte(FLAGS_BYTE, (byte) flags);
}
/** Bytes 4-7: sketch size k. */
static void insertK(final WritableMemory wmem, final int k) {
wmem.putInt(RESERVOIR_SIZE_INT, k);
}
/** Union images store maxK in the same slot as k. */
static void insertMaxK(final WritableMemory wmem, final int maxK) {
insertK(wmem, maxK);
}
/** Bytes 8-15: total items seen, n. */
static void insertN(final WritableMemory wmem, final long totalSeen) {
wmem.putLong(ITEMS_SEEN_LONG, totalSeen);
}
/** VarOpt only: item count of the H (heavy) region. */
static void insertHRegionItemCount(final WritableMemory wmem, final int hCount) {
wmem.putInt(ITEM_COUNT_H_INT, hCount);
}
/** VarOpt only: item count of the R (reservoir) region. */
static void insertRRegionItemCount(final WritableMemory wmem, final int rCount) {
wmem.putInt(ITEM_COUNT_R_INT, rCount);
}
/** VarOpt only: total weight of the R region. */
static void insertTotalRWeight(final WritableMemory wmem, final double weight) {
wmem.putDouble(TOTAL_WEIGHT_R_DOUBLE, weight);
}
/** VarOpt union only: outer tau numerator. */
static void insertOuterTauNumerator(final WritableMemory wmem, final double numer) {
wmem.putDouble(OUTER_TAU_NUM_DOUBLE, numer);
}
/** VarOpt union only: outer tau denominator. */
static void insertOuterTauDenominator(final WritableMemory wmem, final long denom) {
wmem.putLong(OUTER_TAU_DENOM_LONG, denom);
}
/**
 * Validates that the given Memory is large enough to hold its own preamble and returns
 * the extracted preLongs value.
 * @param mem the given Memory
 * @return the extracted prelongs value.
 */
static int getAndCheckPreLongs(final Memory mem) {
  final long capBytes = mem.getCapacity();
  // Need at least one long before we can even read the preLongs field.
  if (capBytes < 8) { throwNotBigEnough(capBytes, 8); }
  final int preLongs = mem.getByte(0) & 0x3F;
  final int minBytes = Math.max(preLongs << 3, 8);
  if (capBytes < minBytes) { throwNotBigEnough(capBytes, minBytes); }
  return preLongs;
}
/** Throws a SketchesArgumentException reporting an undersized byte array or Memory. */
private static void throwNotBigEnough(final long cap, final int required) {
  final String msg =
      "Possible Corruption: Size of byte array or Memory not large enough: Size: " + cap
      + ", Required: " + required;
  throw new SketchesArgumentException(msg);
}
}
| 2,666 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* <p>This package is dedicated to streaming algorithms that enable fixed size, uniform sampling of
* weighted and unweighted items from a stream.</p>
*
* <p>These sketches are mergeable and can be serialized and deserialized to/from a compact
* form.</p>
* @see org.apache.datasketches.sampling.ReservoirItemsSketch
* @see org.apache.datasketches.sampling.ReservoirLongsSketch
* @see org.apache.datasketches.sampling.VarOptItemsSketch
*/
package org.apache.datasketches.sampling;
| 2,667 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/sampling/SamplingUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.sampling;
import static org.apache.datasketches.common.BoundsOnBinomialProportions.approximateLowerBoundOnP;
import static org.apache.datasketches.common.BoundsOnBinomialProportions.approximateUpperBoundOnP;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
/**
* Common utility functions for the sampling family of sketches.
*
* @author Jon Malkin
*/
final class SamplingUtil {

  /**
   * Number of standard deviations to use for subset sum error bounds
   */
  private static final double DEFAULT_KAPPA = 2.0;

  private SamplingUtil() {} // static utility class; not instantiable

  /**
   * Checks if the target sampling allocation is more than 50% of the max sampling size.
   * If so, returns the max sampling size, otherwise passes through the target size.
   *
   * @param maxSize Maximum allowed reservoir size, as from getK()
   * @param resizeTarget Next size based on a pure ResizeFactor scaling
   * @return {@code (maxSize < 2 * resizeTarget ? maxSize : resizeTarget)}
   */
  static int getAdjustedSize(final int maxSize, final int resizeTarget) {
    // BUG FIX: the previous form (maxSize - (resizeTarget << 1) < 0L) performed the shift
    // and subtraction in int arithmetic before widening, so a large resizeTarget could
    // overflow and invert the comparison. Widen first, then compare.
    if ((long) maxSize < ((long) resizeTarget << 1)) {
      return maxSize;
    }
    return resizeTarget;
  }

  /**
   * Returns a uniform random double in (0.0, 1.0), rejecting the (rare) exact 0.0 draw.
   * @return a random double strictly greater than zero
   */
  static double nextDoubleExcludeZero() {
    double r = rand().nextDouble();
    while (r == 0.0) {
      r = rand().nextDouble();
    }
    return r;
  }

  /**
   * Computes the starting lg size when growing from lgMin toward lgTarget in steps of lgRf.
   * @param lgTarget the log of the target size
   * @param lgRf the log of the resize factor (0 means no resizing)
   * @param lgMin the log of the minimum size
   * @return the starting lg sub-multiple
   */
  static int startingSubMultiple(final int lgTarget, final int lgRf, final int lgMin) {
    return (lgTarget <= lgMin)
        ? lgMin : (lgRf == 0) ? lgTarget
        : (lgTarget - lgMin) % lgRf + lgMin;
  }

  /**
   * Upper bound on the binomial proportion, with kappa shrunk by sqrt(1 - samplingRate).
   * @param n the number of trials
   * @param k the number of successes
   * @param samplingRate the effective sampling rate in [0, 1]
   * @return the adjusted upper bound on p
   */
  static double pseudoHypergeometricUBonP(final long n, final int k, final double samplingRate) {
    final double adjustedKappa = DEFAULT_KAPPA * Math.sqrt(1 - samplingRate);
    return approximateUpperBoundOnP(n, k, adjustedKappa);
  }

  /**
   * Lower bound on the binomial proportion, with kappa shrunk by sqrt(1 - samplingRate).
   * @param n the number of trials
   * @param k the number of successes
   * @param samplingRate the effective sampling rate in [0, 1]
   * @return the adjusted lower bound on p
   */
  static double pseudoHypergeometricLBonP(final long n, final int k, final double samplingRate) {
    final double adjustedKappa = DEFAULT_KAPPA * Math.sqrt(1 - samplingRate);
    return approximateLowerBoundOnP(n, k, adjustedKappa);
  }

  /**
   * Returns the per-thread random generator used by the sampling sketches.
   * @return the current thread's ThreadLocalRandom
   */
  public static Random rand() {
    return ThreadLocalRandom.current();
  }
}
| 2,668 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/ReqSketchIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import java.util.List;
import org.apache.datasketches.quantilescommon.QuantilesFloatsSketchIterator;
/**
* Iterator over all retained items of the ReqSketch. The order is not defined.
*
* @author Lee Rhodes
*/
public final class ReqSketchIterator implements QuantilesFloatsSketchIterator {
  private List<ReqCompactor> compactors;
  private int cIndex; // index of the current compactor; doubles as its lg weight
  private int bIndex; // index within the current buffer; -1 before the first next()
  private int numRetainedItems;
  private FloatBuffer currentBuf;

  ReqSketchIterator(final ReqSketch sketch) {
    compactors = sketch.getCompactors();
    numRetainedItems = sketch.getNumRetained();
    currentBuf = compactors.get(0).getBuffer();
    cIndex = 0;
    bIndex = -1;
  }

  /**
   * Returns the quantile at the current iterator position.
   * Only valid after next() has returned true.
   * @return the current quantile
   */
  @Override
  public float getQuantile() {
    return currentBuf.getItem(bIndex);
  }

  /**
   * Returns the natural weight of the current item, which is 2^cIndex.
   * @return the current item's weight
   */
  @Override
  public long getWeight() {
    // BUG FIX: was (1 << cIndex), an int shift widened to long after the fact; use a long
    // shift so the declared long contract holds for any compactor index.
    return 1L << cIndex;
  }

  /**
   * Advances to the next retained item, walking each compactor's buffer in turn.
   * @return true if another item is available
   */
  @Override
  public boolean next() {
    if ((numRetainedItems == 0)
        || ((cIndex == (compactors.size() - 1)) && (bIndex == (currentBuf.getCount() - 1)))) {
      return false; // empty sketch, or already at the last item of the last compactor
    }
    if (bIndex == (currentBuf.getCount() - 1)) {
      // Exhausted this buffer; move to the next compactor's buffer.
      cIndex++;
      currentBuf = compactors.get(cIndex).getBuffer();
      bIndex = 0;
    } else {
      bIndex++;
    }
    return true;
  }
}
| 2,669 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/ReqDebug.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
/**
* The signaling interface that allows comprehensive analysis of the ReqSketch and ReqCompactor
* while eliminating code clutter in the main classes. The implementation of this interface can be
* found in the test tree.
*
* @author Lee Rhodes
*/
public interface ReqDebug {
//Sketch signals
/**
* Emit the start signal, announcing the sketch under analysis.
* @param sk the sketch
*/
void emitStart(ReqSketch sk);
/**
* Emit Start Compress, fired when the sketch begins a compression pass.
*/
void emitStartCompress();
/**
* Emit compress done, fired when the compression pass completes.
*/
void emitCompressDone();
/**
* Emit all horizontal lists, i.e., the contents of every compactor level.
*/
void emitAllHorizList();
/**
* Emit Must add compactor, fired when the sketch needs a new (higher) compactor level.
*/
void emitMustAddCompactor();
//Compactor signals
/**
* Emit Compaction Start for one compactor.
* @param lgWeight compactor lgWeight or height
*/
void emitCompactingStart(byte lgWeight);
/**
* Emit new compactor configuration after a compactor has been created.
* @param lgWeight the log weight
*/
void emitNewCompactor(byte lgWeight);
/**
* Emit adjusting section size and number of sections for one compactor.
* @param lgWeight the log weight
*/
void emitAdjSecSizeNumSec(byte lgWeight);
/**
* Emit Compaction details for a single compaction event.
* @param compactionStart the offset of compaction start
* @param compactionEnd the offset of compaction end
* @param secsToCompact the number of sections to compact
* @param promoteLen the length of the promotion field
* @param coin the state of the random coin (chooses odds vs evens to promote)
*/
void emitCompactionDetail(int compactionStart, int compactionEnd,
int secsToCompact, int promoteLen, boolean coin);
/**
* Emit compaction done and number of compactions so far.
* @param lgWeight the log weight
*/
void emitCompactionDone(byte lgWeight);
}
| 2,670 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/FloatBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import java.util.Arrays;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.quantilescommon.InequalitySearch;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
/**
* A special buffer of floats specifically designed to support the ReqCompactor class.
*
* @author Lee Rhodes
*/
class FloatBuffer {
private static final String LS = System.getProperty("line.separator");
private float[] arr_; // backing array; the "active region" occupies one end of it
private int count_; // number of active items
private int capacity_; // arr_.length
private final int delta_; // extra slack added on each expansion
private boolean sorted_; // true when the active region is known to be sorted
private final boolean spaceAtBottom_; //tied to hra: active region sits at the TOP of arr_
/**
* Constructs an new empty FloatBuffer with an initial capacity specified by
* the <code>capacity</code> argument.
*
* @param capacity the initial capacity.
* @param delta add space in increments of this size
* @param spaceAtBottom if true, create any extra space at the bottom of the buffer,
* otherwise, create any extra space at the top of the buffer.
*/
FloatBuffer(final int capacity, final int delta, final boolean spaceAtBottom) {
arr_ = new float[capacity];
count_ = 0;
capacity_ = capacity;
delta_ = delta;
sorted_ = true; // vacuously sorted while empty
spaceAtBottom_ = spaceAtBottom;
}
/**
* Copy Constructor. Performs a deep copy of the backing array.
* @param buf the FloatBuffer to be copied into this one
*/
FloatBuffer(final FloatBuffer buf) {
arr_ = buf.arr_.clone();
count_ = buf.count_;
capacity_ = buf.capacity_;
delta_ = buf.delta_;
sorted_ = buf.sorted_;
spaceAtBottom_ = buf.spaceAtBottom_;
}
/**
* Exact construction from elements. The given array is used directly, without copying.
* The active region must be properly positioned in the array.
* @param arr the array to be used directly as the internal array
* @param count the number of active elements in the given array
* @param capacity the initial capacity
* @param delta add space in increments of this size
* @param sorted true if already sorted
* @param spaceAtBottom if true, create any extra space at the bottom of the buffer,
* otherwise, create any extra space at the top of the buffer.
*/
private FloatBuffer(final float[] arr, final int count, final int capacity, final int delta,
final boolean sorted, final boolean spaceAtBottom) {
arr_ = arr;
count_ = count;
capacity_ = capacity;
delta_ = delta;
sorted_ = sorted;
spaceAtBottom_ = spaceAtBottom;
}
/**
* Used by ReqSerDe. The array is only the active region and will be positioned
* based on capacity, delta, and sab. This copies over the sorted flag.
* @param arr the active items extracted from the deserialization.
* @param count the number of active items
* @param capacity the capacity of the internal array
* @param delta add space in this increment
* @param sorted if the incoming array is sorted
* @param sab equivalent to the HRA flag, e.g., space-at-bottom.
* @return a new FloatBuffer
*/
static FloatBuffer reconstruct(
final float[] arr,
final int count,
final int capacity,
final int delta,
final boolean sorted,
final boolean sab //hra
) {
final float[] farr = new float[capacity];
// Position the active items at the proper end of the new backing array:
// top of the array when space-at-bottom (hra), else the bottom.
if (sab) {
System.arraycopy(arr, 0, farr, capacity - count, count);
} else {
System.arraycopy(arr, 0, farr, 0, count);
}
return new FloatBuffer(farr, count, capacity, delta, sorted, sab);
}
/**
* Wraps the given array to use as the internal array; thus no copies. For internal use.
* @param arr the given array
* @param isSorted set true, if incoming array is already sorted.
* @param spaceAtBottom if true, create any extra space at the bottom of the buffer,
* otherwise, create any extra space at the top of the buffer.
* @return this, which will be sorted, if necessary.
*/
static FloatBuffer wrap(final float[] arr, final boolean isSorted, final boolean spaceAtBottom) {
// count == capacity here, so the buffer starts completely full.
final FloatBuffer buf = new FloatBuffer(arr, arr.length, arr.length, 0, isSorted, spaceAtBottom);
buf.sort(); // no-op if isSorted was true
return buf;
}
/**
 * Appends one item to the active region, expanding the backing array if required.
 * Appending marks the buffer unsorted.
 * @param item the item to append
 * @return this
 */
FloatBuffer append(final float item) {
  ensureSpace(1);
  if (spaceAtBottom_) {
    // Active region grows downward from the top of the array.
    arr_[capacity_ - count_ - 1] = item;
  } else {
    // Active region grows upward from index 0.
    arr_[count_] = item;
  }
  count_++;
  sorted_ = false;
  return this;
}
/**
 * Grows the backing array to at least newCapacity, keeping the active region at its
 * proper end (top when space-at-bottom, else bottom). A request that does not exceed
 * the current capacity is a no-op.
 * @param newCapacity the desired minimum capacity
 * @return this
 */
FloatBuffer ensureCapacity(final int newCapacity) {
  if (newCapacity <= capacity_) { return this; }
  final float[] bigger = new float[newCapacity];
  if (spaceAtBottom_) {
    System.arraycopy(arr_, capacity_ - count_, bigger, newCapacity - count_, count_);
  } else {
    System.arraycopy(arr_, 0, bigger, 0, count_);
  }
  arr_ = bigger;
  capacity_ = newCapacity;
  return this;
}
/**
 * Guarantees at least the given amount of free space beyond the active items,
 * expanding by the requested amount plus the delta_ slack when necessary.
 * @param space the required free space
 * @return this
 */
private FloatBuffer ensureSpace(final int space) {
  final int needed = count_ + space;
  if (needed > capacity_) {
    ensureCapacity(needed + delta_);
  }
  return this;
}
/**
* Returns a reference to the internal quantiles array. Be careful and don't modify this array!
* @return the internal quantiles array.
*/
float[] getArray() {
return arr_;
}
/**
* Gets the current capacity of this FloatBuffer. The capacity is the total amount of storage
* currently available without expanding the array.
*
* @return the current capacity
*/
int getCapacity() {
return capacity_;
}
/**
 * Counts the active items that satisfy the given criterion relative to the given item:
 * &le; item for INCLUSIVE, &lt; item otherwise. Sorts the active region first if needed.
 * Also used in test.
 * @param item the given item; must not be NaN
 * @param searchCrit the chosen criterion: LT, LT Strict, or LE
 * @return count of items based on the given criterion.
 */
int getCountWithCriterion(final float item, final QuantileSearchCriteria searchCrit) {
  assert !Float.isNaN(item) : "Float items must not be NaN.";
  if (!sorted_) { sort(); } //we must be sorted!
  // Physical bounds of the active region within the backing array.
  final int low = spaceAtBottom_ ? capacity_ - count_ : 0;
  final int high = spaceAtBottom_ ? capacity_ - 1 : count_ - 1;
  final InequalitySearch crit = (searchCrit == INCLUSIVE) ? InequalitySearch.LE : InequalitySearch.LT;
  final int index = InequalitySearch.find(arr_, low, high, item, crit);
  // -1 means no item satisfies the criterion.
  return index == -1 ? 0 : index - low + 1;
}
/**
* Returns a sorted FloatBuffer of the odd or even offsets from the range startOffset (inclusive)
* to endOffset (exclusive). The size of the range must be of even size.
* The offsets are with respect to the start of the active region and independent of the
* location of the active region within the overall buffer. The requested region will be sorted
* first.
* @param startOffset the starting offset within the active region
* @param endOffset the end offset within the active region, exclusive
* @param odds if true, return the odds, otherwise return the evens.
* @return the selected odds from the range
*/
FloatBuffer getEvensOrOdds(final int startOffset, final int endOffset, final boolean odds) {
// Translate logical offsets to physical indices; sort() does not move the region, so
// computing these before sorting is safe.
final int start = spaceAtBottom_ ? capacity_ - count_ + startOffset : startOffset;
final int end = spaceAtBottom_ ? capacity_ - count_ + endOffset : endOffset;
sort();
final int range = endOffset - startOffset;
if ((range & 1) == 1) {
throw new SketchesArgumentException("Input range size must be even");
}
final int odd = odds ? 1 : 0;
final float[] out = new float[range / 2];
for (int i = start + odd, j = 0; i < end; i += 2, j++) {
out[j] = arr_[i];
}
return wrap(out, true, spaceAtBottom_);
}
/**
* Gets an item from the backing array given its index.
* Only used in test or debug.
* @param index the given index
* @return an item given its backing array index
*/
float getItemFromIndex(final int index) {
return arr_[index];
}
/**
* Gets an item given its offset in the active region
* @param offset the given offset in the active region
* @return an item given its offset
*/
float getItem(final int offset) {
final int index = spaceAtBottom_ ? capacity_ - count_ + offset : offset;
return arr_[index];
}
/**
* Returns the delta margin
* @return the delta margin
*/
int getDelta() {
return delta_;
}
/**
* Returns the active item count.
*
* @return the active item count of this buffer.
*/
int getCount() {
return count_;
}
/**
* Gets available space, which is getCapacity() - getCount().
* When spaceAtBottom is true this is the start position for active data, otherwise it is zero.
* @return available space
*/
int getSpace() {
return capacity_ - count_;
}
/**
* Returns the space at bottom flag
* @return the space at bottom flag
*/
boolean isSpaceAtBottom() {
return spaceAtBottom_;
}
/**
* Returns true if getCount() == 0.
* @return true if getCount() == 0.
*/
boolean isEmpty() {
return count_ == 0;
}
/**
* Returns true iff this is exactly equal to that FloatBuffer.
* Note: this compares the ENTIRE backing arrays, including any inactive (garbage) region,
* so two buffers with equal active contents but different garbage compare unequal.
* @param that the other buffer
* @return true iff this is exactly equal to that FloatBuffer.
*/
boolean isEqualTo(final FloatBuffer that) {
if (capacity_ != that.capacity_
|| count_ != that.count_
|| delta_ != that.delta_
|| sorted_ != that.sorted_
|| spaceAtBottom_ != that.spaceAtBottom_) { return false; }
for (int i = 0; i < capacity_; i++) {
if (arr_[i] != that.arr_[i]) { return false; }
}
return true;
}
/**
* Returns true if this FloatBuffer is sorted.
* @return true if sorted
*/
boolean isSorted() {
return sorted_;
}
/**
* Merges the incoming sorted buffer into this sorted buffer, in place.
* Both buffers must already be sorted; the result remains sorted.
* The merge direction depends on where the free space lives: with space-at-bottom the
* merged run is written downward-growing from the bottom of the free space; otherwise it
* is written from the top index down toward 0, consuming both sources back-to-front.
* @param bufIn sorted buffer in
* @return this
*/
FloatBuffer mergeSortIn(final FloatBuffer bufIn) {
if (!sorted_ || !bufIn.isSorted()) {
throw new SketchesArgumentException("Both buffers must be sorted.");
}
final float[] arrIn = bufIn.getArray(); //may be larger than its item count.
final int bufInLen = bufIn.getCount();
ensureSpace(bufInLen);
final int totLen = count_ + bufInLen;
if (spaceAtBottom_) { //scan up, insert at bottom
// i and j start at the first active item of each source (active regions are at the top).
final int tgtStart = capacity_ - totLen;
int i = capacity_ - count_;
int j = bufIn.capacity_ - bufIn.count_;
for (int k = tgtStart; k < capacity_; k++) {
if (i < capacity_ && j < bufIn.capacity_) { //both valid
// Stable: ties taken from this buffer first.
arr_[k] = arr_[i] <= arrIn[j] ? arr_[i++] : arrIn[j++];
} else if (i < capacity_) { //i is valid
arr_[k] = arr_[i++];
} else if (j < bufIn.capacity_) { //j is valid
arr_[k] = arrIn[j++];
} else {
break;
}
}
} else { //scan down, insert at top
// i and j start at the last active item of each source (active regions start at 0).
int i = count_ - 1;
int j = bufInLen - 1;
for (int k = totLen; k-- > 0; ) {
if (i >= 0 && j >= 0) { //both valid
arr_[k] = arr_[i] >= arrIn[j] ? arr_[i--] : arrIn[j--];
} else if (i >= 0) { //i is valid
arr_[k] = arr_[i--];
} else if (j >= 0) { //j is valid
arr_[k] = arrIn[j--];
} else {
break;
}
}
}
count_ += bufInLen;
sorted_ = true;
return this;
}
/**
 * Sorts the active region in place, if it is not already sorted.
 * @return this
 */
FloatBuffer sort() {
  if (!sorted_) {
    final int from = spaceAtBottom_ ? capacity_ - count_ : 0;
    final int to = spaceAtBottom_ ? capacity_ : count_;
    Arrays.sort(arr_, from, to);
    sorted_ = true;
  }
  return this;
}
// This only serializes count * floats: the active region is written, the inactive
// (garbage) region is not. Output is in the byte order used by WritableMemory wrapping.
byte[] floatsToBytes() {
final int bytes = Float.BYTES * count_;
final byte[] arr = new byte[bytes];
final WritableBuffer wbuf = WritableMemory.writableWrap(arr).asWritableBuffer();
if (spaceAtBottom_) {
// Active region lives at the top of arr_; start the copy at its physical offset.
wbuf.putFloatArray(arr_, capacity_ - count_, count_);
} else {
wbuf.putFloatArray(arr_, 0, count_);
}
assert wbuf.getPosition() == bytes;
return arr;
}
/**
 * Renders the active items as a formatted string, one space-prefixed row of up to
 * {@code width} items per line.
 * @param fmt The format for each printed item.
 * @param width the number of items to print per line
 * @return a printable, formatted string of the items of this buffer.
 */
String toHorizList(final String fmt, final int width) {
  final String spaces = " ";
  final StringBuilder sb = new StringBuilder(spaces);
  final int from = spaceAtBottom_ ? capacity_ - count_ : 0;
  final int to = spaceAtBottom_ ? capacity_ : count_;
  int printed = 0; // counts items after the first; drives the line breaks
  for (int i = from; i < to; i++) {
    if (i > from && ++printed % width == 0) { sb.append(LS).append(spaces); }
    sb.append(String.format(fmt, arr_[i]));
  }
  return sb.toString();
}
/**
 * Shrinks the backing array so its capacity exactly equals the active count,
 * discarding any free space. A no-op when already tight.
 * @return this
 */
FloatBuffer trimCapacity() {
  if (count_ == capacity_) { return this; }
  final float[] tight = new float[count_];
  final int from = spaceAtBottom_ ? capacity_ - count_ : 0;
  System.arraycopy(arr_, from, tight, 0, count_);
  arr_ = tight;
  capacity_ = count_;
  return this;
}
/**
 * Reduces the active count to newCount; a newCount at or above the current count is a
 * no-op. The abandoned slots are not cleared — their contents become garbage.
 * @param newCount the new active count
 * @return this
 */
FloatBuffer trimCount(final int newCount) {
  count_ = Math.min(count_, newCount);
  return this;
}
}
| 2,671 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/ReqSketchSortedView.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import java.util.List;
import org.apache.datasketches.quantilescommon.FloatsSortedView;
import org.apache.datasketches.quantilescommon.InequalitySearch;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesAPI;
import org.apache.datasketches.quantilescommon.QuantilesUtil;
/**
* The SortedView of the ReqSketch.
* @author Alexander Saydakov
* @author Lee Rhodes
*/
public final class ReqSketchSortedView implements FloatsSortedView {
private float[] quantiles;
private long[] cumWeights; //comes in as individual weights, converted to cumulative natural weights
private final long totalN;
/**
* Construct from elements for testing. The given arrays are used directly, not copied.
* @param quantiles sorted array of quantiles
* @param cumWeights sorted, monotonically increasing cumulative weights.
* @param totalN the total number of items presented to the sketch.
*/
ReqSketchSortedView(final float[] quantiles, final long[] cumWeights, final long totalN) {
this.quantiles = quantiles;
this.cumWeights = cumWeights;
this.totalN = totalN;
}
/**
* Constructs this Sorted View given the sketch, merging all compactor buffers into a
* single sorted quantiles array with cumulative natural-rank weights.
* @param sk the given ReqSketch
*/
public ReqSketchSortedView(final ReqSketch sk) {
totalN = sk.getN();
buildSortedViewArrays(sk);
}
@Override
public long[] getCumulativeWeights() {
// Defensive copy; callers cannot mutate the view's internal state.
return cumWeights.clone();
}
@Override
public float getQuantile(final double rank, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
QuantilesUtil.checkNormalizedRankBounds(rank);
final int len = cumWeights.length;
// Map the normalized rank to a natural rank: ceil for INCLUSIVE, floor for EXCLUSIVE.
final long naturalRank = (searchCrit == INCLUSIVE)
? (long)Math.ceil(rank * totalN) : (long)Math.floor(rank * totalN);
final InequalitySearch crit = (searchCrit == INCLUSIVE) ? InequalitySearch.GE : InequalitySearch.GT;
final int index = InequalitySearch.find(cumWeights, 0, len - 1, naturalRank, crit);
if (index == -1) {
return quantiles[quantiles.length - 1]; ///EXCLUSIVE (GT) case: normRank == 1.0;
}
return quantiles[index];
}
@Override
public float[] getQuantiles() {
// Defensive copy; callers cannot mutate the view's internal state.
return quantiles.clone();
}
@Override
public double getRank(final float quantile, final QuantileSearchCriteria searchCrit) {
if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
final int len = quantiles.length;
final InequalitySearch crit = (searchCrit == INCLUSIVE) ? InequalitySearch.LE : InequalitySearch.LT;
final int index = InequalitySearch.find(quantiles, 0, len - 1, quantile, crit);
if (index == -1) {
return 0; //EXCLUSIVE (LT) case: quantile <= minQuantile; INCLUSIVE (LE) case: quantile < minQuantile
}
return (double)cumWeights[index] / totalN;
}
@Override
public boolean isEmpty() {
return totalN == 0;
}
@Override
public ReqSketchSortedViewIterator iterator() {
return new ReqSketchSortedViewIterator(quantiles, cumWeights);
}
//restricted methods
  /**
   * Populates the parallel quantiles[] and cumWeights[] arrays from the sketch's
   * compactors. Each level's buffer is merged in sorted order into the arrays;
   * every item at a level carries an individual weight of 2^lgWeight. The
   * per-item weights are then converted in place into cumulative natural ranks.
   * @param sk the source sketch
   */
  private void buildSortedViewArrays(final ReqSketch sk) {
    final List<ReqCompactor> compactors = sk.getCompactors();
    final int numComp = compactors.size();
    final int totalQuantiles = sk.getNumRetained();
    quantiles = new float[totalQuantiles]; //could have zero entries
    cumWeights = new long[totalQuantiles];
    int count = 0; //number of items merged into the arrays so far
    for (int i = 0; i < numComp; i++) {
      final ReqCompactor c = compactors.get(i);
      final FloatBuffer bufIn = c.getBuffer();
      final long bufWeight = 1 << c.getLgWeight(); //weight of every item at this level
      final int bufInLen = bufIn.getCount();
      mergeSortIn(bufIn, bufWeight, count, sk.getHighRankAccuracyMode());
      count += bufInLen;
    }
    createCumulativeNativeRanks();
  }
  /**
   * Specially modified version of FloatBuffer.mergeSortIn(). Here spaceAtBottom is always false and
   * the ultimate array size has already been set. However, this must simultaneously deal with
   * sorting the base FloatBuffer as well.
   *
   * <p>The merge runs from the top down so it can operate in place within the partially
   * filled quantiles/cumWeights arrays.</p>
   *
   * @param bufIn given FloatBuffer. If not sorted it will be sorted here.
   * @param bufWeight associated weight of input FloatBuffer
   * @param count tracks number of items inserted into the class arrays
   * @param hra if true, the input buffer's items occupy the top of its backing array,
   * so the physical read index starts at capacity - 1 rather than count - 1.
   */
  private void mergeSortIn(final FloatBuffer bufIn, final long bufWeight, final int count, final boolean hra) {
    if (!bufIn.isSorted()) { bufIn.sort(); }
    final float[] arrIn = bufIn.getArray(); //may be larger than its item count.
    final int bufInLen = bufIn.getCount();
    final int totLen = count + bufInLen;
    int i = count - 1; //read index into the partially built quantiles/cumWeights arrays
    int j = bufInLen - 1; //logical count of items remaining in bufIn
    int h = hra ? bufIn.getCapacity() - 1 : bufInLen - 1; //physical read index into arrIn
    for (int k = totLen; k-- > 0; ) { //write index, moving down
      if (i >= 0 && j >= 0) { //both valid
        if (quantiles[i] >= arrIn[h]) {
          quantiles[k] = quantiles[i];
          cumWeights[k] = cumWeights[i--]; //not yet natRanks, just individual wts
        } else {
          quantiles[k] = arrIn[h--]; j--;
          cumWeights[k] = bufWeight;
        }
      } else if (i >= 0) { //i is valid
        quantiles[k] = quantiles[i];
        cumWeights[k] = cumWeights[i--];
      } else if (j >= 0) { //j is valid
        quantiles[k] = arrIn[h--]; j--;
        cumWeights[k] = bufWeight;
      } else {
        break; //both sources exhausted
      }
    }
  }
private void createCumulativeNativeRanks() {
final int len = quantiles.length;
for (int i = 1; i < len; i++) {
cumWeights[i] += cumWeights[i - 1];
}
if (totalN > 0) {
assert cumWeights[len - 1] == totalN;
}
}
}
| 2,672 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/ReqCompactor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import static java.lang.Math.round;
import static org.apache.datasketches.common.Util.numberOfTrailingOnes;
import static org.apache.datasketches.req.BaseReqSketch.INIT_NUMBER_OF_SECTIONS;
import static org.apache.datasketches.req.ReqSketch.MIN_K;
import static org.apache.datasketches.req.ReqSketch.NOM_CAP_MULT;
import java.util.Random;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.req.ReqSketch.CompactorReturn;
/**
* The compactor class for the ReqSketch
* @author Lee Rhodes
*/
class ReqCompactor {
  //finals
  private static final double SQRT2 = Math.sqrt(2.0);
  private final byte lgWeight;
  private final boolean hra;
  //state variables
  private long state; //State of the deterministic compaction schedule
  private float sectionSizeFlt;
  private int sectionSize; //initialized with k, minimum 4
  private byte numSections; //# of sections, initial size 3
  private boolean coin; //true or false at random for each compaction
  //objects
  private FloatBuffer buf;
  //FIX: was "private final ReqDebug reqDebug = null;", which silently discarded the
  //constructor's reqDebug argument and made every debug hook below dead code.
  //Each constructor now assigns it explicitly (null where no handler applies).
  private final ReqDebug reqDebug; //may be null

  /**
   * Normal Constructor
   * @param lgWeight the lgWeight of this compactor
   * @param hra High Rank Accuracy
   * @param sectionSize initially the size of k
   * @param reqDebug The debug signaling interface. It may be null.
   */
  ReqCompactor(
      final byte lgWeight,
      final boolean hra,
      final int sectionSize,
      final ReqDebug reqDebug) {
    this.lgWeight = lgWeight;
    this.hra = hra;
    this.sectionSize = sectionSize;
    this.reqDebug = reqDebug; //previously dropped; now honored by the emit* calls below
    sectionSizeFlt = sectionSize;
    state = 0;
    coin = false;
    numSections = INIT_NUMBER_OF_SECTIONS;
    final int nomCap = getNomCapacity();
    buf = new FloatBuffer(2 * nomCap, nomCap, hra);
  }

  /**
   * Copy Constructor
   * @param other the compactor to be copied into this one
   */
  ReqCompactor(final ReqCompactor other) {
    lgWeight = other.lgWeight;
    hra = other.hra;
    sectionSizeFlt = other.sectionSizeFlt;
    numSections = other.numSections;
    sectionSize = other.sectionSize;
    state = other.state;
    coin = other.coin;
    reqDebug = other.reqDebug;
    buf = new FloatBuffer(other.buf);
  }

  /**
   * Construct from elements. The buffer will need to be constructed first.
   * Used by the deserialization path; no debug handler applies here.
   */
  ReqCompactor(
      final byte lgWeight,
      final boolean hra,
      final long state,
      final float sectionSizeFlt,
      final byte numSections,
      final FloatBuffer buf) {
    this.lgWeight = lgWeight;
    this.hra = hra;
    this.buf = buf;
    this.sectionSizeFlt = sectionSizeFlt;
    this.numSections = numSections;
    this.state = state;
    coin = false;
    sectionSize = nearestEven(sectionSizeFlt);
    reqDebug = null; //ReqDebug left at null
  }

  /**
   * Perform a compaction operation on this compactor
   * @param cReturn carries back the deltas in retained items and nominal capacity
   * @param rand the source of randomness for the even/odd coin flip
   * @return the array of items to be promoted to the next level compactor
   */
  FloatBuffer compact(final CompactorReturn cReturn, final Random rand) {
    if (reqDebug != null) { reqDebug.emitCompactingStart(lgWeight); }
    final int startRetItems = buf.getCount();
    final int startNomCap = getNomCapacity();
    // choose a part of the buffer to compact
    final int secsToCompact = Math.min(numberOfTrailingOnes(state) + 1, numSections);
    final long compactionRange = computeCompactionRange(secsToCompact);
    final int compactionStart = (int) (compactionRange & 0xFFFF_FFFFL); //low 32
    final int compactionEnd = (int) (compactionRange >>> 32); //high 32
    assert compactionEnd - compactionStart >= 2;
    if ((state & 1L) == 1L) { coin = !coin; } //if numCompactions odd, flip coin;
    else { coin = rand.nextBoolean(); } //random coin flip
    final FloatBuffer promote = buf.getEvensOrOdds(compactionStart, compactionEnd, coin);
    if (reqDebug != null) {
      reqDebug.emitCompactionDetail(compactionStart, compactionEnd, secsToCompact,
          promote.getCount(), coin);
    }
    buf.trimCount(buf.getCount() - (compactionEnd - compactionStart));
    state += 1;
    ensureEnoughSections();
    cReturn.deltaRetItems = buf.getCount() - startRetItems + promote.getCount();
    cReturn.deltaNomSize = getNomCapacity() - startNomCap;
    if (reqDebug != null) { reqDebug.emitCompactionDone(lgWeight); }
    return promote;
  } //End Compact

  /**
   * Gets a reference to this compactor's internal FloatBuffer
   * @return a reference to this compactor's internal FloatBuffer
   */
  FloatBuffer getBuffer() { return buf; }

  boolean getCoin() {
    return coin;
  }

  /**
   * Gets the lgWeight of this buffer
   * @return the lgWeight of this buffer
   */
  byte getLgWeight() {
    return lgWeight;
  }

  /**
   * Gets the current nominal capacity of this compactor.
   * @return the current nominal capacity of this compactor.
   */
  final int getNomCapacity() { //called from constructor
    return NOM_CAP_MULT * numSections * sectionSize;
  }

  /**
   * Serialize state(8) sectionSizeFlt(4), numSections(1), lgWeight(1), pad(2), count(4) + floatArr
   * @return required bytes to serialize.
   */
  int getSerializationBytes() {
    final int count = buf.getCount();
    return 8 + 4 + 1 + 1 + 2 + 4 + count * Float.BYTES; // 20 + array
  }

  int getNumSections() {
    return numSections;
  }

  int getSectionSize() {
    return sectionSize;
  }

  float getSectionSizeFlt() {
    return sectionSizeFlt;
  }

  long getState() {
    return state;
  }

  boolean isHighRankAccuracy() {
    return hra;
  }

  /**
   * Merge the other given compactor into this one. They both must have the
   * same lgWeight
   * @param other the other given compactor
   * @return this
   */
  ReqCompactor merge(final ReqCompactor other) {
    assert lgWeight == other.lgWeight;
    state |= other.state;
    while (ensureEnoughSections()) {}
    buf.sort();
    final FloatBuffer otherBuf = new FloatBuffer(other.buf); //copy so 'other' is unchanged
    otherBuf.sort();
    if (otherBuf.getCount() > buf.getCount()) { //merge the smaller into the larger
      otherBuf.mergeSortIn(buf);
      buf = otherBuf;
    } else {
      buf.mergeSortIn(otherBuf);
    }
    return this;
  }

  /**
   * Adjust the sectionSize and numSections if possible.
   * When the compaction count reaches 2^(numSections - 1), numSections is doubled
   * and sectionSize is reduced by a factor of sqrt(2), subject to the MIN_K floor.
   * @return true if the SectionSize and NumSections were adjusted.
   */
  private boolean ensureEnoughSections() {
    final float szf;
    final int ne;
    if (state >= 1L << numSections - 1
        && sectionSize > MIN_K
        && (ne = nearestEven(szf = (float)(sectionSizeFlt / SQRT2))) >= MIN_K)
    {
      sectionSizeFlt = szf;
      sectionSize = ne;
      numSections <<= 1;
      buf.ensureCapacity(2 * getNomCapacity());
      if (reqDebug != null) { reqDebug.emitAdjSecSizeNumSec(lgWeight); }
      return true;
    }
    return false;
  }

  /**
   * Computes the start and end indices of the compacted region.
   * The two 32-bit indices are packed into a single long: start in the low half,
   * end in the high half. The compacted region is forced to have an even length,
   * and lies at the low end of the buffer for HRA, at the high end otherwise.
   * @param secsToCompact the number of contiguous sections to compact
   * @return the start and end indices of the compacted region
   */
  private long computeCompactionRange(final int secsToCompact) {
    final int bufLen = buf.getCount();
    int nonCompact = getNomCapacity() / 2 + (numSections - secsToCompact) * sectionSize;
    //make compacted region even:
    nonCompact = (bufLen - nonCompact & 1) == 1 ? nonCompact + 1 : nonCompact;
    final long low = hra ? 0 : nonCompact;
    final long high = hra ? bufLen - nonCompact : bufLen;
    return (high << 32) + low;
  }

  /**
   * Returns the nearest even integer to the given float. Also used by test.
   * @param fltVal the given float
   * @return the nearest even integer to the given float.
   */
  static final int nearestEven(final float fltVal) {
    return (int) round(fltVal / 2.0) << 1;
  }

  /**
   * ReqCompactor SERIALIZATION FORMAT.
   *
   * <p>Low significance bytes of this data structure are on the right just for visualization.
   * The multi-byte primitives are stored in native byte order.
   * The <i>byte</i> primitives are treated as unsigned. Multibyte primitives are indicated with "*" and
   * their size depends on the specific implementation.</p>
   *
   * <p>The binary format for a compactor: </p>
   *
   * <pre>
   * Binary Format. Starting offset is either 24 or 8, both are 8-byte aligned.
   *
   * +Long Adr / +Byte Offset
   * || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
   * 0 ||-----------------------------state-------------------------------------|
   *
   * || 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 |
   * 1 ||----(empty)------|-#Sects-|--lgWt--|------------sectionSizeFlt---------|
   *
   * || | | | | | | | 16 |
   * 2 ||--------------floats[]-------------|---------------count---------------|
   *
   * </pre>
   */
  byte[] toByteArray() {
    final int bytes = getSerializationBytes();
    final byte[] arr = new byte[bytes];
    final WritableBuffer wbuf = WritableMemory.writableWrap(arr).asWritableBuffer();
    wbuf.putLong(state);
    wbuf.putFloat(sectionSizeFlt);
    wbuf.putByte(lgWeight);
    wbuf.putByte(numSections);
    wbuf.incrementPosition(2); //pad 2
    //buf.sort(); //sort if necessary
    wbuf.putInt(buf.getCount()); //count
    wbuf.putByteArray(buf.floatsToBytes(), 0, Float.BYTES * buf.getCount());
    assert wbuf.getPosition() == bytes;
    return arr;
  }

  /**
   * Returns a printable formatted prefix string summarizing the list.
   * The first number is the compactor height. the second number in brackets is the current count
   * of the compactor buffer. The third number in brackets is the nominal capacity of the compactor.
   * @return a printable formatted prefix string summarizing the list.
   */
  String toListPrefix() {
    final int h = getLgWeight();
    final int len = buf.getCount();
    final int nom = getNomCapacity();
    final int secSz = getSectionSize();
    final int numSec = getNumSections();
    final long num = getState();
    final String prefix = String.format(
        "  C:%d Len:%d NomSz:%d SecSz:%d NumSec:%d State:%d",
        h, len, nom, secSz, numSec, num);
    return prefix;
  }
}
| 2,673 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/ReqSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.quantilescommon.FloatsSortedView;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesAPI;
import org.apache.datasketches.quantilescommon.QuantilesFloatsSketchIterator;
/**
* This Relative Error Quantiles Sketch is the Java implementation based on the paper
* "Relative Error Streaming Quantiles" by Graham Cormode, Zohar Karnin, Edo Liberty,
* Justin Thaler, Pavel Veselý, and loosely derived from a Python prototype written by Pavel Veselý.
*
* <p>Reference: https://arxiv.org/abs/2004.01668</p>
*
* <p>This implementation differs from the algorithm described in the paper in the following:</p>
*
* <ul>
* <li>The algorithm requires no upper bound on the stream length.
* Instead, each relative-compactor counts the number of compaction operations performed
* so far (via variable state). Initially, the relative-compactor starts with INIT_NUMBER_OF_SECTIONS.
* Each time the number of compactions (variable state) exceeds 2^{numSections - 1}, we double
* numSections. Note that after merging the sketch with another one variable state may not correspond
* to the number of compactions performed at a particular level, however, since the state variable
* never exceeds the number of compactions, the guarantees of the sketch remain valid.</li>
*
* <li>The size of each section (variable k and sectionSize in the code and parameter k in
* the paper) is initialized with a number set by the user via variable k.
* When the number of sections doubles, we decrease sectionSize by a factor of sqrt(2).
* This is applied at each level separately. Thus, when we double the number of sections, the
* nominal compactor size increases by a factor of approx. sqrt(2) (+/- rounding).</li>
*
* <li>The merge operation here does not perform "special compactions", which are used in the paper
* to allow for a tight mathematical analysis of the sketch.</li>
* </ul>
*
* <p>This implementation provides a number of capabilities not discussed in the paper or provided
* in the Python prototype.</p>
*
* <ul><li>The Python prototype only implemented high accuracy for low ranks. This implementation
* provides the user with the ability to choose either high rank accuracy or low rank accuracy at
* the time of sketch construction.</li>
* <li>The Python prototype only implemented a comparison criterion of "INCLUSIVE". This implementation
* allows the user to switch back and forth between the "INCLUSIVE" criterion and the "EXCLUSIVE" criterion.</li>
* <li>This implementation provides extensive debug visibility into the operation of the sketch with
* two levels of detail output. This is not only useful for debugging, but is a powerful tool to
* help users understand how the sketch works.</li>
* </ul>
*
* @see QuantilesAPI
*
* @author Edo Liberty
* @author Pavel Vesely
* @author Lee Rhodes
*/
public final class ReqSketch extends BaseReqSketch {
  //Carries the deltas produced by a single compaction back to the caller of compact().
  static class CompactorReturn {
    int deltaRetItems;
    int deltaNomSize;
  }

  //static finals
  private static final String LS = System.getProperty("line.separator");
  static final byte MIN_K = 4;
  static final byte NOM_CAP_MULT = 2;
  //finals
  private final int k; //default is 12 (1% @ 95% Confidence)
  private final boolean hra; //default is true
  //state variables
  private long totalN = 0;
  private float minItem = Float.NaN;
  private float maxItem = Float.NaN;
  //computed from compactors
  private int retItems = 0; //number of retained items in the sketch
  private int maxNomSize = 0; //sum of nominal capacities of all compactors
  //Objects
  private ReqSketchSortedView reqSV = null; //lazily built; nulled on any mutation
  private List<ReqCompactor> compactors = new ArrayList<>();
  private ReqDebug reqDebug = null; //user config, default: null, can be set after construction.
  private final CompactorReturn cReturn = new CompactorReturn(); //used in compress()
  private final Random rand;

  /**
   * Construct from elements. After sketch is constructed, retItems and maxNomSize must be computed.
   * Used by ReqSerDe.
   */
  ReqSketch(final int k, final boolean hra, final long totalN, final float minItem,
      final float maxItem, final List<ReqCompactor> compactors) {
    checkK(k);
    this.k = k;
    this.hra = hra;
    this.totalN = totalN;
    this.minItem = minItem;
    this.maxItem = maxItem;
    this.compactors = compactors;
    this.rand = new Random();
  }

  /**
   * Normal Constructor used by ReqSketchBuilder.
   * @param k Controls the size and error of the sketch. It must be even and in the range
   * [4, 1024].
   * The default number 12 roughly corresponds to 1% relative error guarantee at 95% confidence.
   * @param highRankAccuracy if true, the default, the high ranks are prioritized for better
   * accuracy. Otherwise the low ranks are prioritized for better accuracy.
   * @param reqDebug the debug handler. It may be null.
   */
  ReqSketch(final int k, final boolean highRankAccuracy, final ReqDebug reqDebug) {
    checkK(k);
    this.k = k;
    this.hra = highRankAccuracy;
    this.reqDebug = reqDebug;
    //a fixed seed is used in debug mode so runs are reproducible
    this.rand = (reqDebug == null) ? new Random() : new Random(1);
    grow();
  }

  /**
   * Copy Constructor. Only used in test.
   * @param other the other sketch to be deep copied into this one.
   */
  ReqSketch(final ReqSketch other) {
    this.k = other.k;
    this.hra = other.hra;
    this.totalN = other.totalN;
    this.retItems = other.retItems;
    this.maxNomSize = other.maxNomSize;
    this.minItem = other.minItem;
    this.maxItem = other.maxItem;
    this.reqDebug = other.reqDebug;
    this.reqSV = null;
    this.rand = (reqDebug == null) ? new Random() : new Random(1);
    //deep copy each compactor level
    for (int i = 0; i < other.getNumLevels(); i++) {
      compactors.add(new ReqCompactor(other.compactors.get(i)));
    }
  }

  /**
   * Returns a new ReqSketchBuilder
   * @return a new ReqSketchBuilder
   */
  public static final ReqSketchBuilder builder() {
    return new ReqSketchBuilder();
  }

  /**
   * Returns an ReqSketch on the heap from a Memory image of the sketch.
   * @param mem The Memory object holding a valid image of an ReqSketch
   * @return an ReqSketch on the heap from a Memory image of the sketch.
   */
  public static ReqSketch heapify(final Memory mem) {
    return ReqSerDe.heapify(mem);
  }

  @Override
  public int getK() {
    return k;
  }

  /**
   * This checks the given float array to make sure that it contains only finite numbers
   * and is monotonically increasing.
   * @param splits the given array
   * @throws SketchesArgumentException if a value is not finite or the array is not
   * strictly increasing
   */
  static void validateSplits(final float[] splits) {
    final int len = splits.length;
    for (int i = 0; i < len; i++) {
      final float v = splits[i];
      if (!Float.isFinite(v)) {
        throw new SketchesArgumentException("Numbers must be finite");
      }
      if (i < len - 1 && v >= splits[i + 1]) {
        throw new SketchesArgumentException(
            "Numbers must be unique and monotonically increasing");
      }
    }
  }

  @Override
  public double[] getCDF(final float[] splitPoints, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    refreshSortedView();
    return reqSV.getCDF(splitPoints, searchCrit);
  }

  @Override
  public boolean getHighRankAccuracyMode() {
    return hra;
  }

  @Override
  public float getMaxItem() {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    return maxItem;
  }

  @Override
  public float getMinItem() {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    return minItem;
  }

  @Override
  public long getN() {
    return totalN;
  }

  @Override
  public double[] getPMF(final float[] splitPoints, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    refreshSortedView();
    return reqSV.getPMF(splitPoints, searchCrit);
  }

  @Override
  public float getQuantile(final double normRank, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    if (normRank < 0 || normRank > 1.0) {
      throw new SketchesArgumentException(
          "Normalized rank must be in the range [0.0, 1.0]: " + normRank);
    }
    refreshSortedView();
    return reqSV.getQuantile(normRank, searchCrit);
  }

  @Override
  public float[] getQuantiles(final double[] normRanks, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    refreshSortedView();
    final int len = normRanks.length;
    final float[] qArr = new float[len];
    for (int i = 0; i < len; i++) {
      qArr[i] = reqSV.getQuantile(normRanks[i], searchCrit);
    }
    return qArr;
  }

  /**
   * {@inheritDoc}
   * The approximate probability that the true quantile is within the confidence interval
   * specified by the upper and lower quantile bounds for this sketch is 0.95.
   */
  @Override
  public float getQuantileLowerBound(final double rank) {
    return getQuantile(getRankLowerBound(rank, 2), INCLUSIVE);
  }

  @Override
  public float getQuantileLowerBound(final double rank, final int numStdDev) {
    return getQuantile(getRankLowerBound(rank, numStdDev), INCLUSIVE);
  }

  /**
   * {@inheritDoc}
   * The approximate probability that the true quantile is within the confidence interval
   * specified by the upper and lower quantile bounds for this sketch is 0.95.
   */
  @Override
  public float getQuantileUpperBound(final double rank) {
    return getQuantile(getRankUpperBound(rank, 2), INCLUSIVE);
  }

  @Override
  public float getQuantileUpperBound(final double rank, final int numStdDev) {
    return getQuantile(getRankUpperBound(rank, numStdDev), INCLUSIVE);
  }

  @Override
  public double getRank(final float quantile, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    refreshSortedView();
    return reqSV.getRank(quantile, searchCrit);
  }

  /**
   * {@inheritDoc}
   * The approximate probability that the true rank is within the confidence interval
   * specified by the upper and lower rank bounds for this sketch is 0.95.
   */
  @Override
  public double getRankLowerBound(final double rank) {
    return getRankLB(k, getNumLevels(), rank, 2, hra, getN());
  }

  @Override
  public double getRankLowerBound(final double rank, final int numStdDev) {
    return getRankLB(k, getNumLevels(), rank, numStdDev, hra, getN());
  }

  @Override
  public double[] getRanks(final float[] quantiles, final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    refreshSortedView();
    final int numQuantiles = quantiles.length;
    final double[] retArr = new double[numQuantiles];
    for (int i = 0; i < numQuantiles; i++) {
      retArr[i] = reqSV.getRank(quantiles[i], searchCrit); //already normalized
    }
    return retArr;
  }

  /**
   * {@inheritDoc}
   * The approximate probability that the true rank is within the confidence interval
   * specified by the upper and lower rank bounds for this sketch is 0.95.
   */
  @Override
  public double getRankUpperBound(final double rank) {
    return getRankUB(k, getNumLevels(), rank, 2, hra, getN());
  }

  @Override
  public double getRankUpperBound(final double rank, final int numStdDev) {
    return getRankUB(k, getNumLevels(), rank, numStdDev, hra, getN());
  }

  @Override
  public int getNumRetained() { return retItems; }

  @Override
  public int getSerializedSizeBytes() {
    final ReqSerDe.SerDeFormat serDeFormat = ReqSerDe.getSerFormat(this);
    return ReqSerDe.getSerBytes(this, serDeFormat);
  }

  @Override
  public FloatsSortedView getSortedView() {
    refreshSortedView();
    return reqSV;
  }

  @Override
  public boolean isEmpty() {
    return totalN == 0;
  }

  @Override
  public boolean isEstimationMode() {
    return getNumLevels() > 1;
  }

  @Override
  public QuantilesFloatsSketchIterator iterator() {
    return new ReqSketchIterator(this);
  }

  @Override
  public ReqSketch merge(final ReqSketch other) {
    if (other == null || other.isEmpty()) { return this; }
    if (other.hra != hra) {
      throw new SketchesArgumentException(
          "Both sketches must have the same HighRankAccuracy setting.");
    }
    totalN += other.totalN;
    //update min, max items, n
    if (Float.isNaN(minItem) || other.minItem < minItem) { minItem = other.minItem; }
    if (Float.isNaN(maxItem) || other.maxItem > maxItem) { maxItem = other.maxItem; }
    //Grow until self has at least as many compactors as other
    while (getNumLevels() < other.getNumLevels()) { grow(); }
    //Merge the items in all height compactors
    for (int i = 0; i < other.getNumLevels(); i++) {
      compactors.get(i).merge(other.compactors.get(i));
    }
    maxNomSize = computeMaxNomSize();
    retItems = computeTotalRetainedItems();
    if (retItems >= maxNomSize) {
      compress();
    }
    assert retItems < maxNomSize;
    reqSV = null; //sorted view is stale after a merge
    return this;
  }

  @Override
  public void reset() {
    //returns the sketch to its empty, single-level state; k and hra are retained
    totalN = 0;
    retItems = 0;
    maxNomSize = 0;
    minItem = Float.NaN;
    maxItem = Float.NaN;
    reqSV = null;
    compactors = new ArrayList<>();
    grow();
  }

  @Override
  public byte[] toByteArray() {
    return ReqSerDe.toByteArray(this);
  }

  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();
    sb.append("**********Relative Error Quantiles Sketch Summary**********").append(LS);
    sb.append("  K               : " + k).append(LS);
    sb.append("  N               : " + totalN).append(LS);
    sb.append("  Retained Items  : " + retItems).append(LS);
    sb.append("  Min Item        : " + minItem).append(LS);
    sb.append("  Max Item        : " + maxItem).append(LS);
    sb.append("  Estimation Mode : " + isEstimationMode()).append(LS);
    sb.append("  High Rank Acc   : " + hra).append(LS);
    sb.append("  Levels          : " + compactors.size()).append(LS);
    sb.append("************************End Summary************************").append(LS);
    return sb.toString();
  }

  @Override
  public void update(final float item) {
    if (Float.isNaN(item)) { return; } //NaN items are ignored
    if (isEmpty()) {
      minItem = item;
      maxItem = item;
    } else {
      if (item < minItem) { minItem = item; }
      if (item > maxItem) { maxItem = item; }
    }
    final FloatBuffer buf = compactors.get(0).getBuffer();
    buf.append(item);
    retItems++;
    totalN++;
    //compress only when the total retained items reach the combined nominal capacity
    if (retItems >= maxNomSize) {
      buf.sort();
      compress();
    }
    reqSV = null; //sorted view is stale after an update
  }

  @Override
  public String viewCompactorDetail(final String fmt, final boolean allData) {
    final StringBuilder sb = new StringBuilder();
    sb.append("*********Relative Error Quantiles Compactor Detail*********").append(LS);
    sb.append("Compactor Detail: Ret Items: ").append(getNumRetained())
      .append("  N: ").append(getN());
    sb.append(LS);
    for (int i = 0; i < getNumLevels(); i++) {
      final ReqCompactor c = compactors.get(i);
      sb.append(c.toListPrefix()).append(LS);
      if (allData) { sb.append(c.getBuffer().toHorizList(fmt, 20)).append(LS); }
    }
    sb.append("************************End Detail*************************").append(LS);
    return sb.toString();
  }

  /**
   * Computes a new bound for determining when to compress the sketch.
   * @return the sum of the nominal capacities of all compactors.
   */
  int computeMaxNomSize() {
    int cap = 0;
    for (final ReqCompactor c : compactors) { cap += c.getNomCapacity(); }
    return cap;
  }

  /**
   * Computes the retained Items for the sketch.
   * @return the total number of items retained across all compactors.
   */
  int computeTotalRetainedItems() {
    int count = 0;
    for (final ReqCompactor c : compactors) {
      count += c.getBuffer().getCount();
    }
    return count;
  }

  List<ReqCompactor> getCompactors() {
    return compactors;
  }

  int getMaxNomSize() {
    return maxNomSize;
  }

  /**
   * Gets the number of levels of compactors in the sketch.
   * @return the number of levels of compactors in the sketch.
   */
  int getNumLevels() {
    return compactors.size();
  }

  void setMaxNomSize(final int maxNomSize) {
    this.maxNomSize = maxNomSize;
  }

  void setRetainedItems(final int retItems) {
    this.retItems = retItems;
  }

  //Rejects odd k and k outside [4, 1024].
  private static void checkK(final int k) {
    if ((k & 1) > 0 || k < 4 || k > 1024) {
      throw new SketchesArgumentException(
          "<i>K</i> must be even and in the range [4, 1024]: " + k );
    }
  }

  //Compacts every level whose buffer is at or above its nominal capacity,
  //promoting surviving items to the next level (growing a new level if needed).
  private void compress() {
    if (reqDebug != null) { reqDebug.emitStartCompress(); }
    for (int h = 0; h < compactors.size(); h++) {
      final ReqCompactor c = compactors.get(h);
      final int compRetItems = c.getBuffer().getCount();
      final int compNomCap = c.getNomCapacity();
      if (compRetItems >= compNomCap) {
        if (h + 1 >= getNumLevels()) { //at the top?
          if (reqDebug != null) { reqDebug.emitMustAddCompactor(); }
          grow(); //add a level, increases maxNomSize
        }
        final FloatBuffer promoted = c.compact(cReturn, this.rand);
        compactors.get(h + 1).getBuffer().mergeSortIn(promoted);
        retItems += cReturn.deltaRetItems;
        maxNomSize += cReturn.deltaNomSize;
        //we specifically decided not to do lazy compression.
      }
    }
    reqSV = null;
    if (reqDebug != null) { reqDebug.emitCompressDone(); }
  }

  //Appends a new compactor level whose lgWeight equals the current number of levels.
  private void grow() {
    final byte lgWeight = (byte)getNumLevels();
    if (lgWeight == 0 && reqDebug != null) { reqDebug.emitStart(this); }
    compactors.add(new ReqCompactor(lgWeight, hra, k, reqDebug));
    maxNomSize = computeMaxNomSize();
    if (reqDebug != null) { reqDebug.emitNewCompactor(lgWeight); }
  }

  //Builds the sorted view only if it is currently stale (null).
  private final void refreshSortedView() {
    reqSV = (reqSV == null) ? new ReqSketchSortedView(this) : reqSV;
  }
}
| 2,674 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/ReqSketchSortedViewIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import static org.apache.datasketches.quantilescommon.QuantileSearchCriteria.INCLUSIVE;
import org.apache.datasketches.quantilescommon.FloatsSortedViewIterator;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
/**
* Iterator over ReqSketchSortedView.
* @author Alexander Saydakov
* @author Lee Rhodes
*/
public final class ReqSketchSortedViewIterator implements FloatsSortedViewIterator {
  private final float[] quantiles; //sorted quantiles
  private final long[] cumWeights; //parallel cumulative weights (natural ranks)
  private final long totalN;
  private int index;

  /**
   * Wraps the given parallel arrays without copying them. The iterator starts one
   * position before the first item; call next() to advance to the first item.
   * @param quantiles the sorted quantiles array
   * @param cumWeights the parallel cumulative-weights array
   */
  ReqSketchSortedViewIterator(final float[] quantiles, final long[] cumWeights) {
    this.quantiles = quantiles;
    this.cumWeights = cumWeights;
    final int len = cumWeights.length;
    this.totalN = (len == 0) ? 0 : cumWeights[len - 1];
    this.index = -1;
  }

  @Override
  public long getCumulativeWeight(final QuantileSearchCriteria searchCrit) {
    //INCLUSIVE counts the current item; EXCLUSIVE counts only items strictly before it.
    if (searchCrit != INCLUSIVE) {
      return (index == 0) ? 0 : cumWeights[index - 1];
    }
    return cumWeights[index];
  }

  @Override
  public long getN() {
    return totalN;
  }

  @Override
  public double getNormalizedRank(final QuantileSearchCriteria searchCrit) {
    return getCumulativeWeight(searchCrit) / (double) totalN;
  }

  @Override
  public float getQuantile() {
    return quantiles[index];
  }

  @Override
  public long getWeight() {
    //The individual weight is the difference of adjacent cumulative weights.
    return (index == 0) ? cumWeights[0] : cumWeights[index] - cumWeights[index - 1];
  }

  @Override
  public boolean next() {
    return ++index < quantiles.length;
  }
}
| 2,675 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/ReqSketchBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import static org.apache.datasketches.common.Util.LS;
import static org.apache.datasketches.common.Util.TAB;
/**
* For building a new ReqSketch
*
* @author Lee Rhodes
*/
public class ReqSketchBuilder {
  private static final int DEFAULT_K = 12;

  // builder state; the 'b' prefix distinguishes builder fields from sketch fields
  private int bK;
  private boolean bHRA;
  private ReqDebug bReqDebug;

  /**
   * Constructor for the ReqSketchBuilder.
   */
  public ReqSketchBuilder() {
    bK = DEFAULT_K;
    bHRA = true;
    bReqDebug = null;
  }

  /**
   * Returns a new ReqSketch with the current configuration of the builder.
   * @return a new ReqSketch
   */
  public ReqSketch build() {
    return new ReqSketch(bK, bHRA, bReqDebug);
  }

  /**
   * Gets the builder configured High Rank Accuracy.
   * @return the builder configured High Rank Accuracy.
   */
  public boolean getHighRankAccuracy() {
    return bHRA;
  }

  /**
   * Gets the builder configured k.
   * @return the builder configured k.
   */
  public int getK() {
    return bK;
  }

  /**
   * Gets the builder configured ReqDebug
   * @return the builder configured ReqDebug, or null.
   */
  public ReqDebug getReqDebug() {
    return bReqDebug;
  }

  /**
   * This sets the parameter highRankAccuracy.
   * @param hra See <i>ReqSketch#ReqSketch(int, boolean, ReqDebug)</i>
   * @return this
   */
  public ReqSketchBuilder setHighRankAccuracy(final boolean hra) {
    bHRA = hra;
    return this;
  }

  /**
   * This sets the parameter k.
   * @param k See <i>ReqSketch#ReqSketch(int, boolean, ReqDebug)</i>
   * @return this
   */
  public ReqSketchBuilder setK(final int k) {
    bK = k;
    return this;
  }

  /**
   * This sets the parameter reqDebug.
   * @param reqDebug See <i>ReqSketch#ReqSketch(int, boolean, ReqDebug)</i>
   * @return this
   */
  public ReqSketchBuilder setReqDebug(final ReqDebug reqDebug) {
    bReqDebug = reqDebug;
    return this;
  }

  @Override
  public String toString() {
    final String dbgState = (bReqDebug == null) ? "invalid" : "valid";
    return new StringBuilder()
        .append("ReqSketchBuilder configuration:").append(LS)
        .append("K:").append(TAB).append(bK).append(LS)
        .append("HRA:").append(TAB).append(bHRA).append(LS)
        .append("ReqDebug:").append(TAB).append(dbgState).append(LS)
        .toString();
  }
}
| 2,676 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/ReqSerDe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static java.lang.Math.round;
import java.util.ArrayList;
import java.util.List;
import org.apache.datasketches.memory.Buffer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
/**
* This class handles serialization and deserialization.
*
* <p>ReqSketch SERIALIZATION FORMAT.</p>
*
* <p>Low significance bytes of this data structure are on the right just for visualization.
* The multi-byte primitives are stored in native byte order.
* The <i>byte</i> primitives are treated as unsigned. Multibyte primitives are indicated with "*" and
* their size depends on the specific implementation.</p>
*
* <p>The ESTIMATION binary format for an estimating sketch with > one item: </p>
*
* <pre>
* Normal Binary Format:
* PreInts=4
* Empty=false
* RawItems=false
* # Constructors > 1, C0 to Cm, whatever is required
*
* Long Adr / Byte Offset
* || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* 0 || (empty)| #Ctors | K | Flags |FamID=17| SerVer | PreInts = 4 |
*
* || 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 |
* 1 ||-----------------------------------N-----------------------------------------------|
*
* || | | | | | | | 16 |
* ||--------------MaxItem*----------------------|--------------MinItem*----------------|
*
* || | | | | | | | |
* ||----------------C1*-------------------------|----------------C0*-------------------|
* </pre>
*
* <p>An EXACT-binary format sketch has only one serialized compactor: </p>
*
* <pre>
* PreInts=2
* Empty=false
* RawItems=false
* # Constructors=C0=1
*
* Long Adr / Byte Offset
* || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* 0 || (empty)| 1 | K | Flags |FamID=17| SerVer | PreInts = 2 |
*
* || | | | | | | | 8 |
* 1 || |-------------------------C0*-------------------|
* </pre>
*
* <p>A RAW ITEMS binary format sketch has only a few items: </p>
*
* <pre>
* PreInts=2
* Empty=false
* RawItems=true
* # Constructors=C0=1
*
* Long Adr / Byte Offset
* || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* 0 || #Raw | 1 | K | Flags |FamID=17| SerVer | PreInts = 2 |
*
* || | | | | | | | 8 |
* 1 || |------------------------ITEM*-----------------|
* </pre>
*
* <p>An EMPTY binary format sketch has only 8 bytes including a reserved empty byte:
*
* <pre>
* PreInts=2
* Empty=true
* RawItems=false
* # Constructors==C0=1
*
* Long Adr / Byte Offset
* || 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* 0 || (empty)| 0 | K | Flags |FamID=17| SerVer | PreInts = 2 |
* </pre>
* <pre>
* <p>Flags:</p>
* Bit 0 : Endianness, reserved
* Bit 1 : ReadOnly, reserved
* Bit 2 : Empty
* Bit 3 : HRA
* Bit 4 : Raw Items
* Bit 5 : L0 Sorted
* Bit 6 : reserved
* Bit 7 : reserved
* </pre>
*
* @author Lee Rhodes
*/
class ReqSerDe {
  /** The four serialization layouts; binary formats are documented in the class javadoc. */
  enum SerDeFormat { EMPTY, RAWITEMS, EXACT, ESTIMATION }

  private static final byte SER_VER = 1;
  private static final byte FAMILY_ID = 17;

  /**
   * Reconstructs a heap ReqSketch from the given Memory image.
   * @param mem a Memory image created by {@link #toByteArray(ReqSketch)}
   * @return a new heap ReqSketch
   */
  static ReqSketch heapify(final Memory mem) {
    final Buffer buff = mem.asBuffer();
    //Extract first 8 bytes (the preamble)
    final byte preInts = buff.getByte();
    final byte serVer = buff.getByte();
    assert serVer == (byte)1; //NOTE: asserts validate the image only when the JVM -ea flag is set
    final byte familyId = buff.getByte();
    assert familyId == 17;
    // Extract flags; bit assignments are documented in the class javadoc
    final int flags = buff.getByte() & 0xFF;
    final boolean empty = (flags & 4) > 0;
    final boolean hra = (flags & 8) > 0;
    final boolean rawItems = (flags & 16) > 0;
    final boolean lvl0Sorted = (flags & 32) > 0;
    // remainder of the preamble
    final int k = buff.getShort() & 0xFFFF;
    final int numCompactors = buff.getByte() & 0xFF;
    final int numRawItems = buff.getByte() & 0xFF;
    // dispatch on the four serialization formats
    final SerDeFormat deserFormat = getDeserFormat(empty, rawItems, numCompactors);
    switch (deserFormat) {
      case EMPTY: {
        assert preInts == 2;
        return new ReqSketch(k, hra, null);
      }
      case RAWITEMS: { //few items: replay them through update()
        assert preInts == 2;
        final ReqSketch sk = new ReqSketch(k, hra, null);
        for (int i = 0; i < numRawItems; i++) { sk.update(buff.getFloat()); }
        return sk;
      }
      case EXACT: { //a single serialized compactor
        assert preInts == 2;
        final Compactor compactor = extractCompactor(buff, lvl0Sorted, hra);
        //Construct sketch; totalN, min and max come from the single compactor
        final long totalN = compactor.count;
        final float minItem = compactor.minItem;
        final float maxItem = compactor.maxItem;
        final List<ReqCompactor> compactors = new ArrayList<>();
        compactors.add(compactor.reqCompactor);
        final ReqSketch sk = new ReqSketch(k, hra, totalN, minItem, maxItem, compactors);
        sk.setMaxNomSize(sk.computeMaxNomSize());
        sk.setRetainedItems(sk.computeTotalRetainedItems());
        return sk;
      }
      default: { //ESTIMATION: N, min, max are explicit, followed by all compactors
        assert preInts == 4;
        final long totalN = buff.getLong();
        final float minItem = buff.getFloat();
        final float maxItem = buff.getFloat();
        final List<ReqCompactor> compactors = new ArrayList<>();
        for (int i = 0; i < numCompactors; i++) {
          //only level 0 may be unsorted; higher levels are treated as sorted
          final boolean level0sorted = i == 0 ? lvl0Sorted : true;
          final Compactor compactor = extractCompactor(buff, level0sorted, hra);
          compactors.add(compactor.reqCompactor);
        }
        final ReqSketch sk = new ReqSketch(k, hra, totalN, minItem, maxItem, compactors);
        sk.setMaxNomSize(sk.computeMaxNomSize());
        sk.setRetainedItems(sk.computeTotalRetainedItems());
        return sk;
      }
    }
  }

  /**
   * Deserializes a single compactor from the given Buffer, recomputing its min/max items
   * and retained item count from the serialized item array.
   * @param buff the source Buffer, positioned at the start of a serialized compactor
   * @param lvl0Sorted true if this compactor's buffer is sorted (relevant for level 0)
   * @param hra the High Rank Accuracy mode of the sketch
   * @return a Compactor carrier object
   */
  static final Compactor extractCompactor(final Buffer buff, final boolean lvl0Sorted,
      final boolean hra) {
    final long state = buff.getLong();
    final float sectionSizeFlt = buff.getFloat();
    final int sectionSize = round(sectionSizeFlt);
    final byte lgWt = buff.getByte();
    final byte numSections = buff.getByte();
    buff.incrementPosition(2); //skip 2 pad bytes
    final int count = buff.getInt();
    final float[] arr = new float[count];
    buff.getFloatArray(arr, 0, count);
    //Recompute min/max over the retained items.
    //FIX: seed with +/- infinity. The previous maxItem seed of Float.MIN_VALUE (the smallest
    //POSITIVE float, ~1.4E-45) produced a wrong maximum for streams of all-negative items.
    float minItem = Float.POSITIVE_INFINITY;
    float maxItem = Float.NEGATIVE_INFINITY;
    for (int i = 0; i < count; i++) {
      minItem = min(minItem, arr[i]);
      maxItem = max(maxItem, arr[i]);
    }
    final int delta = 2 * sectionSize * numSections;
    final int nomCap = 2 * delta;
    final int cap = max(count, nomCap);
    final FloatBuffer fltBuf = FloatBuffer.reconstruct(arr, count, cap, delta, lvl0Sorted, hra);
    final ReqCompactor reqCompactor =
        new ReqCompactor(lgWt, hra, state, sectionSizeFlt, numSections, fltBuf);
    return new Compactor(reqCompactor, minItem, maxItem, count);
  }

  /** Simple carrier pairing a deserialized ReqCompactor with its recomputed min, max and count. */
  static class Compactor {
    ReqCompactor reqCompactor;
    float minItem;
    float maxItem;
    int count;

    Compactor(final ReqCompactor reqCompactor, final float minItem, final float maxItem,
        final int count) {
      this.reqCompactor = reqCompactor;
      this.minItem = minItem;
      this.maxItem = maxItem;
      this.count = count;
    }
  }

  /** Composes the flags byte; bit assignments are documented in the class javadoc. */
  private static byte getFlags(final ReqSketch sk) {
    final boolean rawItems = sk.getN() <= ReqSketch.MIN_K;
    final boolean level0Sorted = sk.getCompactors().get(0).getBuffer().isSorted();
    final int flags = (sk.isEmpty() ? 4 : 0)
        | (sk.getHighRankAccuracyMode() ? 8 : 0)
        | (rawItems ? 16 : 0)
        | (level0Sorted ? 32 : 0);
    return (byte) flags;
  }

  /** Selects the most compact serialization format for the given sketch state. */
  static SerDeFormat getSerFormat(final ReqSketch sk) {
    if (sk.isEmpty()) { return SerDeFormat.EMPTY; }
    if (sk.getN() <= ReqSketch.MIN_K) { return SerDeFormat.RAWITEMS; }
    if (sk.getNumLevels() == 1) { return SerDeFormat.EXACT; }
    return SerDeFormat.ESTIMATION;
  }

  /** Infers the serialization format of an image from its preamble fields. */
  private static SerDeFormat getDeserFormat(final boolean empty, final boolean rawItems,
      final int numCompactors) {
    if (numCompactors <= 1) {
      if (empty) { return SerDeFormat.EMPTY; }
      if (rawItems) { return SerDeFormat.RAWITEMS; }
      return SerDeFormat.EXACT;
    }
    return SerDeFormat.ESTIMATION;
  }

  /**
   * Serializes the given sketch using the most compact of the four formats.
   * @param sk the sketch to serialize
   * @return the serialized byte array
   */
  static byte[] toByteArray(final ReqSketch sk) {
    final SerDeFormat serDeFormat = getSerFormat(sk);
    final int bytes = getSerBytes(sk, serDeFormat);
    final byte[] arr = new byte[bytes];
    final WritableBuffer wbuf = WritableMemory.writableWrap(arr).asWritableBuffer();
    //Common 8-byte preamble
    final byte preInts = (byte)(serDeFormat == SerDeFormat.ESTIMATION ? 4 : 2);
    final byte flags = getFlags(sk);
    final byte numCompactors = sk.isEmpty() ? 0 : (byte) sk.getNumLevels();
    //CONSISTENCY FIX: use MIN_K, matching getSerFormat() and getFlags(); the previous
    //hard-coded literal 4 silently assumed ReqSketch.MIN_K == 4.
    final byte numRawItems = sk.getN() <= ReqSketch.MIN_K ? (byte) sk.getN() : 0;
    wbuf.putByte(preInts);
    wbuf.putByte(SER_VER);
    wbuf.putByte(FAMILY_ID);
    wbuf.putByte(flags);
    wbuf.putShort((short)sk.getK());
    wbuf.putByte(numCompactors);
    wbuf.putByte(numRawItems);
    switch (serDeFormat) {
      case EMPTY: {
        assert wbuf.getPosition() == bytes;
        return arr;
      }
      case RAWITEMS: { //write the raw items directly from the level 0 buffer
        final ReqCompactor c0 = sk.getCompactors().get(0);
        final FloatBuffer fbuf = c0.getBuffer();
        for (int i = 0; i < numRawItems; i++) { wbuf.putFloat(fbuf.getItem(i)); }
        assert wbuf.getPosition() == bytes;
        return arr;
      }
      case EXACT: { //a single serialized compactor
        final ReqCompactor c0 = sk.getCompactors().get(0);
        wbuf.putByteArray(c0.toByteArray(), 0, c0.getSerializationBytes());
        assert wbuf.getPosition() == bytes;
        return arr;
      }
      default: { //Normal Estimation: N, min, max, then all compactors
        wbuf.putLong(sk.getN());
        wbuf.putFloat(sk.getMinItem());
        wbuf.putFloat(sk.getMaxItem());
        for (int i = 0; i < numCompactors; i++) {
          final ReqCompactor c = sk.getCompactors().get(i);
          wbuf.putByteArray(c.toByteArray(), 0, c.getSerializationBytes());
        }
        assert wbuf.getPosition() == bytes : wbuf.getPosition() + ", " + bytes;
        return arr;
      }
    }
  }

  /** Computes the exact serialized size in bytes for the given sketch and format. */
  static int getSerBytes(final ReqSketch sk, final SerDeFormat serDeFormat) {
    switch (serDeFormat) {
      case EMPTY: {
        return 8; //preamble only
      }
      case RAWITEMS: {
        return sk.getCompactors().get(0).getBuffer().getCount() * Float.BYTES + 8;
      }
      case EXACT: {
        return sk.getCompactors().get(0).getSerializationBytes() + 8;
      }
      default: { //ESTIMATION: preamble(8) + N(8) + min(4) + max(4) + compactors
        int cBytes = 0;
        for (int i = 0; i < sk.getNumLevels(); i++) {
          cBytes += sk.getCompactors().get(i).getSerializationBytes();
        }
        return cBytes + 24;
      }
    }
  }
}
| 2,677 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/BaseReqSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.req;
import static org.apache.datasketches.quantilescommon.QuantilesUtil.equallyWeightedRanks;
import org.apache.datasketches.quantilescommon.FloatsSortedView;
import org.apache.datasketches.quantilescommon.QuantileSearchCriteria;
import org.apache.datasketches.quantilescommon.QuantilesAPI;
import org.apache.datasketches.quantilescommon.QuantilesFloatsAPI;
import org.apache.datasketches.quantilescommon.QuantilesFloatsSketchIterator;
/**
* This abstract class provides a single place to define and document the public API
* for the Relative Error Quantiles Sketch.
*
* @see <a href="https://datasketches.apache.org/docs/Quantiles/SketchingQuantilesAndRanksTutorial.html">
* Sketching Quantiles and Ranks Tutorial</a>
*
* @author Lee Rhodes
*/
abstract class BaseReqSketch implements QuantilesFloatsAPI {
  // Initial number of sections per compactor; also appears in the error-bound formulas below.
  static final byte INIT_NUMBER_OF_SECTIONS = 3;

  //These two factors are used by upper and lower bounds
  private static final double relRseFactor = Math.sqrt(0.0512 / INIT_NUMBER_OF_SECTIONS);
  private static final double fixRseFactor = .084;

  @Override
  public abstract double[] getCDF(float[] splitPoints, QuantileSearchCriteria searchCrit);

  /**
   * If true, the high ranks are prioritized for better accuracy. Otherwise
   * the low ranks are prioritized for better accuracy. This state is chosen during sketch
   * construction.
   * @return the high ranks accuracy state.
   */
  public abstract boolean getHighRankAccuracyMode();

  @Override
  public abstract int getK();

  @Override
  public abstract float getMaxItem();

  @Override
  public abstract float getMinItem();

  /**
   * Computes equally weighted partition boundaries from this sketch.
   * The first and last boundaries are pinned to the exact min and max items of the stream.
   * @param numEquallyWeighted the requested number of equally weighted partitions
   * @param searchCrit the search criterion used to obtain the boundary quantiles
   * @return a FloatsPartitionBoundaries carrier with N, the ranks, and the boundary quantiles
   * @throws IllegalArgumentException if this sketch is empty
   */
  @Override
  public FloatsPartitionBoundaries getPartitionBoundaries(final int numEquallyWeighted,
      final QuantileSearchCriteria searchCrit) {
    if (isEmpty()) { throw new IllegalArgumentException(QuantilesAPI.EMPTY_MSG); }
    final double[] ranks = equallyWeightedRanks(numEquallyWeighted);
    final float[] boundaries = getQuantiles(ranks, searchCrit);
    //pin the extreme boundaries to the exact min and max items
    boundaries[0] = getMinItem();
    boundaries[boundaries.length - 1] = getMaxItem();
    final FloatsPartitionBoundaries fpb = new FloatsPartitionBoundaries();
    fpb.N = this.getN();
    fpb.ranks = ranks;
    fpb.boundaries = boundaries;
    return fpb;
  }

  /**
   * Returns an a priori estimate of relative standard error (RSE, expressed as a number in [0,1]).
   * Derived from Lemma 12 in https://arxiv.org/abs/2004.01668v2, but the constant factors were
   * adjusted based on empirical measurements.
   *
   * @param k the given size of k
   * @param rank the given normalized rank, a number in [0,1].
   * @param hra if true High Rank Accuracy mode is being selected, otherwise, Low Rank Accuracy.
   * @param totalN an estimate of the total number of items submitted to the sketch.
   * @return an a priori estimate of relative standard error (RSE, expressed as a number in [0,1]).
   */
  public static double getRSE(final int k, final double rank, final boolean hra, final long totalN) {
    return getRankUB(k, 2, rank, 1, hra, totalN); //more conservative to assume > 1 level
  }

  @Override
  public abstract long getN();

  @Override
  public abstract double[] getPMF(float[] splitPoints, QuantileSearchCriteria searchCrit);

  @Override
  public abstract float getQuantile(double rank, QuantileSearchCriteria searchCrit);

  @Override
  public abstract float[] getQuantiles(double[] normRanks, QuantileSearchCriteria searchCrit);

  @Override
  public abstract float getQuantileLowerBound(double rank);

  /**
   * Gets an approximate lower bound quantile of the given normalized rank at the given
   * number of standard deviations.
   * @param rank the given normalized rank, a number between 0 and 1.0.
   * @param numStdDev the number of standard deviations.
   * @return an approximate lower bound quantile.
   */
  public abstract float getQuantileLowerBound(double rank, int numStdDev);

  @Override
  public abstract float getQuantileUpperBound(double rank);

  /**
   * Gets an approximate upper bound quantile of the given normalized rank at the given
   * number of standard deviations.
   * @param rank the given normalized rank, a number between 0 and 1.0.
   * @param numStdDev the number of standard deviations.
   * @return an approximate upper bound quantile.
   */
  public abstract float getQuantileUpperBound(double rank, int numStdDev);

  @Override
  public abstract double getRank(float quantile, QuantileSearchCriteria searchCrit);

  /**
   * Gets an approximate lower bound rank of the given normalized rank.
   * @param rank the given rank, a number between 0 and 1.0.
   * @param numStdDev the number of standard deviations. Must be 1, 2, or 3.
   * @return an approximate lower bound rank.
   */
  public abstract double getRankLowerBound(double rank, int numStdDev);

  @Override
  public abstract double[] getRanks(float[] quantiles, QuantileSearchCriteria searchCrit);

  /**
   * Gets an approximate upper bound rank of the given rank.
   * @param rank the given rank, a number between 0 and 1.0.
   * @param numStdDev the number of standard deviations. Must be 1, 2, or 3.
   * @return an approximate upper bound rank.
   */
  public abstract double getRankUpperBound(double rank, int numStdDev);

  @Override
  public abstract int getNumRetained();

  @Override
  public abstract int getSerializedSizeBytes();

  @Override
  public abstract FloatsSortedView getSortedView();

  @Override
  public boolean hasMemory() {
    return false; //this family is always on-heap
  }

  @Override
  public boolean isDirect() {
    return false; //this family is always on-heap
  }

  @Override
  public abstract boolean isEmpty();

  @Override
  public abstract boolean isEstimationMode();

  @Override
  public boolean isReadOnly() {
    return false;
  }

  @Override
  public abstract QuantilesFloatsSketchIterator iterator();

  /**
   * Merge other sketch into this one. The other sketch is not modified.
   * @param other sketch to be merged into this one.
   * @return this
   */
  public abstract ReqSketch merge(final ReqSketch other);

  /**
   * {@inheritDoc}
   * <p>The parameters k, highRankAccuracy, and reqDebug will not change.</p>
   */
  @Override
  public abstract void reset();

  @Override
  public abstract byte[] toByteArray();

  @Override
  public abstract String toString();

  @Override
  public abstract void update(final float item);

  /**
   * A detailed, human readable view of the sketch compactors and their data.
   * Each compactor string is prepended by the compactor lgWeight, the current number of retained
   * quantiles of the compactor and the current nominal capacity of the compactor.
   * @param fmt the format string for the quantiles; example: "%4.0f".
   * @param allData all the retained quantiles for the sketch will be output by
   * compactor level. Otherwise, just a summary will be output.
   * @return a detailed view of the compactors and their data
   */
  public abstract String viewCompactorDetail(String fmt, boolean allData);

  /**
   * Returns true if the given rank lies in a region where the sketch is still exact:
   * either the sketch has a single level (or totalN has not exceeded the base capacity),
   * or the rank is within baseCap/totalN of the accurate end (high end for HRA, low end
   * otherwise).
   * @param k the configured k
   * @param levels the current number of compactor levels
   * @param rank the given normalized rank
   * @param hra the High Rank Accuracy mode
   * @param totalN the total number of items offered to the sketch
   * @return true if the result at the given rank is exact
   */
  static boolean exactRank(final int k, final int levels, final double rank,
      final boolean hra, final long totalN) {
    final int baseCap = k * INIT_NUMBER_OF_SECTIONS;
    if (levels == 1 || totalN <= baseCap) { return true; } //nothing has been compacted yet
    final double exactRankThresh = (double)baseCap / totalN;
    return hra && rank >= 1.0 - exactRankThresh || !hra && rank <= exactRankThresh;
  }

  /**
   * Computes the lower bound rank: the given rank minus numStdDev times the larger error
   * estimate, where the error is the max of a rank-relative term and a fixed term.
   * @param k the configured k
   * @param levels the current number of compactor levels
   * @param rank the given normalized rank
   * @param numStdDev the number of standard deviations
   * @param hra the High Rank Accuracy mode
   * @param totalN the total number of items offered to the sketch
   * @return the approximate lower bound rank
   */
  static double getRankLB(final int k, final int levels, final double rank,
      final int numStdDev, final boolean hra, final long totalN) {
    if (exactRank(k, levels, rank, hra, totalN)) { return rank; } //no error in the exact region
    final double relative = relRseFactor / k * (hra ? 1.0 - rank : rank);
    final double fixed = fixRseFactor / k;
    final double lbRel = rank - numStdDev * relative;
    final double lbFix = rank - numStdDev * fixed;
    return Math.max(lbRel, lbFix);
  }

  /**
   * Computes the upper bound rank: the given rank plus numStdDev times the smaller of a
   * rank-relative error term and a fixed error term.
   * @param k the configured k
   * @param levels the current number of compactor levels
   * @param rank the given normalized rank
   * @param numStdDev the number of standard deviations
   * @param hra the High Rank Accuracy mode
   * @param totalN the total number of items offered to the sketch
   * @return the approximate upper bound rank
   */
  static double getRankUB(final int k, final int levels, final double rank,
      final int numStdDev, final boolean hra, final long totalN) {
    if (exactRank(k, levels, rank, hra, totalN)) { return rank; } //no error in the exact region
    final double relative = relRseFactor / k * (hra ? 1.0 - rank : rank);
    final double fixed = fixRseFactor / k;
    final double ubRel = rank + numStdDev * relative;
    final double ubFix = rank + numStdDev * fixed;
    return Math.min(ubRel, ubFix);
  }
}
| 2,678 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/req/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package is for the implementation of the Relative Error Quantiles sketch algorithm.
* @see org.apache.datasketches.req.ReqSketch
*/
package org.apache.datasketches.req;
| 2,679 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/BoundsOnBinomialProportions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
/**
* Confidence intervals for binomial proportions.
*
* <p>This class computes an approximation to the Clopper-Pearson confidence interval
* for a binomial proportion. Exact Clopper-Pearson intervals are strictly
* conservative, but these approximations are not.</p>
*
* <p>The main inputs are numbers <i>n</i> and <i>k</i>, which are not the same as other things
* that are called <i>n</i> and <i>k</i> in our sketching library. There is also a third
* parameter, numStdDev, that specifies the desired confidence level.</p>
* <ul>
* <li><i>n</i> is the number of independent randomized trials. It is given and therefore known.</li>
* <li><i>p</i> is the probability of a trial being a success. It is unknown.</li>
* <li><i>k</i> is the number of trials (out of <i>n</i>) that turn out to be successes. It is
* a random variable governed by a binomial distribution. After any given
* batch of <i>n</i> independent trials, the random variable <i>k</i> has a specific
* value which is observed and is therefore known.</li>
* <li><i>pHat</i> = <i>k</i> / <i>n</i> is an unbiased estimate of the unknown success
* probability <i>p</i>.</li>
* </ul>
*
* <p>Alternatively, consider a coin with unknown heads probability <i>p</i>. Where
* <i>n</i> is the number of independent flips of that coin, and <i>k</i> is the number
* of times that the coin comes up heads during a given batch of <i>n</i> flips.
* This class computes a frequentist confidence interval [lowerBoundOnP, upperBoundOnP] for the
* unknown <i>p</i>.</p>
*
* <p>Conceptually, the desired confidence level is specified by a tail probability delta.</p>
*
* <p>Ideally, over a large ensemble of independent batches of trials,
* the fraction of batches in which the true <i>p</i> lies below lowerBoundOnP would be at most
* delta, and the fraction of batches in which the true <i>p</i> lies above upperBoundOnP
* would also be at most delta.
*
* <p>Setting aside the philosophical difficulties attaching to that statement, it isn't quite
* true because we are approximating the Clopper-Pearson interval.</p>
*
* <p>Finally, we point out that in this class's interface, the confidence parameter delta is
* not specified directly, but rather through a "number of standard deviations" numStdDev.
* The library effectively converts that to a delta via delta = normalCDF (-1.0 * numStdDev).</p>
*
* <p>It is perhaps worth emphasizing that the library is NOT merely adding and subtracting
* numStdDev standard deviations to the estimate. It is doing something better, that to some
* extent accounts for the fact that the binomial distribution has a non-gaussian shape.</p>
*
* <p>In particular, it is using an approximation to the inverse of the incomplete beta function
* that appears as formula 26.5.22 on page 945 of the "Handbook of Mathematical Functions"
* by Abramowitz and Stegun.</p>
*
* @author Kevin Lang
*/
public final class BoundsOnBinomialProportions { // confidence intervals for binomial proportions
private BoundsOnBinomialProportions() {}
/**
* Computes lower bound of approximate Clopper-Pearson confidence interval for a binomial
* proportion.
*
* <p>Implementation Notes:<br>
* The approximateLowerBoundOnP is defined with respect to the right tail of the binomial
* distribution.</p>
* <ul>
* <li>We want to solve for the <i>p</i> for which sum<sub><i>j,k,n</i></sub>bino(<i>j;n,p</i>)
* = delta.</li>
* <li>We now restate that in terms of the left tail.</li>
* <li>We want to solve for the p for which sum<sub><i>j,0,(k-1)</i></sub>bino(<i>j;n,p</i>)
* = 1 - delta.</li>
* <li>Define <i>x</i> = 1-<i>p</i>.</li>
* <li>We want to solve for the <i>x</i> for which I<sub><i>x(n-k+1,k)</i></sub> = 1 - delta.</li>
* <li>We specify 1-delta via numStdDevs through the right tail of the standard normal
* distribution.</li>
* <li>Smaller values of numStdDevs correspond to bigger values of 1-delta and hence to smaller
* values of delta. In fact, usefully small values of delta correspond to negative values of
* numStdDevs.</li>
* <li>return <i>p</i> = 1-<i>x</i>.</li>
* </ul>
*
* @param n is the number of trials. Must be non-negative.
* @param k is the number of successes. Must be non-negative, and cannot exceed n.
* @param numStdDevs the number of standard deviations defining the confidence interval
* @return the lower bound of the approximate Clopper-Pearson confidence interval for the
* unknown success probability.
*/
public static double approximateLowerBoundOnP(final long n, final long k, final double numStdDevs) {
  checkInputs(n, k);
  // no trials, or no successes observed: the lower bound is trivially zero
  if (n == 0 || k == 0) { return 0.0; }
  // exact formulas exist for the two extreme success counts
  if (k == 1) { return exactLowerBoundOnPForKequalsOne(n, deltaOfNumStdevs(numStdDevs)); }
  if (k == n) { return exactLowerBoundOnPForKequalsN(n, deltaOfNumStdevs(numStdDevs)); }
  // general case: invert the incomplete beta function via A&S 26.5.22 on x = 1 - p
  final double x = abramowitzStegunFormula26p5p22((n - k) + 1, k, -1.0 * numStdDevs);
  return 1.0 - x; // which is p
}
/**
* Computes upper bound of approximate Clopper-Pearson confidence interval for a binomial
* proportion.
*
* <p>Implementation Notes:<br>
* The approximateUpperBoundOnP is defined with respect to the left tail of the binomial
* distribution.</p>
* <ul>
* <li>We want to solve for the <i>p</i> for which sum<sub><i>j,0,k</i></sub>bino(<i>j;n,p</i>)
* = delta.</li>
* <li>Define <i>x</i> = 1-<i>p</i>.</li>
* <li>We want to solve for the <i>x</i> for which I<sub><i>x(n-k,k+1)</i></sub> = delta.</li>
* <li>We specify delta via numStdDevs through the right tail of the standard normal
* distribution.</li>
* <li>Bigger values of numStdDevs correspond to smaller values of delta.</li>
* <li>return <i>p</i> = 1-<i>x</i>.</li>
* </ul>
* @param n is the number of trials. Must be non-negative.
* @param k is the number of successes. Must be non-negative, and cannot exceed <i>n</i>.
* @param numStdDevs the number of standard deviations defining the confidence interval
* @return the upper bound of the approximate Clopper-Pearson confidence interval for the
* unknown success probability.
*/
public static double approximateUpperBoundOnP(final long n, final long k, final double numStdDevs) {
  checkInputs(n, k);
  // no trials, or every trial succeeded: the upper bound is trivially one
  if (n == 0 || k == n) { return 1.0; }
  // exact formulas exist for the two extreme success counts
  if (k == (n - 1)) {
    return exactUpperBoundOnPForKequalsNminusOne(n, deltaOfNumStdevs(numStdDevs));
  }
  if (k == 0) {
    return exactUpperBoundOnPForKequalsZero(n, deltaOfNumStdevs(numStdDevs));
  }
  // general case: invert the incomplete beta function via A&S 26.5.22 on x = 1 - p
  final double x = abramowitzStegunFormula26p5p22(n - k, k + 1, numStdDevs);
  return 1.0 - x; // which is p
}
/**
* Computes an estimate of an unknown binomial proportion.
* @param n is the number of trials. Must be non-negative.
* @param k is the number of successes. Must be non-negative, and cannot exceed n.
* @return the estimate of the unknown binomial proportion.
*/
public static double estimateUnknownP(final long n, final long k) {
  checkInputs(n, k);
  // with zero trials there is no evidence either way, so return the midpoint
  return (n == 0) ? 0.5 : (double) k / (double) n;
}
private static void checkInputs(final long n, final long k) {
  // Enforce 0 <= k <= n; the order of the checks fixes which message wins when several fail.
  if (n < 0) {
    throw new SketchesArgumentException("N must be non-negative");
  }
  if (k < 0) {
    throw new SketchesArgumentException("K must be non-negative");
  }
  if (k > n) {
    throw new SketchesArgumentException("K cannot exceed N");
  }
}
/**
* Computes an approximation to the erf() function.
* @param x is the input to the erf function
* @return returns erf(x), accurate to roughly 7 decimal digits.
*/
public static double erf(final double x) {
  // erf is an odd function: erf(-x) = -erf(x), so reduce to the non-negative case
  return (x < 0.0) ? -1.0 * erf_of_nonneg(-1.0 * x) : erf_of_nonneg(x);
}
/**
* Computes an approximation to normalCDF(x).
* @param x is the input to the normalCDF function
* @return returns the approximation to normalCDF(x).
*/
public static double normalCDF(final double x) {
  // standard identity: Phi(x) = (1 + erf(x / sqrt(2))) / 2
  final double scaled = x / (Math.sqrt(2.0));
  return 0.5 * (1.0 + (erf(scaled)));
}
//@formatter:off
// Abramowitz and Stegun formula 7.1.28, p. 88; Claims accuracy of about 7 decimal digits
private static double erf_of_nonneg(final double x) {
  // Coefficients from the book, declared here in ascending index order.
  // Names deliberately match the book so the transcription is easy to verify.
  final double a1 = 0.07052_30784;
  final double a2 = 0.04228_20123;
  final double a3 = 0.00927_05272;
  final double a4 = 0.00015_20143;
  final double a5 = 0.00027_65672;
  final double a6 = 0.00004_30638;
  // powers of x, built from products so each is exact in at most two multiplies
  final double x2 = x * x;
  final double x3 = x2 * x;
  final double x4 = x2 * x2;
  final double x5 = x2 * x3;
  final double x6 = x3 * x3;
  // degree-6 polynomial; term order is kept identical for bit-for-bit reproducibility
  final double sum = ( 1.0
             + (a1 * x)
             + (a2 * x2)
             + (a3 * x3)
             + (a4 * x4)
             + (a5 * x5)
             + (a6 * x6) );
  // raise the sum to the 16th power by repeated squaring
  final double sum2 = sum * sum;
  final double sum4 = sum2 * sum2;
  final double sum8 = sum4 * sum4;
  final double sum16 = sum8 * sum8;
  return (1.0 - (1.0 / sum16));
}
//@formatter:on
// Converts a number of standard deviations (kappa) into the one-sided gaussian
// tail probability delta = normalCDF(-kappa).
private static double deltaOfNumStdevs(final double kappa) {
  return (normalCDF(-1.0 * kappa));
}
// Formula 26.5.22 on page 945 of Abramowitz & Stegun, which is an approximation
// of the inverse of the incomplete beta function I_x(a,b) = delta
// viewed as a scalar function of x.
// In other words, we specify delta, and it gives us x (with a and b held constant).
// However, delta is specified in an indirect way through yp which
// is the number of stdDevs that leaves delta probability in the right
// tail of a standard gaussian distribution.
// We point out that the variable names correspond to those in the book,
// and it is worth keeping it that way so that it will always be easy to verify
// that the formula was typed in correctly.
private static double abramowitzStegunFormula26p5p22(final double a, final double b,
    final double yp) {
  final double b2m1 = (2.0 * b) - 1.0;
  final double a2m1 = (2.0 * a) - 1.0;
  final double lambda = ((yp * yp) - 3.0) / 6.0;
  final double htmp = (1.0 / a2m1) + (1.0 / b2m1);
  final double h = 2.0 / htmp;
  final double term1 = (yp * (Math.sqrt(h + lambda))) / h;
  final double term2 = (1.0 / b2m1) - (1.0 / a2m1);
  final double term3 = (lambda + (5.0 / 6.0)) - (2.0 / (3.0 * h));
  final double w = term1 - (term2 * term3); // w is the book's intermediate exponent term
  final double xp = a / (a + (b * (Math.exp(2.0 * w)))); // xp is the approximated x
  return xp;
}
// Formulas for some special cases.
// Each returns a closed-form bound on p given n trials and the tail probability delta.

// Upper bound when k == 0: returns 1 - delta^(1/n).
private static double exactUpperBoundOnPForKequalsZero(final double n, final double delta) {
  return (1.0 - Math.pow(delta, (1.0 / n)));
}

// Lower bound when k == n: returns delta^(1/n).
private static double exactLowerBoundOnPForKequalsN(final double n, final double delta) {
  return (Math.pow(delta, (1.0 / n)));
}

// Lower bound when k == 1: returns 1 - (1 - delta)^(1/n).
private static double exactLowerBoundOnPForKequalsOne(final double n, final double delta) {
  return (1.0 - Math.pow((1.0 - delta), (1.0 / n)));
}

// Upper bound when k == n - 1: returns (1 - delta)^(1/n).
private static double exactUpperBoundOnPForKequalsNminusOne(final double n, final double delta) {
  return (Math.pow((1.0 - delta), (1.0 / n)));
}
}
| 2,680 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ArrayOfDoublesSerDe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import static org.apache.datasketches.common.ByteArrayUtil.putDoubleLE;
import java.util.Objects;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
 * Methods of serializing and deserializing arrays of Double.
 * Each Double is stored in 8 bytes.
 *
 * @author Alexander Saydakov
 */
public class ArrayOfDoublesSerDe extends ArrayOfItemsSerDe<Double> {

  @Override
  public byte[] serializeToByteArray(final Double item) {
    Objects.requireNonNull(item, "Item must not be null");
    final byte[] out = new byte[Double.BYTES];
    putDoubleLE(out, 0, item.doubleValue());
    return out;
  }

  @Override
  public byte[] serializeToByteArray(final Double[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    if (items.length == 0) { return new byte[0]; }
    final byte[] out = new byte[Double.BYTES * items.length];
    final WritableMemory wmem = WritableMemory.writableWrap(out);
    // Each item occupies a fixed 8-byte slot computed from its index.
    for (int i = 0; i < items.length; i++) {
      wmem.putDouble((long) i * Double.BYTES, items[i]);
    }
    return out;
  }

  @Override
  @Deprecated
  public Double[] deserializeFromMemory(final Memory mem, final int numItems) {
    return deserializeFromMemory(mem, 0, numItems);
  }

  @Override
  public Double[] deserializeFromMemory(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    if (numItems <= 0) { return new Double[0]; }
    // Verify the full 8 * numItems byte region lies within the Memory's capacity.
    Util.checkBounds(offsetBytes, Double.BYTES * (long) numItems, mem.getCapacity());
    final Double[] out = new Double[numItems];
    for (int i = 0; i < numItems; i++) {
      out[i] = mem.getDouble(offsetBytes + ((long) i * Double.BYTES));
    }
    return out;
  }

  @Override
  public int sizeOf(final Double item) {
    Objects.requireNonNull(item, "Item must not be null");
    return Double.BYTES;
  }

  @Override //override because this is simpler: every item has the same fixed size
  public int sizeOf(final Double[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    return items.length * Double.BYTES;
  }

  @Override
  public int sizeOf(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    return numItems * Double.BYTES;
  }

  @Override
  public String toString(final Double item) {
    return (item == null) ? "null" : item.toString();
  }

  @Override
  public Class<Double> getClassOfT() { return Double.class; }
}
| 2,681 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/SketchesArgumentException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
/**
 * Illegal Arguments Exception class for the library
 *
 * @author Lee Rhodes
 */
public class SketchesArgumentException extends SketchesException {
  // Serialization version; bump only if the serialized form of this class ever changes.
  private static final long serialVersionUID = 1L;

  //other constructors to be added as needed.

  /**
   * Constructs a new runtime exception with the specified detail message. The cause is not
   * initialized, and may subsequently be initialized by a call to
   * Throwable.initCause(java.lang.Throwable).
   *
   * @param message the detail message. The detail message is saved for later retrieval by the
   * Throwable.getMessage() method.
   */
  public SketchesArgumentException(final String message) {
    super(message);
  }
}
| 2,682 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ArrayOfItemsSerDe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import java.util.Objects;
import org.apache.datasketches.memory.Memory;
/**
 * Base class for serializing and deserializing custom types.
 * @param <T> Type of item
 *
 * @author Alexander Saydakov
 */
public abstract class ArrayOfItemsSerDe<T> {

  /**
   * Serialize a single unserialized item to a byte array.
   *
   * @param item the item to be serialized
   * @return serialized representation of the given item
   */
  public abstract byte[] serializeToByteArray(T item);

  /**
   * Serialize an array of unserialized items to a byte array of contiguous serialized items.
   *
   * @param items array of items to be serialized
   * @return contiguous, serialized representation of the given array of unserialized items
   */
  public abstract byte[] serializeToByteArray(T[] items);

  /**
   * Deserialize a contiguous sequence of serialized items from a given Memory.
   *
   * @param mem Memory containing a contiguous sequence of serialized items
   * @param numItems number of items in the contiguous serialized sequence.
   * @return array of deserialized items
   * @deprecated use
   * {@link #deserializeFromMemory(Memory, long, int) deserializeFromMemory(mem, offset, numItems)}
   */
  @Deprecated
  public abstract T[] deserializeFromMemory(Memory mem, int numItems);

  /**
   * Deserialize a contiguous sequence of serialized items from a given Memory.
   *
   * @param mem Memory containing a contiguous sequence of serialized items
   * @param offsetBytes the starting offset in the given Memory.
   * @param numItems number of items in the contiguous serialized sequence.
   * @return array of deserialized items
   */
  public abstract T[] deserializeFromMemory(Memory mem, long offsetBytes, int numItems);

  /**
   * Returns the serialized size in bytes of a single unserialized item.
   * @param item a specific item
   * @return the serialized size in bytes of a single unserialized item.
   */
  public abstract int sizeOf(T item);

  /**
   * Returns the serialized size in bytes of the array of items.
   * Subclasses with fixed-width items may override this with a simpler computation.
   * @param items an array of items.
   * @return the serialized size in bytes of the array of items.
   */
  public int sizeOf(final T[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    int totalBytes = 0;
    for (final T item : items) {
      totalBytes += sizeOf(item);
    }
    return totalBytes;
  }

  /**
   * Returns the serialized size in bytes of the number of contiguous serialized items in Memory.
   * The capacity of the given Memory can be much larger that the required size of the items.
   * @param mem the given Memory.
   * @param offsetBytes the starting offset in the given Memory.
   * @param numItems the number of serialized items contained in the Memory
   * @return the serialized size in bytes of the given number of items.
   */
  public abstract int sizeOf(Memory mem, long offsetBytes, int numItems);

  /**
   * Returns a human readable string of an item.
   * @param item a specific item
   * @return a human readable string of an item.
   */
  public abstract String toString(T item);

  /**
   * Returns the concrete class of type T
   * @return the concrete class of type T
   */
  public abstract Class<?> getClassOfT();
}
| 2,683 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/Util.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import static java.lang.Math.ceil;
import static java.lang.Math.floor;
import static java.lang.Math.log;
import static java.lang.Math.pow;
import static java.lang.Math.round;
import java.util.Comparator;
/**
* Common utility functions.
*
* @author Lee Rhodes
*/
@SuppressWarnings("unchecked")
public final class Util {
/**
 * The java line separator character as a String.
 */
public static final String LS = System.getProperty("line.separator");
/**
 * The tab character
 */
public static final char TAB = '\t';
/**
 * The natural logarithm of 2.0.
 */
public static final double LOG2 = log(2.0);
/**
 * The inverse golden ratio as an unsigned long.
 */
public static final long INVERSE_GOLDEN_U64 = 0x9e3779b97f4a7c13L;
/**
 * The inverse golden ratio as a fraction.
 * This has more precision than using the formula: (Math.sqrt(5.0) - 1.0) / 2.0.
 */
public static final double INVERSE_GOLDEN = 0.6180339887498949025;
/**
 * Long.MAX_VALUE as a double.
 */
public static final double LONG_MAX_VALUE_AS_DOUBLE = Long.MAX_VALUE;
// Private constructor prevents instantiation of this static utility class.
private Util() {}
//Byte Conversions
/**
 * Returns an int extracted from a Little-Endian byte array.
 * @param arr the given byte array
 * @return an int extracted from a Little-Endian byte array.
 */
public static int bytesToInt(final byte[] arr) {
  // Assemble from the most significant byte (index 3) down to the least (index 0).
  int result = 0;
  for (int i = 3; i >= 0; i--) {
    result = (result << 8) | (arr[i] & 0xff);
  }
  return result;
}
/**
 * Returns a long extracted from a Little-Endian byte array.
 * @param arr the given byte array
 * @return a long extracted from a Little-Endian byte array.
 */
public static long bytesToLong(final byte[] arr) {
  // Assemble from the most significant byte (index 7) down to the least (index 0).
  long result = 0;
  for (int i = 7; i >= 0; i--) {
    result = (result << 8) | (arr[i] & 0xffL);
  }
  return result;
}
/**
 * Returns a Little-Endian byte array extracted from the given int.
 * @param v the given int
 * @param arr a given array of 4 bytes that will be returned with the data
 * @return a Little-Endian byte array extracted from the given int.
 */
public static byte[] intToBytes(final int v, final byte[] arr) {
  // Byte i receives bits [8i, 8i+7] of v (little-endian layout).
  for (int i = 0; i < 4; i++) {
    arr[i] = (byte) (v >>> (i * 8));
  }
  return arr;
}
/**
 * Returns a Little-Endian byte array extracted from the given long.
 * @param v the given long
 * @param arr a given array of 8 bytes that will be returned with the data
 * @return a Little-Endian byte array extracted from the given long.
 */
public static byte[] longToBytes(final long v, final byte[] arr) {
  // Byte i receives bits [8i, 8i+7] of v (little-endian layout).
  for (int i = 0; i < 8; i++) {
    arr[i] = (byte) (v >>> (i * 8));
  }
  return arr;
}
//Byte array conversions

// Packs a byte array into longs, 8 bytes per long; a trailing group of fewer
// than 8 bytes fills only the low positions of the final long.
static long[] convertToLongArray(final byte[] byteArr, final boolean littleEndian) {
  final int len = byteArr.length;
  // ceil(len / 8) output longs
  final long[] longArr = new long[len / 8 + (len % 8 != 0 ? 1 : 0)];
  int off = 0;
  int longArrIdx = 0;
  while (off < len) {
    // rem = index (relative to off) of the last byte in this group: 7 for a
    // full group, smaller for the final partial group.
    final int rem = Math.min(len - 1 - off, 7);
    long tgt = 0;
    if (littleEndian) {
      // NOTE(review): here byteArr[off] receives the highest shift (rem * 8) and
      // byteArr[off + rem] the lowest — confirm against callers that this is the
      // intended interpretation of "littleEndian".
      for (int j = off + rem, k = 0; j >= off; --j, k++) {
        tgt |= (byteArr[j] & 0XFFL) << (k * 8);
      }
    } else { //BE: byteArr[off + rem] receives the highest shift
      for (int j = off + rem, k = rem; j >= off; --j, k--) {
        tgt |= (byteArr[j] & 0XFFL) << (k * 8);
      }
    }
    off += 8;
    longArr[longArrIdx++] = tgt;
  }
  return longArr;
}
//String Related
/**
 * Returns a string of spaced hex bytes in Big-Endian order.
 * @param v the given long
 * @return string of spaced hex bytes in Big-Endian order.
 */
public static String longToHexBytes(final long v) {
  final StringBuilder sb = new StringBuilder();
  // Emit from the most significant byte (shift 56) down to the least (shift 0),
  // each zero-padded to two hex digits and followed by a space.
  for (int shift = 56; shift >= 0; shift -= 8) {
    final String hex = Long.toHexString((v >>> shift) & 0XFFL);
    sb.append(zeroPad(hex, 2)).append(" ");
  }
  return sb.toString();
}
/**
 * Returns a string view of a byte array
 * @param arr the given byte array
 * @param signed set true if you want the byte values signed.
 * @param littleEndian set true if you want Little-Endian order
 * @param sep the separator string between bytes
 * @return a string view of a byte array; an empty string for an empty array.
 */
public static String bytesToString(
    final byte[] arr, final boolean signed, final boolean littleEndian, final String sep) {
  // Fix: the previous version unconditionally indexed arr[arr.length - 1] and
  // threw ArrayIndexOutOfBoundsException for an empty array.
  if (arr.length == 0) { return ""; }
  final StringBuilder sb = new StringBuilder();
  // signed: keep the sign-extended int value; unsigned: mask to 0..255.
  final int mask = signed ? 0XFFFFFFFF : 0XFF;
  final int arrLen = arr.length;
  if (littleEndian) {
    for (int i = 0; i < arrLen - 1; i++) {
      sb.append(arr[i] & mask).append(sep);
    }
    sb.append(arr[arrLen - 1] & mask);
  } else {
    for (int i = arrLen; i-- > 1; ) {
      sb.append(arr[i] & mask).append(sep);
    }
    sb.append(arr[0] & mask);
  }
  return sb.toString();
}
/**
 * Returns the given time in nanoseconds formatted as Sec.mSec_uSec_nSec
 * @param nS the given nanoseconds
 * @return the given time in nanoseconds formatted as Sec.mSec_uSec_nSec
 */
public static String nanoSecToString(final long nS) {
  // Decompose into whole seconds plus milli/micro/nano remainders
  // (double arithmetic kept to match the original rounding behavior).
  final long nanos = (long)(nS % 1000.0);
  final long micros = (long)(nS / 1000.0 % 1000.0);
  final long millis = (long)(nS / 1000000.0 % 1000.0);
  final long secs = (long)(nS / 1000000000.0);
  // Each sub-second field is zero-padded to 3 digits.
  return String.format("%d.%3s_%3s_%3s", secs,
      zeroPad(Long.toString(millis), 3),
      zeroPad(Long.toString(micros), 3),
      zeroPad(Long.toString(nanos), 3));
}
/**
 * Returns the given time in milliseconds formatted as Hours:Min:Sec.mSec
 * @param mS the given milliseconds
 * @return the given time in milliseconds formatted as Hours:Min:Sec.mSec
 */
public static String milliSecToString(final long mS) {
  // Decompose into whole hours plus minute/second/millisecond remainders
  // (double arithmetic kept to match the original rounding behavior).
  final long millis = (long)(mS % 1000.0);
  final long secs = (long)(mS / 1000.0 % 60.0);
  final long mins = (long)(mS / 60000.0 % 60.0);
  final long hours = (long)(mS / 3600000.0);
  // Minutes and seconds pad to 2 digits, milliseconds to 3.
  return String.format("%d:%2s:%2s.%3s", hours,
      zeroPad(Long.toString(mins), 2),
      zeroPad(Long.toString(secs), 2),
      zeroPad(Long.toString(millis), 3));
}
/**
 * Prepend the given string with zeros. If the given string is equal or greater than the given
 * field length, it will be returned without modification.
 * @param s the given string
 * @param fieldLength desired total field length including the given string
 * @return the given string prepended with zeros.
 */
public static String zeroPad(final String s, final int fieldLength) {
  return characterPad(s, fieldLength, '0', false);
}

/**
 * Prepend or postpend the given string with the given character to fill the given field length.
 * If the given string is equal or greater than the given field length, it will be returned
 * without modification.
 * @param s the given string
 * @param fieldLength the desired field length
 * @param padChar the desired pad character
 * @param postpend if true append the padCharacters to the end of the string.
 * @return prepended or postpended given string with the given character to fill the given field
 * length.
 */
public static String characterPad(final String s, final int fieldLength, final char padChar,
    final boolean postpend) {
  final int sLen = s.length();
  // Already wide enough: return unchanged.
  if (sLen >= fieldLength) { return s; }
  final StringBuilder sb = new StringBuilder(fieldLength);
  if (postpend) {
    sb.append(s);
    while (sb.length() < fieldLength) { sb.append(padChar); }
  } else { //prepend
    final int padCount = fieldLength - sLen;
    for (int i = 0; i < padCount; i++) { sb.append(padChar); }
    sb.append(s);
  }
  return sb.toString();
}
//Memory byte alignment
/**
 * Checks if parameter v is a multiple of 8 and greater than zero.
 * @param v The parameter to check
 * @param argName This name will be part of the error message if the check fails.
 */
public static void checkIfMultipleOf8AndGT0(final long v, final String argName) {
  // Low three bits clear means v is a multiple of 8.
  if ((v & 0X7L) != 0L || v <= 0L) {
    throw new SketchesArgumentException("The value of the parameter \"" + argName
      + "\" must be a positive multiple of 8 and greater than zero: " + v);
  }
}
/**
 * Returns true if v is a multiple of 8 and greater than zero
 * @param v The parameter to check
 * @return true if v is a multiple of 8 and greater than zero
 */
public static boolean isMultipleOf8AndGT0(final long v) {
  // Positive and with the low three bits clear.
  return v > 0L && (v & 0X7L) == 0L;
}
//Powers of 2 or powers of base related
/**
 * Returns true if given int argument is exactly a positive power of 2 and greater than zero.
 *
 * @param powerOf2 The input argument.
 * @return true if argument is exactly a positive power of 2 and greater than zero.
 */
public static boolean isIntPowerOf2(final int powerOf2) {
  // A positive power of 2 equals its own lowest set bit.
  return powerOf2 > 0 && (powerOf2 & -powerOf2) == powerOf2;
}
/**
 * Returns true if given long argument is exactly a positive power of 2 and greater than zero.
 *
 * @param powerOf2 The input argument.
 * @return true if argument is exactly a positive power of 2 and greater than zero.
 */
public static boolean isLongPowerOf2(final long powerOf2) {
  // A positive power of 2 equals its own lowest set bit.
  return powerOf2 > 0 && (powerOf2 & -powerOf2) == powerOf2;
}
/**
 * Checks the given int argument to make sure it is a positive power of 2 and greater than zero.
 * If not it throws an exception with the user supplied local argument name.
 * @param powerOf2 The input int argument must be a power of 2 and greater than zero.
 * @param argName Used in the thrown exception.
 * @throws SketchesArgumentException if not a power of 2 nor greater than zero.
 */
public static void checkIfIntPowerOf2(final int powerOf2, final String argName) {
  if (!isIntPowerOf2(powerOf2)) {
    throw new SketchesArgumentException("The value of the int argument \"" + argName + "\""
      + " must be a positive integer-power of 2" + " and greater than 0: " + powerOf2);
  }
}
/**
 * Checks the given long argument to make sure it is a positive power of 2 and greater than zero.
 * If not, it throws an exception with the user supplied local argument name.
 * @param powerOf2 The input long argument must be a power of 2 and greater than zero.
 * @param argName Used in the thrown exception.
 * @throws SketchesArgumentException if not a power of 2 nor greater than zero.
 */
public static void checkIfLongPowerOf2(final long powerOf2, final String argName) {
  if (isLongPowerOf2(powerOf2)) { return; }
  // Fix: the message previously said "int argument" — copied from the int overload —
  // even though this overload takes a long.
  throw new SketchesArgumentException("The value of the long argument \"" + argName + "\""
      + " must be a positive integer-power of 2" + " and greater than 0: " + powerOf2);
}
/**
 * Computes the int ceiling power of 2 within the range [1, 2^30]. This is the smallest positive power
 * of 2 that is equal to or greater than the given n and a mathematical integer.
 *
 * <p>For:
 * <ul>
 * <li>n &le; 1: returns 1</li>
 * <li>2^30 &le; n &le; 2^31 -1 : returns 2^30</li>
 * <li>n == an exact power of 2 : returns n</li>
 * <li>otherwise returns the smallest power of 2 &ge; n and equal to a mathematical integer</li>
 * </ul>
 *
 * @param n The input int argument.
 * @return the ceiling power of 2.
 */
public static int ceilingIntPowerOf2(final int n) {
  if (n <= 1) { return 1; }
  // Largest power of 2 representable as a positive int.
  final int topIntPwrOf2 = 1 << 30;
  if (n >= topIntPwrOf2) { return topIntPwrOf2; }
  // highestOneBit of 2*(n-1) is the smallest power of 2 >= n.
  return Integer.highestOneBit((n - 1) << 1);
}
/**
 * Computes the long ceiling power of 2 within the range [1, 2^62]. This is the smallest positive power
 * of 2 that is equal to or greater than the given n and a mathematical integer.
 *
 * <p>(Doc fix: the range previously read [1, 2^30], copied from the int version; the long
 * version saturates at 2^62 as the cases below state.)</p>
 *
 * <p>For:
 * <ul>
 * <li>n &le; 1: returns 1</li>
 * <li>2^62 &le; n &le; 2^63 -1 : returns 2^62</li>
 * <li>n == an exact power of 2 : returns n</li>
 * <li>otherwise returns the smallest power of 2 &ge; n and equal to a mathematical integer</li>
 * </ul>
 *
 * @param n The input long argument.
 * @return the ceiling power of 2.
 */
public static long ceilingLongPowerOf2(final long n) {
  if (n <= 1L) { return 1L; }
  // Largest power of 2 representable as a positive long.
  final long topLongPwrOf2 = 1L << 62;
  // highestOneBit of 2*(n-1) is the smallest power of 2 >= n.
  return n >= topLongPwrOf2 ? topLongPwrOf2 : Long.highestOneBit((n - 1L) << 1);
}
/**
 * Computes the floor power of 2 given <i>n</i> is in the range [1, 2^31-1].
 * This is the largest positive power of 2 that equal to or less than the given n and equal
 * to a mathematical integer.
 *
 * <p>For:
 * <ul>
 * <li>n &le; 1: returns 1</li>
 * <li>2^30 &le; n &le; 2^31 -1 : returns 2^30</li>
 * <li>n == a power of 2 : returns n</li>
 * <li>otherwise returns the largest power of 2 less than n and equal to a mathematical
 * integer.</li>
 * </ul>
 *
 * @param n The given int argument.
 * @return the floor power of 2 as an int.
 */
public static int floorPowerOf2(final int n) {
  // highestOneBit already returns the largest power of 2 <= n for positive n.
  return (n <= 1) ? 1 : Integer.highestOneBit(n);
}
/**
 * Computes the floor power of 2 given <i>n</i> is in the range [1, 2^63-1].
 * This is the largest positive power of 2 that is equal to or less than the given <i>n</i> and
 * equal to a mathematical integer.
 *
 * <p>For:
 * <ul>
 * <li>n &le; 1: returns 1</li>
 * <li>2^62 &le; n &le; 2^63 -1 : returns 2^62</li>
 * <li>n == a power of 2 : returns n</li>
 * <li>otherwise returns the largest power of 2 less than n and equal to a mathematical
 * integer.</li>
 * </ul>
 *
 * @param n The given long argument.
 * @return the floor power of 2 as a long
 */
public static long floorPowerOf2(final long n) {
  // highestOneBit already returns the largest power of 2 <= n for positive n.
  return (n <= 1L) ? 1L : Long.highestOneBit(n);
}
/**
 * Computes the inverse integer power of 2: 1/(2^e) = 2^(-e).
 * @param e a positive value between 0 and 1023 inclusive
 * @return the inverse integer power of 2: 1/(2^e) = 2^(-e)
 */
public static double invPow2(final int e) {
  assert e >= 0 && e <= 1023 : "e cannot be negative or greater than 1023: " + e;
  // Build the IEEE 754 double directly: a biased exponent of (1023 - e) with a
  // zero mantissa is exactly 2^(-e).
  return Double.longBitsToDouble((1023L - e) << 52);
}
/**
 * Computes the next larger integer point in the power series
 * <i>point = 2<sup>( i / ppo )</sup></i> given the current point in the series.
 * For illustration, this can be used in a loop as follows:
 *
 * <pre>{@code
 *     int maxP = 1024;
 *     int minP = 1;
 *     int ppo = 2;
 *
 *     for (int p = minP; p <= maxP; p = pwr2LawNext(ppo, p)) {
 *       System.out.print(p + " ");
 *     }
 *     //generates the following series:
 *     //1 2 3 4 6 8 11 16 23 32 45 64 91 128 181 256 362 512 724 1024
 * }</pre>
 *
 * @param ppo Points-Per-Octave, or the number of points per integer powers of 2 in the series.
 * @param curPoint the current point of the series. Must be &ge; 1.
 * @return the next point in the power series.
 */
public static long pwr2SeriesNext(final int ppo, final long curPoint) {
  final long cur = curPoint < 1L ? 1L : curPoint; // clamp to the series minimum of 1
  int gi = (int)round(log2(cur) * ppo); //current generating index
  long next;
  // Advance the generating index until the rounded point strictly exceeds curPoint;
  // the loop absorbs rounding collisions where successive indices map to the same integer.
  do {
    next = round(pow(2.0, (double) ++gi / ppo));
  } while ( next <= curPoint);
  return next;
}
/**
 * Computes the previous, smaller integer point in the power series
 * <i>point = 2<sup>( i / ppo )</sup></i> given the current point in the series.
 * For illustration, this can be used in a loop as follows:
 *
 * <pre>{@code
 *     int maxP = 1024;
 *     int minP = 1;
 *     int ppo = 2;
 *
 *     for (int p = maxP; p >= minP; p = pwr2LawPrev(ppo, p)) {
 *       System.out.print(p + " ");
 *     }
 *     //generates the following series:
 *     //1024 724 512 362 256 181 128 91 64 45 32 23 16 11 8 6 4 3 2 1
 * }</pre>
 *
 * @param ppo Points-Per-Octave, or the number of points per integer powers of 2 in the series.
 * @param curPoint the current point of the series. Must be &ge; 1.
 * @return the previous, smaller point in the power series.
 * A returned value of zero terminates the series.
 */
public static int pwr2SeriesPrev(final int ppo, final int curPoint) {
  if (curPoint <= 1) { return 0; } // zero signals the end of the descending series
  int gi = (int)round(log2(curPoint) * ppo); //current generating index
  int prev;
  // Decrement the generating index until the rounded point falls strictly below curPoint;
  // the loop absorbs rounding collisions where successive indices map to the same integer.
  do {
    prev = (int)round(pow(2.0, (double) --gi / ppo));
  } while (prev >= curPoint);
  return prev;
}
/**
 * Computes the next larger double in the power series
 * <i>point = logBase<sup>( i / ppb )</sup></i> given the current point in the series.
 * For illustration, this can be used in a loop as follows:
 *
 * <pre>{@code
 *     double maxP = 1024.0;
 *     double minP = 1.0;
 *     int ppb = 2;
 *     double logBase = 2.0;
 *
 *     for (double p = minP; p <= maxP; p = powerSeriesNextDouble(ppb, p, true, logBase)) {
 *       System.out.print(p + " ");
 *     }
 *     //generates the following series:
 *     //1 2 3 4 6 8 11 16 23 32 45 64 91 128 181 256 362 512 724 1024
 * }</pre>
 *
 * @param ppb Points-Per-Base, or the number of points per integer powers of base in the series.
 * @param curPoint the current point of the series. Must be &ge; 1.0.
 * @param roundToLong if true the output will be rounded to the nearest long.
 * @param logBase the desired base of the logarithms
 * @return the next point in the power series.
 */
public static double powerSeriesNextDouble(final int ppb, final double curPoint,
    final boolean roundToLong, final double logBase) {
  final double cur = curPoint < 1.0 ? 1.0 : curPoint; // clamp to the series minimum of 1.0
  double gi = round(logBaseOfX(logBase, cur) * ppb ); //current generating index
  double next;
  // Advance the generating index until the (optionally rounded) point strictly
  // exceeds cur; the loop absorbs rounding collisions between adjacent indices.
  do {
    final double n = pow(logBase, ++gi / ppb);
    next = roundToLong ? round(n) : n;
  } while (next <= cur);
  return next;
}
/**
 * Computes the ceiling power of given <i>base</i> and <i>n</i> as doubles.
 * This is the smallest positive power
 * of <i>base</i> that equal to or greater than the given <i>n</i> and equal to a mathematical integer.
 * The result of this function is consistent with {@link #ceilingIntPowerOf2(int)} for values
 * less than one. I.e., if <i>n &lt; 1,</i> the result is 1.
 *
 * @param base The base in the expression &#8968;base<sup>n</sup>&#8969;.
 * @param n The input argument.
 * @return the ceiling power of <i>base</i> as a double and equal to a mathematical integer.
 */
public static double ceilingPowerBaseOfDouble(final double base, final double n) {
  // Clamp inputs below 1 so the result is never less than base^0 == 1.
  final double clamped = n < 1.0 ? 1.0 : n;
  final double exponent = ceil(logBaseOfX(base, clamped));
  return pow(base, exponent);
}
/**
 * Computes the floor power of given <i>base</i> and <i>n</i> as doubles.
 * This is the largest positive power
 * of <i>base</i> that equal to or less than the given n and equal to a mathematical integer.
 * The result of this function is consistent with {@link #floorPowerOf2(int)} for values
 * less than one. I.e., if <i>n &lt; 1,</i> the result is 1.
 *
 * @param base The base in the expression &#8970;base<sup>n</sup>&#8971;.
 * @param n The input argument.
 * @return the floor power of 2 and equal to a mathematical integer.
 */
public static double floorPowerBaseOfDouble(final double base, final double n) {
  // Clamp inputs below 1 so the result is never less than base^0 == 1.
  final double clamped = n < 1.0 ? 1.0 : n;
  final double exponent = floor(logBaseOfX(base, clamped));
  return pow(base, exponent);
}
// Logarithm related
/**
 * The log base 2 of the value
 * @param value the given value
 * @return The log base 2 of the value
 */
public static double log2(final double value) {
  // Change-of-base identity; log(2.0) is the same double as the LOG2 constant.
  return log(value) / log(2.0);
}
/**
 * Returns the logarithm_logBase of x. Example: logB(2.0, x) = log(x) / log(2.0).
 * @param logBase the base of the logarithm used
 * @param x the given value
 * @return the logarithm_logBase of x: Example: logB(2.0, x) = log(x) / log(2.0).
 */
public static double logBaseOfX(final double logBase, final double x) {
  // Change-of-base identity using natural logarithms.
  final double numer = log(x);
  final double denom = log(logBase);
  return numer / denom;
}
/**
 * Returns the number of one bits following the lowest-order ("rightmost") zero-bit in the
 * two's complement binary representation of the specified long value, or 64 if the value is equal
 * to minus one.
 * @param v the value whose number of trailing ones is to be computed.
 * @return the number of one bits following the lowest-order ("rightmost") zero-bit in the
 * two's complement binary representation of the specified long value, or 64 if the value is equal
 * to minus one.
 */
public static int numberOfTrailingOnes(final long v) {
  // Complementing turns trailing ones into trailing zeros.
  final long flipped = ~v;
  return Long.numberOfTrailingZeros(flipped);
}
  /**
   * Returns the number of one bits preceding the highest-order ("leftmost") zero-bit in the
   * two's complement binary representation of the specified long value, or 64 if the value is equal
   * to minus one.
   * @param v the value whose number of leading ones is to be computed.
   * @return the number of one bits preceding the highest-order ("leftmost") zero-bit in the
   * two's complement binary representation of the specified long value, or 64 if the value is equal
   * to minus one.
   */
  public static int numberOfLeadingOnes(final long v) {
    //Complementing turns leading ones into leading zeros; ~(-1L) == 0 yields 64.
    return Long.numberOfLeadingZeros(~v);
  }
  /**
   * Returns the log2 of the given int value if it is an exact power of 2 and greater than zero.
   * If not, it throws an exception with the user supplied local argument name.
   * @param powerOf2 must be a power of 2 and greater than zero.
   * @param argName the argument name used in the exception if thrown.
   * @return the log2 of the given value if it is an exact power of 2 and greater than zero.
   * @throws SketchesArgumentException if not a power of 2 nor greater than zero.
   */
  public static int exactLog2OfInt(final int powerOf2, final String argName) {
    //checkIfIntPowerOf2 (defined elsewhere in this class) throws if the precondition fails,
    // so for the surviving values the trailing-zero count IS the exact log2.
    checkIfIntPowerOf2(powerOf2, argName);
    return Integer.numberOfTrailingZeros(powerOf2);
  }
  /**
   * Returns the log2 of the given long value if it is an exact power of 2 and greater than zero.
   * If not, it throws an exception with the user supplied local argument name.
   * @param powerOf2 must be a power of 2 and greater than zero.
   * @param argName the argument name used in the exception if thrown.
   * @return the log2 of the given value if it is an exact power of 2 and greater than zero.
   * @throws SketchesArgumentException if not a power of 2 nor greater than zero.
   */
  public static int exactLog2OfLong(final long powerOf2, final String argName) {
    //checkIfLongPowerOf2 (defined elsewhere in this class) throws if the precondition fails,
    // so for the surviving values the trailing-zero count IS the exact log2.
    checkIfLongPowerOf2(powerOf2, argName);
    return Long.numberOfTrailingZeros(powerOf2);
  }
/**
* Returns the log2 of the given int value if it is an exact power of 2 and greater than zero.
* If not, it throws an exception.
* @param powerOf2 must be a power of 2 and greater than zero.
* @return the log2 of the given int value if it is an exact power of 2 and greater than zero.
*/
public static int exactLog2OfInt(final int powerOf2) {
if (!isIntPowerOf2(powerOf2)) {
throw new SketchesArgumentException("Argument 'powerOf2' must be a positive power of 2.");
}
return Long.numberOfTrailingZeros(powerOf2);
}
  /**
   * Returns the log2 of the given long value if it is an exact power of 2 and greater than zero.
   * If not, it throws an exception.
   * @param powerOf2 must be a power of 2 and greater than zero.
   * @return the log2 of the given long value if it is an exact power of 2 and greater than zero.
   * @throws SketchesArgumentException if not a power of 2 nor greater than zero.
   */
  public static int exactLog2OfLong(final long powerOf2) {
    if (!isLongPowerOf2(powerOf2)) {
      throw new SketchesArgumentException("Argument 'powerOf2' must be a positive power of 2.");
    }
    //For an exact power of 2 the trailing-zero count IS the log2.
    return Long.numberOfTrailingZeros(powerOf2);
  }
//Checks that throw
  /**
   * Check the requested offset and length against the allocated size.
   * The invariants equation is: {@code 0 <= reqOff <= reqLen <= reqOff + reqLen <= allocSize}.
   * If this equation is violated an {@link SketchesArgumentException} will be thrown.
   * @param reqOff the requested offset
   * @param reqLen the requested length
   * @param allocSize the allocated size.
   */
  public static void checkBounds(final long reqOff, final long reqLen, final long allocSize) {
    //Single-branch bounds check: the OR of the four quantities has its sign bit set iff any
    // one of them is negative. This also catches (reqOff + reqLen) overflowing into the
    // negative range, which the obvious comparison chain would miss.
    if ((reqOff | reqLen | (reqOff + reqLen) | (allocSize - (reqOff + reqLen))) < 0) {
      throw new SketchesArgumentException("Bounds Violation: "
          + "reqOffset: " + reqOff + ", reqLength: " + reqLen
          + ", (reqOff + reqLen): " + (reqOff + reqLen) + ", allocSize: " + allocSize);
    }
  }
/**
* Checks the given parameter to make sure it is positive and between 0.0 inclusive and 1.0
* inclusive.
*
* @param p
* <a href="{@docRoot}/resources/dictionary.html#p">See Sampling Probability, <i>p</i></a>
* @param argName Used in the thrown exception.
*/
public static void checkProbability(final double p, final String argName) {
if (p >= 0.0 && p <= 1.0) {
return;
}
throw new SketchesArgumentException("The value of the parameter \"" + argName
+ "\" must be between 0.0 inclusive and 1.0 inclusive: " + p);
}
//Boolean Checks
/**
* Unsigned compare with longs.
* @param n1 A long to be treated as if unsigned.
* @param n2 A long to be treated as if unsigned.
* @return true if n1 > n2.
*/
public static boolean isLessThanUnsigned(final long n1, final long n2) {
return n1 < n2 ^ n1 < 0 != n2 < 0;
}
/**
* Returns true if given n is even.
* @param n the given n
* @return true if given n is even.
*/
public static boolean isEven(final long n) {
return (n & 1L) == 0;
}
/**
* Returns true if given n is odd.
* @param n the given n
* @return true if given n is odd.
*/
public static boolean isOdd(final long n) {
return (n & 1L) == 1L;
}
/**
* Computes the number of decimal digits of the number n
* @param n the given number
* @return the number of decimal digits of the number n
*/
public static int numDigits(int n) {
if (n % 10 == 0) { n++; }
return (int) ceil(log(n) / log(10));
}
  /**
   * Converts the given number to a string prepended with spaces, if necessary, to
   * match the given length.
   *
   * <p>For example, assume a sequence of integers from 1 to 1000. The largest value has
   * four decimal digits. Convert the entire sequence of strings to the form "   1" to "1000".
   * When these strings are sorted they will be in numerical sequence: "   1", "   2", ... "1000".</p>
   *
   * @param number the given number
   * @param length the desired string length.
   * @return the given number to a string prepended with spaces
   */
  public static String intToFixedLengthString(final int number, final int length) {
    final String num = Integer.toString(number);
    //characterPad is defined elsewhere in this class; assumed to left-pad with ' ' up to
    // 'length' (last arg false = prepend). Behavior when num is already wider than 'length'
    // depends on characterPad — NOTE(review): confirm it returns num unchanged in that case.
    return characterPad(num, length, ' ', false);
  }
/**
* Finds the minimum of two generic items
* @param <T> the type
* @param item1 item one
* @param item2 item two
* @param c the given comparator
* @return the minimum value
*/
public static <T> Object minT(final Object item1, final Object item2, final Comparator<? super T> c) {
return c.compare((T)item1, (T)item2) <= 0 ? item1 : item2;
}
/**
* Finds the maximum of two generic items
* @param <T> the type
* @param item1 item one
* @param item2 item two
* @param c the given comparator
* @return the maximum value
*/
public static <T> Object maxT(final Object item1, final Object item2, final Comparator<? super T> c) {
return c.compare((T)item1, (T)item2) >= 0 ? item1 : item2;
}
/**
* Is item1 Less-Than item2
* @param <T> the type
* @param item1 item one
* @param item2 item two
* @param c the given comparator
* @return true if item1 Less-Than item2
*/
public static <T> boolean lt(final Object item1, final Object item2, final Comparator<? super T> c) {
return c.compare((T)item1, (T)item2) < 0;
}
/**
* Is item1 Less-Than-Or-Equal-To item2
* @param <T> the type
* @param item1 item one
* @param item2 item two
* @param c the given comparator
* @return true if item1 Less-Than-Or-Equal-To item2
*/
public static <T> boolean le(final Object item1, final Object item2, final Comparator<? super T> c) {
return c.compare((T)item1, (T)item2) <= 0;
}
}
| 2,684 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/SketchesStateException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
/**
* Illegal State Exception class for the library
*
* @author Lee Rhodes
*/
public class SketchesStateException extends SketchesException {
  //required by Serializable (inherited via RuntimeException); bump on incompatible change
  private static final long serialVersionUID = 1L;

  //other constructors to be added as needed.

  /**
   * Constructs a new runtime exception with the specified detail message. The cause is not
   * initialized, and may subsequently be initialized by a call to
   * Throwable.initCause(java.lang.Throwable).
   *
   * @param message the detail message. The detail message is saved for later retrieval by the
   * Throwable.getMessage() method.
   */
  public SketchesStateException(final String message) {
    super(message);
  }
}
| 2,685 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/SketchesReadOnlyException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
/**
* Write operation attempted on a read-only class.
*
* @author Lee Rhodes
*/
public class SketchesReadOnlyException extends SketchesException {
  //required by Serializable (inherited via RuntimeException); bump on incompatible change
  private static final long serialVersionUID = 1L;

  //other constructors to be added as needed.

  /**
   * Constructs a new runtime exception with the message:
   * "Write operation attempted on a read-only class."
   *
   * <p>The cause is not initialized, and may subsequently be initialized by a call to
   * Throwable.initCause(java.lang.Throwable).
   */
  public SketchesReadOnlyException() {
    super("Write operation attempted on a read-only class.");
  }

  /**
   * Constructs a new runtime exception with the specified detail message. The cause is not
   * initialized, and may subsequently be initialized by a call to
   * Throwable.initCause(java.lang.Throwable).
   *
   * @param message the detail message. The detail message is saved for later retrieval by the
   * Throwable.getMessage() method.
   */
  public SketchesReadOnlyException(final String message) {
    super(message);
  }
}
| 2,686 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ResizeFactor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
/**
* For the Families that accept this configuration parameter, it controls the size multiple that
* affects how fast the internal cache grows, when more space is required.
* <a href="{@docRoot}/resources/dictionary.html#resizeFactor">See Resize Factor</a>
*
* @author Lee Rhodes
*/
public enum ResizeFactor {
  /**
   * Do not resize. Sketch will be configured to full size.
   */
  X1(0),
  /**
   * Resize factor is 2.
   */
  X2(1),
  /**
   * Resize factor is 4.
   */
  X4(2),
  /**
   * Resize factor is 8.
   */
  X8(3);

  //The log-base-2 of this factor. Final: enum constants are shared singletons,
  // so their state must be immutable.
  private final int lg_;

  ResizeFactor(final int lg) {
    lg_ = lg;
  }

  /**
   * Returns the Log-base 2 of the Resize Factor
   * @return the Log-base 2 of the Resize Factor
   */
  public int lg() {
    return lg_;
  }

  /**
   * Returns the Resize Factor given the Log-base 2 of the Resize Factor
   * @param lg a value between zero and 3, inclusive. Any other value returns X8.
   * @return the Resize Factor given the Log-base 2 of the Resize Factor
   */
  public static ResizeFactor getRF(final int lg) {
    if (X1.lg() == lg) { return X1; }
    if (X2.lg() == lg) { return X2; }
    if (X4.lg() == lg) { return X4; }
    return X8; //lg == 3, or any out-of-range value, maps to X8
  }

  /**
   * Returns the Resize Factor value: 1, 2, 4 or 8.
   * @return the Resize Factor
   */
  public int getValue() {
    return 1 << lg_;
  }
}
| 2,687 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ArrayOfBooleansSerDe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import java.util.Objects;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* Methods of serializing and deserializing arrays of Boolean as a bit array.
*
* @author Jon Malkin
*/
public class ArrayOfBooleansSerDe extends ArrayOfItemsSerDe<Boolean> {
  /**
   * Computes number of bytes needed for packed bit encoding of the array of booleans. Rounds
   * partial bytes up to return a whole number of bytes.
   *
   * @param arrayLength Number of items in the array to serialize
   * @return Number of bytes needed to encode the array
   */
  public static int computeBytesNeeded(final int arrayLength) {
    //ceil(arrayLength / 8): whole bytes, plus one more byte if any remainder bits exist
    return (arrayLength >>> 3) + ((arrayLength & 0x7) > 0 ? 1 : 0);
  }

  @Override
  public byte[] serializeToByteArray(final Boolean item) {
    Objects.requireNonNull(item, "Item must not be null");
    //A single Boolean occupies one whole byte: 1 for true, 0 for false.
    final byte[] bytes = new byte[1];
    bytes[0] = (item) ? (byte)1 : 0;
    return bytes;
  }

  @Override
  public byte[] serializeToByteArray(final Boolean[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    final int bytesNeeded = computeBytesNeeded(items.length);
    final byte[] bytes = new byte[bytesNeeded];
    final WritableMemory mem = WritableMemory.writableWrap(bytes);
    //Pack item i into bit (i & 7) of byte (i >>> 3): least-significant bit first.
    byte val = 0;
    for (int i = 0; i < items.length; ++i) {
      if (items[i]) {
        val |= 0x1 << (i & 0x7);
      }
      if ((i & 0x7) == 0x7) { //byte is full: flush it and start the next one
        mem.putByte(i >>> 3, val);
        val = 0;
      }
    }
    // write out any remaining values (if val=0, still good to be explicit)
    if ((items.length & 0x7) > 0) {
      mem.putByte(bytesNeeded - 1, val);
    }
    return bytes;
  }

  @Override
  @Deprecated
  public Boolean[] deserializeFromMemory(final Memory mem, final int numItems) {
    //Deprecated form: delegates to the offset-taking overload with offset 0.
    return deserializeFromMemory(mem, 0, numItems);
  }

  @Override
  public Boolean[] deserializeFromMemory(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    if (numItems <= 0) { return new Boolean[0]; }
    final int numBytes = computeBytesNeeded(numItems);
    Util.checkBounds(offsetBytes, numBytes, mem.getCapacity());
    final Boolean[] array = new Boolean[numItems];
    //Unpack in the same LSB-first order used by serializeToByteArray(Boolean[]).
    byte srcVal = 0;
    for (int i = 0, b = 0; i < numItems; ++i) {
      if ((i & 0x7) == 0x0) { // should trigger on first iteration
        srcVal = mem.getByte(offsetBytes + b++);
      }
      array[i] = ((srcVal >>> (i & 0x7)) & 0x1) == 1;
    }
    return array;
  }

  @Override
  public int sizeOf(final Boolean item) {
    Objects.requireNonNull(item, "Item must not be null");
    //One item always occupies one byte.
    return computeBytesNeeded(1);
  }

  @Override //needs to override default due to the bit packing, which must be computed.
  public int sizeOf(final Boolean[] items) {
    Objects.requireNonNull(items, "Item must not be null");
    return computeBytesNeeded(items.length);
  }

  @Override
  public int sizeOf(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    //Size depends only on the item count, not on the Memory contents.
    return computeBytesNeeded(numItems);
  }

  @Override
  public String toString(final Boolean item) {
    if (item == null) { return "null"; }
    return item ? "true" : "false";
  }

  @Override
  public Class<Boolean> getClassOfT() { return Boolean.class; }
}
| 2,688 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/BoundsOnRatiosInSampledSets.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import static org.apache.datasketches.common.BoundsOnBinomialProportions.approximateLowerBoundOnP;
import static org.apache.datasketches.common.BoundsOnBinomialProportions.approximateUpperBoundOnP;
/**
* This class is used to compute the bounds on the estimate of the ratio <i>|B| / |A|</i>, where:
* <ul>
* <li><i>|A|</i> is the unknown size of a set <i>A</i> of unique identifiers.</li>
* <li><i>|B|</i> is the unknown size of a subset <i>B</i> of <i>A</i>.</li>
* <li><i>a</i> = <i>|S<sub>A</sub>|</i> is the observed size of a sample of <i>A</i>
* that was obtained by Bernoulli sampling with a known inclusion probability <i>f</i>.</li>
* <li><i>b</i> = <i>|S<sub>A</sub> ∩ B|</i> is the observed size of a subset
* of <i>S<sub>A</sub></i>.</li>
* </ul>
*
* @author Kevin Lang
*/
public final class BoundsOnRatiosInSampledSets {
  private static final double NUM_STD_DEVS = 2.0; //made a constant to simplify interface.

  private BoundsOnRatiosInSampledSets() {} //utility class: no instances

  /**
   * Return the approximate lower bound based on a 95% confidence interval
   * @param a See class javadoc
   * @param b See class javadoc
   * @param f the inclusion probability used to produce the set with size <i>a</i> and should
   * generally be less than 0.5. Above this value, the results may not be reliable.
   * When <i>f</i> = 1.0 this returns the estimate.
   * @return the approximate lower bound
   */
  public static double getLowerBoundForBoverA(final long a, final long b, final double f) {
    checkInputs(a, b, f);
    if (a == 0) { return 0.0; } //empty sample: lower bound of the ratio is 0
    if (f == 1.0) { return (double) b / a; } //no sampling: the ratio is exact
    return approximateLowerBoundOnP(a, b, NUM_STD_DEVS * hackyAdjuster(f));
  }

  /**
   * Return the approximate upper bound based on a 95% confidence interval
   * @param a See class javadoc
   * @param b See class javadoc
   * @param f the inclusion probability used to produce the set with size <i>a</i>.
   * @return the approximate upper bound
   */
  public static double getUpperBoundForBoverA(final long a, final long b, final double f) {
    checkInputs(a, b, f);
    if (a == 0) { return 1.0; } //empty sample: upper bound of the ratio is 1
    if (f == 1.0) { return (double) b / a; } //no sampling: the ratio is exact
    return approximateUpperBoundOnP(a, b, NUM_STD_DEVS * hackyAdjuster(f));
  }

  /**
   * Return the estimate of b over a
   * @param a See class javadoc
   * @param b See class javadoc
   * @return the estimate of b over a
   */
  public static double getEstimateOfBoverA(final long a, final long b) {
    checkInputs(a, b, 0.3); //f = 0.3 is a dummy, always-valid value: only a and b are validated
    if (a == 0) { return 0.5; } //no data: return the neutral midpoint estimate
    return (double) b / a;
  }

  /**
   * Return the estimate of A. See class javadoc.
   * Note: requires {@code a >= 1}; checkInputs is called with b = 1, which rejects a = 0.
   * @param a See class javadoc
   * @param f the inclusion probability used to produce the set with size <i>a</i>.
   * @return the estimate of |A|
   */
  public static double getEstimateOfA(final long a, final double f) {
    checkInputs(a, 1, f); //b = 1 is a placeholder; this also enforces a >= 1
    return a / f;
  }

  /**
   * Return the estimate of B. See class javadoc.
   * @param b See class javadoc
   * @param f the inclusion probability used to produce the set with size <i>b</i>.
   * @return the estimate of |B|
   */
  public static double getEstimateOfB(final long b, final double f) {
    checkInputs(b + 1, b, f); //a = b + 1 is a placeholder; enforces b >= 0 and a valid f
    return b / f;
  }

  /**
   * This hackyAdjuster is tightly coupled with the width of the confidence interval normally
   * specified with number of standard deviations. To simplify this interface the number of
   * standard deviations has been fixed to 2.0, which corresponds to a confidence interval of
   * 95%.
   * @param f the inclusion probability used to produce the set with size <i>a</i>.
   * @return the hacky Adjuster
   */
  private static double hackyAdjuster(final double f) {
    final double tmp = Math.sqrt(1.0 - f);
    return (f <= 0.5) ? tmp : tmp + (0.01 * (f - 0.5));
  }

  //Validates 0 <= b <= a and 0.0 < f <= 1.0; throws SketchesArgumentException otherwise.
  static void checkInputs(final long a, final long b, final double f) {
    //The OR of (a - b), a and b is negative iff any one of them is negative,
    // i.e., this tests 0 <= b <= a in a single branch.
    if ( ( (a - b) | (a) | (b) ) < 0) { //if any group goes negative
      throw new SketchesArgumentException(
          "a must be >= b and neither a nor b can be < 0: a = " + a + ", b = " + b);
    }
    if ((f > 1.0) || (f <= 0.0)) {
      throw new SketchesArgumentException("Required: ((f <= 1.0) && (f > 0.0)): " + f);
    }
  }
}
| 2,689 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/Family.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
/**
* Defines the various families of sketch and set operation classes. A family defines a set of
* classes that share fundamental algorithms and behaviors. The classes within a family may
* still differ by how they are stored and accessed. For example, internally there are separate
* classes for the QuickSelect sketch algorithm that operate on the Java heap and off-heap.
* Not all of these families have parallel forms on and off-heap but are included for completeness.
*
* @author Lee Rhodes
*/
public enum Family {
  /**
   * The Alpha Sketch family is a member of the Theta Sketch Framework of sketches and is best
   * suited for real-time processes where both the updating of the sketch and getting the estimate
   * is performed directly on the sketch. In this situation the AlphaSketch has roughly a
   * 30% improvement (~1/sqrt(2*k)) in its error distribution as compared to the QuickSelect
   * (or similar KMV-derived) sketches.
   *
   * <p>If the AlphaSketch is fed into any SetOperation, the error distribution reverts back to the
   * normal QuickSelect/KMV error distribution (~1/sqrt(k)). For this reason, the AlphaSketch
   * does not have a sister class for off-heap operation. The Alpha Sketch has a roughly 30% faster
   * overall update time as compared to the QuickSelect sketch family.</p>
   *
   * <p>The Alpha Sketch is created using the UpdateSketch.builder().
   * <a href="{@docRoot}/resources/dictionary.html#alphaTCF">See Alpha TCF</a> and
   * <a href="{@docRoot}/resources/dictionary.html#thetaSketch">Theta Sketch Framework</a>
   */
  ALPHA(1, "Alpha", 3, 3),

  /**
   * The QuickSelect Sketch family is a member of the Theta Sketch Framework of sketches and
   * is the workhorse of the Theta Sketch Families and can be constructed for either on-heap or
   * off-heap operation.
   * The QuickSelect Sketch is created using the UpdateSketch.builder().
   * <a href="{@docRoot}/resources/dictionary.html#quickSelectTCF">See Quick Select TCF</a>
   */
  QUICKSELECT(2, "QuickSelect", 3, 3),

  /**
   * The Compact Sketch family is a member of the Theta Sketch Framework of sketches.
   * The are read-only and cannot be updated, but can participate in any of the Set Operations.
   * The compact sketches are never created directly with a constructor or Builder.
   * Instead they are created as a result of the compact()
   * method of an UpdateSketch or as a result of a getSketchSamples() of a SetOperation.
   */
  COMPACT(3, "Compact", 1, 3),

  /**
   * The Union family is an operation for the Theta Sketch Framework of sketches.
   * The Union is constructed using the SetOperation.builder().
   */
  UNION(4, "Union", 4, 4),

  /**
   * The Intersection family is an operation for the Theta Sketch Framework of sketches.
   * The Intersection is constructed using the SetOperation.builder().
   */
  INTERSECTION(5, "Intersection", 3, 3),

  /**
   * The A and not B family is an operation for the Theta Sketch Framework of sketches.
   * The AnotB operation is constructed using the SetOperation.builder().
   */
  A_NOT_B(6, "AnotB", 3, 3),

  /**
   * The HLL family of sketches. (Not part of TSF.)
   */
  HLL(7, "HLL", 1, 1),

  /**
   * The Quantiles family of sketches. (Not part of TSF.)
   */
  QUANTILES(8, "QUANTILES", 1, 2),

  /**
   * The Tuple family of sketches is a large family of sketches that are extensions of the
   * Theta Sketch Framework.
   */
  TUPLE(9, "TUPLE", 1, 3),

  /**
   * The Frequency family of sketches. (Not part of TSF.)
   */
  FREQUENCY(10, "FREQUENCY", 1, 4),

  /**
   * The Reservoir family of sketches. (Not part of TSF.)
   */
  RESERVOIR(11, "RESERVOIR", 1, 2),

  /**
   * The reservoir sampling family of Union operations. (Not part of TSF.)
   */
  RESERVOIR_UNION(12, "RESERVOIR_UNION", 1, 1),

  /**
   * The VarOpt family of sketches. (Not part of TSF.)
   */
  VAROPT(13, "VAROPT", 1, 4),

  /**
   * The VarOpt family of sketches. (Not part of TSF.)
   */
  VAROPT_UNION(14, "VAROPT_UNION", 1, 4),

  /**
   * KLL quantiles sketch
   */
  KLL(15, "KLL", 1, 2),

  /**
   * Compressed Probabilistic Counting (CPC) Sketch
   */
  CPC(16, "CPC", 1, 5),

  /**
   * Relative Error Quantiles Sketch
   */
  REQ(17, "REQ", 1, 2),

  /**
   * CountMin Sketch
   */
  COUNTMIN(18, "COUNTMIN", 2, 2);

  //Lookup tables populated once in the static initializer below.
  private static final Map<Integer, Family> lookupID = new HashMap<>();
  private static final Map<String, Family> lookupFamName = new HashMap<>();

  //All four fields are final: enum constants are shared singletons and must be immutable.
  private final int id_;
  private final String famName_;
  private final int minPreLongs_;
  private final int maxPreLongs_;

  static {
    for (final Family f : values()) {
      lookupID.put(f.getID(), f);
      lookupFamName.put(f.getFamilyName().toUpperCase(Locale.US), f);
    }
  }

  private Family(final int id, final String famName, final int minPreLongs, final int maxPreLongs) {
    id_ = id;
    famName_ = famName.toUpperCase(Locale.US);
    minPreLongs_ = minPreLongs;
    maxPreLongs_ = maxPreLongs;
  }

  /**
   * Returns the byte ID for this family
   * @return the byte ID for this family
   */
  @SuppressFBWarnings(value = "NM_CONFUSING", justification = "Harmless, will not fix")
  public int getID() {
    return id_;
  }

  /**
   * Checks that the given id matches this Family's ID.
   * @param id the given id, a value &lt; 128.
   * @throws SketchesArgumentException if the given id does not match this Family's ID.
   */
  public void checkFamilyID(final int id) {
    if (id != id_) {
      throw new SketchesArgumentException(
          "Possible Corruption: This Family " + toString()
            + " does not match the ID of the given Family: " + idToFamily(id).toString());
    }
  }

  /**
   * Returns the name for this family
   * @return the name for this family
   */
  public String getFamilyName() {
    return famName_;
  }

  /**
   * Returns the minimum preamble size for this family in longs
   * @return the minimum preamble size for this family in longs
   */
  public int getMinPreLongs() {
    return minPreLongs_;
  }

  /**
   * Returns the maximum preamble size for this family in longs
   * @return the maximum preamble size for this family in longs
   */
  public int getMaxPreLongs() {
    return maxPreLongs_;
  }

  @Override
  public String toString() {
    return famName_;
  }

  /**
   * Returns the Family given the ID
   * @param id the given ID
   * @return the Family given the ID
   * @throws SketchesArgumentException if the id is not recognized.
   */
  public static Family idToFamily(final int id) {
    final Family f = lookupID.get(id);
    if (f == null) {
      throw new SketchesArgumentException("Possible Corruption: Illegal Family ID: " + id);
    }
    return f;
  }

  /**
   * Returns the Family given the family name (case-insensitive).
   * @param famName the family name
   * @return the Family given the family name
   * @throws SketchesArgumentException if the name is not recognized.
   */
  public static Family stringToFamily(final String famName) {
    final Family f = lookupFamName.get(famName.toUpperCase(Locale.US));
    if (f == null) {
      throw new SketchesArgumentException("Possible Corruption: Illegal Family Name: " + famName);
    }
    return f;
  }
}
| 2,690 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ByteArrayUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
/**
* Useful methods for byte arrays.
* @author Lee Rhodes
*/
public final class ByteArrayUtil {

  //Utility class: enforce noninstantiability (Effective Java, Item 4).
  private ByteArrayUtil() { }

  /**
   * Copies bytes from source to target with offsets on both the source and target.
   * @param source the given source
   * @param srcStart the source starting index
   * @param target the given target
   * @param tgtStart the target starting index
   * @param numBytes the number of bytes to be transferred.
   */
  public static void copyBytes(final byte[] source, final int srcStart,
      final byte[] target, final int tgtStart, final int numBytes) {
    Util.checkBounds(srcStart, numBytes, source.length);
    Util.checkBounds(tgtStart, numBytes, target.length);
    for (int i = 0, j = srcStart, k = tgtStart; i < numBytes; i++) {
      target[k++] = source[j++];
    }
  }

  /**
   * Get a <i>short</i> from the given byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>short</i>
   */
  public static short getShortLE(final byte[] array, final int offset) {
    return (short) ((array[offset    ] & 0XFF       )
                 | ((array[offset + 1] & 0XFF) <<  8));
  }

  /**
   * Put the source <i>short</i> into the destination byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>short</i>
   */
  public static void putShortLE(final byte[] array, final int offset, final short value) {
    array[offset    ] = (byte) (value       );
    array[offset + 1] = (byte) (value >>>  8);
  }

  /**
   * Get a <i>short</i> from the given byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>short</i>
   */
  public static short getShortBE(final byte[] array, final int offset) {
    return (short) ((array[offset + 1] & 0XFF       )
                 | ((array[offset    ] & 0XFF) <<  8));
  }

  /**
   * Put the source <i>short</i> into the destination byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>short</i>
   */
  public static void putShortBE(final byte[] array, final int offset, final short value) {
    array[offset + 1] = (byte) (value       );
    array[offset    ] = (byte) (value >>>  8);
  }

  /**
   * Get an <i>int</i> from the given byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>int</i>
   */
  public static int getIntLE(final byte[] array, final int offset) {
    return ( array[offset    ] & 0XFF       )
        | ((array[offset + 1] & 0XFF) <<  8)
        | ((array[offset + 2] & 0XFF) << 16)
        | ((array[offset + 3] & 0XFF) << 24);
  }

  /**
   * Put the source <i>int</i> into the destination byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>int</i>
   */
  public static void putIntLE(final byte[] array, final int offset, final int value) {
    array[offset    ] = (byte) (value       );
    array[offset + 1] = (byte) (value >>>  8);
    array[offset + 2] = (byte) (value >>> 16);
    array[offset + 3] = (byte) (value >>> 24);
  }

  /**
   * Get an <i>int</i> from the given byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>int</i>
   */
  public static int getIntBE(final byte[] array, final int offset) {
    return ( array[offset + 3] & 0XFF       )
        | ((array[offset + 2] & 0XFF) <<  8)
        | ((array[offset + 1] & 0XFF) << 16)
        | ((array[offset    ] & 0XFF) << 24);
  }

  /**
   * Put the source <i>int</i> into the destination byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>int</i>
   */
  public static void putIntBE(final byte[] array, final int offset, final int value) {
    array[offset + 3] = (byte) (value       );
    array[offset + 2] = (byte) (value >>>  8);
    array[offset + 1] = (byte) (value >>> 16);
    array[offset    ] = (byte) (value >>> 24);
  }

  /**
   * Get a <i>long</i> from the given byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>long</i>
   */
  public static long getLongLE(final byte[] array, final int offset) {
    return ( array[offset    ] & 0XFFL       )
        | ((array[offset + 1] & 0XFFL) <<  8)
        | ((array[offset + 2] & 0XFFL) << 16)
        | ((array[offset + 3] & 0XFFL) << 24)
        | ((array[offset + 4] & 0XFFL) << 32)
        | ((array[offset + 5] & 0XFFL) << 40)
        | ((array[offset + 6] & 0XFFL) << 48)
        | ((array[offset + 7] & 0XFFL) << 56);
  }

  /**
   * Put the source <i>long</i> into the destination byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>long</i>
   */
  public static void putLongLE(final byte[] array, final int offset, final long value) {
    array[offset    ] = (byte) (value       );
    array[offset + 1] = (byte) (value >>>  8);
    array[offset + 2] = (byte) (value >>> 16);
    array[offset + 3] = (byte) (value >>> 24);
    array[offset + 4] = (byte) (value >>> 32);
    array[offset + 5] = (byte) (value >>> 40);
    array[offset + 6] = (byte) (value >>> 48);
    array[offset + 7] = (byte) (value >>> 56);
  }

  /**
   * Get a <i>long</i> from the source byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source starting point
   * @return the <i>long</i>
   */
  public static long getLongBE(final byte[] array, final int offset) {
    return ( array[offset + 7] & 0XFFL       )
        | ((array[offset + 6] & 0XFFL) <<  8)
        | ((array[offset + 5] & 0XFFL) << 16)
        | ((array[offset + 4] & 0XFFL) << 24)
        | ((array[offset + 3] & 0XFFL) << 32)
        | ((array[offset + 2] & 0XFFL) << 40)
        | ((array[offset + 1] & 0XFFL) << 48)
        | ((array[offset    ] & 0XFFL) << 56);
  }

  /**
   * Put the source <i>long</i> into the destination byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination starting point
   * @param value source <i>long</i>
   */
  public static void putLongBE(final byte[] array, final int offset, final long value) {
    array[offset + 7] = (byte) (value       );
    array[offset + 6] = (byte) (value >>>  8);
    array[offset + 5] = (byte) (value >>> 16);
    array[offset + 4] = (byte) (value >>> 24);
    array[offset + 3] = (byte) (value >>> 32);
    array[offset + 2] = (byte) (value >>> 40);
    array[offset + 1] = (byte) (value >>> 48);
    array[offset    ] = (byte) (value >>> 56);
  }

  /**
   * Get a <i>float</i> from the given byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>float</i>
   */
  public static float getFloatLE(final byte[] array, final int offset) {
    return Float.intBitsToFloat(getIntLE(array, offset));
  }

  /**
   * Put the source <i>float</i> into the destination byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>float</i>
   */
  public static void putFloatLE(final byte[] array, final int offset, final float value) {
    putIntLE(array, offset, Float.floatToRawIntBits(value));
  }

  /**
   * Get a <i>float</i> from the given byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>float</i>
   */
  public static float getFloatBE(final byte[] array, final int offset) {
    return Float.intBitsToFloat(getIntBE(array, offset));
  }

  /**
   * Put the source <i>float</i> into the destination byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>float</i>
   */
  public static void putFloatBE(final byte[] array, final int offset, final float value) {
    putIntBE(array, offset, Float.floatToRawIntBits(value));
  }

  /**
   * Get a <i>double</i> from the given byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>double</i>
   */
  public static double getDoubleLE(final byte[] array, final int offset) {
    return Double.longBitsToDouble(getLongLE(array, offset));
  }

  /**
   * Put the source <i>double</i> into the destination byte array starting at the given offset
   * in little endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>double</i>
   */
  public static void putDoubleLE(final byte[] array, final int offset, final double value) {
    putLongLE(array, offset, Double.doubleToRawLongBits(value));
  }

  /**
   * Get a <i>double</i> from the given byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array source byte array
   * @param offset source offset
   * @return the <i>double</i>
   */
  public static double getDoubleBE(final byte[] array, final int offset) {
    return Double.longBitsToDouble(getLongBE(array, offset));
  }

  /**
   * Put the source <i>double</i> into the destination byte array starting at the given offset
   * in big endian order.
   * There is no bounds checking.
   * @param array destination byte array
   * @param offset destination offset
   * @param value source <i>double</i>
   */
  public static void putDoubleBE(final byte[] array, final int offset, final double value) {
    putLongBE(array, offset, Double.doubleToRawLongBits(value));
  }

}
| 2,691 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ArrayOfLongsSerDe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import static org.apache.datasketches.common.ByteArrayUtil.putLongLE;
import java.util.Objects;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
/**
* Methods of serializing and deserializing arrays of Long.
*
* @author Alexander Saydakov
*/
public class ArrayOfLongsSerDe extends ArrayOfItemsSerDe<Long> {

  /**
   * Serializes a single Long as 8 bytes in little-endian order.
   * @param item the Long to serialize, must not be null
   * @return an 8-byte array
   */
  @Override
  public byte[] serializeToByteArray(final Long item) {
    Objects.requireNonNull(item, "Item must not be null");
    final byte[] byteArr = new byte[Long.BYTES];
    putLongLE(byteArr, 0, item.longValue());
    return byteArr;
  }

  /**
   * Serializes an array of Longs as consecutive 8-byte little-endian values.
   * @param items the Longs to serialize, must not be null; elements must not be null
   * @return the serialized byte array
   */
  @Override
  public byte[] serializeToByteArray(final Long[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    if (items.length == 0) { return new byte[0]; }
    final byte[] bytes = new byte[Long.BYTES * items.length];
    int offset = 0;
    for (int i = 0; i < items.length; i++) {
      //Use the same explicit little-endian writer as the single-item method above.
      //The previous implementation wrapped a WritableMemory and used its default
      //byte order, which was inconsistent with the single-item serialization.
      putLongLE(bytes, offset, items[i]);
      offset += Long.BYTES;
    }
    return bytes;
  }

  @Override
  @Deprecated
  public Long[] deserializeFromMemory(final Memory mem, final int numItems) {
    //kept for backward compatibility; delegates to the offset-based form
    return deserializeFromMemory(mem, 0, numItems);
  }

  /**
   * Deserializes consecutive 8-byte values from Memory into an array of Longs.
   * @param mem the source Memory, must not be null
   * @param offsetBytes the starting offset in bytes
   * @param numItems the number of items to read; non-positive yields an empty array
   * @return the deserialized array
   */
  @Override
  public Long[] deserializeFromMemory(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    if (numItems <= 0) { return new Long[0]; }
    long offset = offsetBytes;
    //bounds check up front: items are fixed-width so the total size is known
    Util.checkBounds(offset, Long.BYTES * (long)numItems, mem.getCapacity());
    final Long[] array = new Long[numItems];
    for (int i = 0; i < numItems; i++) {
      array[i] = mem.getLong(offset);
      offset += Long.BYTES;
    }
    return array;
  }

  @Override
  public int sizeOf(final Long item) {
    Objects.requireNonNull(item, "Item must not be null");
    return Long.BYTES;
  }

  @Override //override because this is simpler
  public int sizeOf(final Long[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    return items.length * Long.BYTES;
  }

  @Override
  public int sizeOf(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    return numItems * Long.BYTES;
  }

  @Override
  public String toString(final Long item) {
    if (item == null) { return "null"; }
    return item.toString();
  }

  @Override
  public Class<Long> getClassOfT() { return Long.class; }
}
| 2,692 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ArrayOfStringsSerDe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import static org.apache.datasketches.common.ByteArrayUtil.copyBytes;
import static org.apache.datasketches.common.ByteArrayUtil.putIntLE;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import org.apache.datasketches.memory.Memory;
/**
* Methods of serializing and deserializing arrays of String.
* This class serializes strings in UTF-8 format, which is more compact compared to
* {@link ArrayOfUtf16StringsSerDe}. In an extreme case when all strings are in ASCII,
* this method is 2 times more compact, but it takes more time to encode and decode
* by a factor of 1.5 to 2.
*
 *
 * <p>The serialization format for each string is a 4-byte little-endian length
 * followed by the UTF-8 encoded bytes of the string.</p>
* @author Alexander Saydakov
*/
public class ArrayOfStringsSerDe extends ArrayOfItemsSerDe<String> {

  /**
   * Serializes one String as a 4-byte little-endian length followed by its UTF-8 bytes.
   * @param item the String to serialize, must not be null
   * @return the serialized byte array
   */
  @Override
  public byte[] serializeToByteArray(final String item) {
    Objects.requireNonNull(item, "Item must not be null");
    if (item.isEmpty()) { return new byte[] { 0, 0, 0, 0 }; }
    final byte[] utf8 = item.getBytes(StandardCharsets.UTF_8);
    final byte[] result = new byte[utf8.length + Integer.BYTES];
    putIntLE(result, 0, utf8.length);
    copyBytes(utf8, 0, result, 4, utf8.length);
    return result;
  }

  /**
   * Serializes an array of Strings, each as a 4-byte little-endian length
   * followed by its UTF-8 bytes.
   * @param items the Strings to serialize, must not be null; elements must not be null
   * @return the serialized byte array
   */
  @Override
  public byte[] serializeToByteArray(final String[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    if (items.length == 0) { return new byte[0]; }
    //first pass: encode each string once and accumulate the output size
    final byte[][] encoded = new byte[items.length][];
    int totalBytes = 0;
    for (int i = 0; i < items.length; i++) {
      encoded[i] = items[i].getBytes(StandardCharsets.UTF_8);
      totalBytes += encoded[i].length + Integer.BYTES;
    }
    //second pass: write length-prefixed payloads into the output
    final byte[] out = new byte[totalBytes];
    int pos = 0;
    for (final byte[] strBytes : encoded) {
      putIntLE(out, pos, strBytes.length);
      pos += Integer.BYTES;
      copyBytes(strBytes, 0, out, pos, strBytes.length);
      pos += strBytes.length;
    }
    return out;
  }

  @Override
  @Deprecated
  public String[] deserializeFromMemory(final Memory mem, final int numItems) {
    //kept for backward compatibility; delegates to the offset-based form
    return deserializeFromMemory(mem, 0, numItems);
  }

  /**
   * Deserializes length-prefixed UTF-8 strings from Memory.
   * @param mem the source Memory, must not be null
   * @param offsetBytes the starting offset in bytes
   * @param numItems the number of items to read; non-positive yields an empty array
   * @return the deserialized array
   */
  @Override
  public String[] deserializeFromMemory(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    if (numItems <= 0) { return new String[0]; }
    final String[] result = new String[numItems];
    long pos = offsetBytes;
    for (int i = 0; i < numItems; i++) {
      Util.checkBounds(pos, Integer.BYTES, mem.getCapacity());
      final int len = mem.getInt(pos);
      pos += Integer.BYTES;
      final byte[] utf8 = new byte[len];
      Util.checkBounds(pos, len, mem.getCapacity());
      mem.getByteArray(pos, utf8, 0, len);
      pos += len;
      result[i] = new String(utf8, StandardCharsets.UTF_8);
    }
    return result;
  }

  @Override
  public int sizeOf(final String item) {
    Objects.requireNonNull(item, "Item must not be null");
    //empty string still carries its 4-byte length prefix
    return item.isEmpty()
        ? Integer.BYTES
        : item.getBytes(StandardCharsets.UTF_8).length + Integer.BYTES;
  }

  /**
   * Computes the serialized size by walking the length prefixes in Memory,
   * without materializing any strings.
   */
  @Override
  public int sizeOf(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    if (numItems <= 0) { return 0; }
    final long memCap = mem.getCapacity();
    long pos = offsetBytes;
    for (int i = 0; i < numItems; i++) {
      Util.checkBounds(pos, Integer.BYTES, memCap);
      final int itemLenBytes = mem.getInt(pos);
      pos += Integer.BYTES;
      Util.checkBounds(pos, itemLenBytes, memCap);
      pos += itemLenBytes;
    }
    return (int)(pos - offsetBytes);
  }

  @Override
  public String toString(final String item) {
    return (item == null) ? "null" : item;
  }

  @Override
  public Class<String> getClassOfT() { return String.class; }
}
| 2,693 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/SuppressFBWarnings.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
* Used to suppress SpotBug warnings.
*
* @author Lee Rhodes
*/
//Retained in the class files (but not at runtime) so SpotBugs can read it from bytecode.
@Retention(RetentionPolicy.CLASS)
public @interface SuppressFBWarnings {
  /**
   * A list of comma-separated, quoted SpotBugs warnings that are to be suppressed in the associated
   * annotated element. The value can be a bug category, kind or pattern.
   * @return list of relevant bug descriptors
   */
  String[] value() default {};
  /**
   * Optional explanation for the suppression.
   * @return explanation
   */
  String justification() default "";
}
| 2,694 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package is for common classes that may be used across all the sketch families.
*/
package org.apache.datasketches.common;
| 2,695 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ArrayOfUtf16StringsSerDe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import static org.apache.datasketches.common.ByteArrayUtil.copyBytes;
import static org.apache.datasketches.common.ByteArrayUtil.putIntLE;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import org.apache.datasketches.memory.Memory;
/**
* Methods of serializing and deserializing arrays of String.
* This class serializes strings using internal Java representation as char[], where each char
* is a 16-bit code. The result is larger than one from {@link ArrayOfStringsSerDe}.
* In an extreme case when all strings are in ASCII, the size is doubled. However it takes
* less time to serialize and deserialize by a factor of 1.5 to 2.
*
* @author Alexander Saydakov
*/
public class ArrayOfUtf16StringsSerDe extends ArrayOfItemsSerDe<String> {

  /**
   * Serializes one String as a 4-byte little-endian length followed by its UTF-16 bytes
   * (including the byte-order mark emitted by {@link StandardCharsets#UTF_16}).
   * @param item the String to serialize, must not be null
   * @return the serialized byte array
   */
  @Override
  public byte[] serializeToByteArray(final String item) {
    Objects.requireNonNull(item, "Item must not be null");
    final byte[] utf16ByteArr = item.getBytes(StandardCharsets.UTF_16); //includes BOM
    final int numBytes = utf16ByteArr.length;
    final byte[] out = new byte[numBytes + Integer.BYTES];
    copyBytes(utf16ByteArr, 0, out, 4, numBytes);
    putIntLE(out, 0, numBytes);
    return out;
  }

  /**
   * Serializes an array of Strings, each as a 4-byte little-endian length
   * followed by its UTF-16 bytes.
   * @param items the Strings to serialize, must not be null; elements must not be null
   * @return the serialized byte array
   */
  @Override
  public byte[] serializeToByteArray(final String[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    if (items.length == 0) { return new byte[0]; } //consistent with ArrayOfStringsSerDe
    int totalBytes = 0;
    final int numItems = items.length;
    final byte[][] serialized2DArray = new byte[numItems][];
    for (int i = 0; i < numItems; i++) {
      serialized2DArray[i] = items[i].getBytes(StandardCharsets.UTF_16);
      totalBytes += serialized2DArray[i].length + Integer.BYTES;
    }
    final byte[] bytesOut = new byte[totalBytes];
    int offset = 0;
    for (int i = 0; i < numItems; i++) {
      //renamed from "utf8len" (copy-paste artifact from the UTF-8 variant)
      final int utf16Len = serialized2DArray[i].length;
      putIntLE(bytesOut, offset, utf16Len);
      offset += Integer.BYTES;
      copyBytes(serialized2DArray[i], 0, bytesOut, offset, utf16Len);
      offset += utf16Len;
    }
    return bytesOut;
  }

  @Override
  @Deprecated
  public String[] deserializeFromMemory(final Memory mem, final int numItems) {
    //kept for backward compatibility; delegates to the offset-based form
    return deserializeFromMemory(mem, 0, numItems);
  }

  /**
   * Deserializes length-prefixed UTF-16 strings from Memory.
   * @param mem the source Memory, must not be null
   * @param offsetBytes the starting offset in bytes
   * @param numItems the number of items to read; non-positive yields an empty array
   * @return the deserialized array
   */
  @Override
  public String[] deserializeFromMemory(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    if (numItems <= 0) { return new String[0]; }
    final String[] array = new String[numItems];
    long offset = offsetBytes;
    for (int i = 0; i < numItems; i++) {
      Util.checkBounds(offset, Integer.BYTES, mem.getCapacity());
      final int strLength = mem.getInt(offset);
      offset += Integer.BYTES;
      final byte[] utf16Bytes = new byte[strLength];
      Util.checkBounds(offset, strLength, mem.getCapacity());
      mem.getByteArray(offset, utf16Bytes, 0, strLength);
      offset += strLength;
      array[i] = new String(utf16Bytes, StandardCharsets.UTF_16);
    }
    return array;
  }

  @Override
  public int sizeOf(final String item) {
    Objects.requireNonNull(item, "Item must not be null");
    return item.getBytes(StandardCharsets.UTF_16).length + Integer.BYTES;
  }

  /**
   * Computes the serialized size by walking the length prefixes in Memory,
   * without materializing any strings.
   */
  @Override
  public int sizeOf(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    if (numItems <= 0) { return 0; } //consistent with ArrayOfStringsSerDe
    long offset = offsetBytes;
    final long memCap = mem.getCapacity();
    for (int i = 0; i < numItems; i++) {
      Util.checkBounds(offset, Integer.BYTES, memCap);
      final int itemLenBytes = mem.getInt(offset);
      offset += Integer.BYTES;
      Util.checkBounds(offset, itemLenBytes, memCap);
      offset += itemLenBytes;
    }
    return (int)(offset - offsetBytes);
  }

  @Override
  public String toString(final String item) {
    if (item == null) { return "null"; }
    return item;
  }

  @Override
  public Class<String> getClassOfT() { return String.class; }
}
| 2,696 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/ArrayOfNumbersSerDe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
import static org.apache.datasketches.common.ByteArrayUtil.copyBytes;
import static org.apache.datasketches.common.ByteArrayUtil.putDoubleLE;
import static org.apache.datasketches.common.ByteArrayUtil.putFloatLE;
import static org.apache.datasketches.common.ByteArrayUtil.putIntLE;
import static org.apache.datasketches.common.ByteArrayUtil.putLongLE;
import static org.apache.datasketches.common.ByteArrayUtil.putShortLE;
import java.util.Objects;
import org.apache.datasketches.memory.Memory;
/**
* Methods of serializing and deserializing arrays of the object version of primitive types of
* Number. The array can be a mix of primitive object types.
*
* <p>This class serializes numbers with a leading byte (ASCII character) indicating the type.
* The class keeps the values byte aligned, even though only 3 bits are strictly necessary to
* encode one of the 6 different primitives with object types that extend Number.</p>
*
* <p>Classes handled are: <code>Long</code>, <code>Integer</code>, <code>Short</code>,
* <code>Byte</code>, <code>Double</code>, and <code>Float</code>.</p>
*
* @author Jon Malkin
*/
public class ArrayOfNumbersSerDe extends ArrayOfItemsSerDe<Number> {
  // values selected to enable backwards compatibility
  //These indicator bytes are part of the wire format; they must never change.
  private static final byte LONG_INDICATOR = 12;
  private static final byte INTEGER_INDICATOR = 9;
  private static final byte SHORT_INDICATOR = 3;
  private static final byte BYTE_INDICATOR = 2;
  private static final byte DOUBLE_INDICATOR = 4;
  private static final byte FLOAT_INDICATOR = 6;
  //Serializes one Number as a 1-byte type indicator followed by the value
  //in little-endian order. Throws SketchesArgumentException for any Number
  //subtype other than the six handled here (e.g. BigDecimal, AtomicLong).
  @Override
  public byte[] serializeToByteArray(final Number item) {
    Objects.requireNonNull(item, "Item must not be null");
    final byte[] byteArr;
    if (item instanceof Long) {
      byteArr = new byte[Long.BYTES + 1];
      byteArr[0] = LONG_INDICATOR;
      putLongLE(byteArr, 1, (Long)item);
    } else if (item instanceof Integer) {
      byteArr = new byte[Integer.BYTES + 1];
      byteArr[0] = INTEGER_INDICATOR;
      putIntLE(byteArr, 1, (Integer)item);
    } else if (item instanceof Short) {
      byteArr = new byte[Short.BYTES + 1];
      byteArr[0] = SHORT_INDICATOR;
      putShortLE(byteArr, 1, (Short)item);
    } else if (item instanceof Byte) {
      byteArr = new byte[Byte.BYTES + 1];
      byteArr[0] = BYTE_INDICATOR;
      byteArr[1] = (byte)item; //single byte: no endianness concern
    } else if (item instanceof Double) {
      byteArr = new byte[Double.BYTES + 1];
      byteArr[0] = DOUBLE_INDICATOR;
      putDoubleLE(byteArr, 1, (Double)item);
    } else if (item instanceof Float) {
      byteArr = new byte[Float.BYTES + 1];
      byteArr[0] = FLOAT_INDICATOR;
      putFloatLE(byteArr, 1, (Float)item);
    } else {
      throw new SketchesArgumentException(
          "Item must be one of: Long, Integer, Short, Byte, Double, Float. "
              + "item: " + item.toString());
    }
    return byteArr;
  }
  //Serializes an array of Numbers by concatenating the per-item serializations.
  //The array may mix any of the six supported types since each item carries
  //its own type indicator.
  @Override
  public byte[] serializeToByteArray(final Number[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    final int numItems = items.length;
    int totalBytes = 0;
    //first pass: serialize each item and accumulate the output size
    final byte[][] serialized2DArray = new byte[numItems][];
    for (int i = 0; i < numItems; i++) {
      serialized2DArray[i] = serializeToByteArray(items[i]);
      totalBytes += serialized2DArray[i].length;
    }
    //second pass: concatenate into a single output array
    final byte[] out = new byte[totalBytes];
    int offset = 0;
    for (int i = 0; i < numItems; i++) {
      final int itemLen = serialized2DArray[i].length;
      copyBytes(serialized2DArray[i], 0, out, offset, itemLen);
      offset += itemLen;
    }
    return out;
  }
  @Override
  @Deprecated
  public Number[] deserializeFromMemory(final Memory mem, final int numItems) {
    //kept for backward compatibility; delegates to the offset-based form
    return deserializeFromMemory(mem, 0, numItems);
  }
  //Deserializes numItems indicator-prefixed values from Memory, reconstructing
  //each as the boxed type named by its indicator byte.
  @Override
  public Number[] deserializeFromMemory(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    if (numItems <= 0) { return new Number[0]; }
    final Number[] array = new Number[numItems];
    long offset = offsetBytes;
    for (int i = 0; i < numItems; i++) {
      Util.checkBounds(offset, Byte.BYTES, mem.getCapacity());
      final byte typeId = mem.getByte(offset);
      offset += Byte.BYTES;
      switch (typeId) {
        case LONG_INDICATOR:
          Util.checkBounds(offset, Long.BYTES, mem.getCapacity());
          array[i] = mem.getLong(offset);
          offset += Long.BYTES;
          break;
        case INTEGER_INDICATOR:
          Util.checkBounds(offset, Integer.BYTES, mem.getCapacity());
          array[i] = mem.getInt(offset);
          offset += Integer.BYTES;
          break;
        case SHORT_INDICATOR:
          Util.checkBounds(offset, Short.BYTES, mem.getCapacity());
          array[i] = mem.getShort(offset);
          offset += Short.BYTES;
          break;
        case BYTE_INDICATOR:
          Util.checkBounds(offset, Byte.BYTES, mem.getCapacity());
          array[i] = mem.getByte(offset);
          offset += Byte.BYTES;
          break;
        case DOUBLE_INDICATOR:
          Util.checkBounds(offset, Double.BYTES, mem.getCapacity());
          array[i] = mem.getDouble(offset);
          offset += Double.BYTES;
          break;
        case FLOAT_INDICATOR:
          Util.checkBounds(offset, Float.BYTES, mem.getCapacity());
          array[i] = mem.getFloat(offset);
          offset += Float.BYTES;
          break;
        default:
          throw new SketchesArgumentException(
              "Item must be one of: Long, Integer, Short, Byte, Double, Float. "
                  + "index: " + i + ", typeId: " + typeId);
      }
    }
    return array;
  }
  //Returns 1 (indicator byte) plus the primitive width of the item's type.
  @Override
  public int sizeOf(final Number item) {
    Objects.requireNonNull(item, "Item must not be null");
    if ( item instanceof Long) { return Byte.BYTES + Long.BYTES; }
    else if ( item instanceof Integer) { return Byte.BYTES + Integer.BYTES; }
    else if ( item instanceof Short) { return Byte.BYTES + Short.BYTES; }
    else if ( item instanceof Byte) { return Byte.BYTES + Byte.BYTES; }
    else if ( item instanceof Double) { return Byte.BYTES + Double.BYTES; }
    else if ( item instanceof Float) { return Byte.BYTES + Float.BYTES; }
    else { throw new SketchesArgumentException(
        "Item must be one of: Long, Integer, Short, Byte, Double, Float. "
            + "item: " + item.toString()); }
  }
  @Override
  public int sizeOf(final Number[] items) {
    Objects.requireNonNull(items, "Items must not be null");
    int totalBytes = 0;
    for (final Number item : items) {
      totalBytes += sizeOf(item);
    }
    return totalBytes;
  }
  //Computes the serialized size by walking the indicator bytes in Memory,
  //without materializing any items. Mirrors the switch in deserializeFromMemory.
  @Override
  public int sizeOf(final Memory mem, final long offsetBytes, final int numItems) {
    Objects.requireNonNull(mem, "Memory must not be null");
    long offset = offsetBytes;
    for (int i = 0; i < numItems; i++) {
      Util.checkBounds(offset, Byte.BYTES, mem.getCapacity());
      final byte typeId = mem.getByte(offset);
      offset += Byte.BYTES;
      switch (typeId) {
        case LONG_INDICATOR:
          Util.checkBounds(offset, Long.BYTES, mem.getCapacity());
          offset += Long.BYTES;
          break;
        case INTEGER_INDICATOR:
          Util.checkBounds(offset, Integer.BYTES, mem.getCapacity());
          offset += Integer.BYTES;
          break;
        case SHORT_INDICATOR:
          Util.checkBounds(offset, Short.BYTES, mem.getCapacity());
          offset += Short.BYTES;
          break;
        case BYTE_INDICATOR:
          Util.checkBounds(offset, Byte.BYTES, mem.getCapacity());
          offset += Byte.BYTES;
          break;
        case DOUBLE_INDICATOR:
          Util.checkBounds(offset, Double.BYTES, mem.getCapacity());
          offset += Double.BYTES;
          break;
        case FLOAT_INDICATOR:
          Util.checkBounds(offset, Float.BYTES, mem.getCapacity());
          offset += Float.BYTES;
          break;
        default:
          throw new SketchesArgumentException(
              "Item must be one of: Long, Integer, Short, Byte, Double, Float. "
                  + "index: " + i + ", typeId: " + typeId);
      }
    }
    return (int)(offset - offsetBytes);
  }
  @Override
  public String toString(final Number item) {
    if (item == null) { return "null"; }
    return item.toString();
  }
  @Override
  public Class<Number> getClassOfT() { return Number.class; }
}
| 2,697 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/common/SketchesException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.common;
/**
* Exception class for the library
*
* @author Lee Rhodes
*/
public class SketchesException extends RuntimeException {
  private static final long serialVersionUID = 1L;

  //other constructors to be added as needed.

  /**
   * Creates an exception carrying only a detail message. The cause is left
   * unset and may still be attached later with a call to
   * Throwable.initCause(java.lang.Throwable).
   *
   * @param message the detail message, retrievable later through the
   * Throwable.getMessage() method.
   */
  public SketchesException(final String message) {
    super(message);
  }

  /**
   * Creates an exception carrying both a detail message and a cause.
   *
   * <p>Note that the cause's own detail message is not automatically merged
   * into this exception's detail message.</p>
   *
   * @param message the detail message, retrievable later through the
   * Throwable.getMessage() method.
   * @param cause the underlying cause, retrievable later through the
   * Throwable.getCause() method; may be null when the cause is nonexistent
   * or unknown.
   */
  public SketchesException(final String message, final Throwable cause) {
    super(message, cause);
  }
}
| 2,698 |
0 | Create_ds/datasketches-java/src/main/java/org/apache/datasketches | Create_ds/datasketches-java/src/main/java/org/apache/datasketches/thetacommon/BinomialBoundsN.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.thetacommon;
import org.apache.datasketches.common.SketchesArgumentException;
/**
* This class enables the estimation of error bounds given a sample set size, the sampling
* probability theta, the number of standard deviations and a simple noDataSeen flag. This can
* be used to estimate error bounds for fixed threshold sampling as well as the error bounds
* calculations for sketches.
*
* @author Kevin Lang
*/
// BTW, the suffixes "NStar", "NPrimeB", and "NPrimeF" correspond to variables in the formal
// writeup of this scheme.
public final class BinomialBoundsN {

  //Static utility class: private constructor prevents instantiation.
  private BinomialBoundsN() {}

  //deltaOfNumSDev[k] is the Gaussian lower-tail probability at k standard deviations
  //below the mean, for k = 0, 1, 2, 3; it converts numSDev into a tail probability delta.
  private static final double[] deltaOfNumSDev =
  {
    0.5000000000000000000, // = 0.5 (1 + erf(0)
    0.1586553191586026479, // = 0.5 (1 + erf((-1/sqrt(2))))
    0.0227502618904135701, // = 0.5 (1 + erf((-2/sqrt(2))))
    0.0013498126861731796  // = 0.5 (1 + erf((-3/sqrt(2))))
  };

  // our "classic" bounds, but now with continuity correction
  // Gaussian approximation to the binomial lower bound; the -0.5 shift applied to
  // numSamplesF is the continuity correction for the discrete sample count.
  private static double contClassicLB(final double numSamplesF, final double theta,
      final double numSDev) {
    final double nHat = (numSamplesF - 0.5) / theta;
    final double b = numSDev * Math.sqrt((1.0 - theta) / theta);
    final double d = 0.5 * b * Math.sqrt((b * b) + (4.0 * nHat));
    final double center = nHat + (0.5 * (b * b));
    return (center - d);
  }

  // Gaussian approximation to the binomial upper bound; mirrors contClassicLB()
  // with a +0.5 continuity correction and the deviation added instead of subtracted.
  private static double contClassicUB(final double numSamplesF, final double theta,
      final double numSDev) {
    final double nHat = (numSamplesF + 0.5) / theta;
    final double b = numSDev * Math.sqrt((1.0 - theta) / theta);
    final double d = 0.5 * b * Math.sqrt((b * b) + (4.0 * nHat));
    final double center = nHat + (0.5 * (b * b));
    return (center + d);
  }

  // This is a special purpose calculator for NStar, using a computational
  // strategy inspired by its Bayesian definition. It is only appropriate
  // for a very limited set of inputs. However, the procedure computeApproxBinoLB ()
  // below does in fact only call it for suitably limited inputs.
  // Outside of this limited range, two different bad things will happen.
  // First, because we are not using logarithms, the values of intermediate
  // quantities will exceed the dynamic range of doubles. Second, even if that
  // problem were fixed, the running time of this procedure is essentially linear
  // in est = (numSamples / p), and that can be Very, Very Big.
  private static long specialNStar(final long numSamplesI, final double p, final double delta) {
    final double q, numSamplesF;
    double tot, curTerm;
    long m;
    assertTrue(numSamplesI >= 1);
    assertTrue((0.0 < p) && (p < 1.0));
    assertTrue((0.0 < delta) && (delta < 1.0));
    q = 1.0 - p;
    numSamplesF = numSamplesI;
    // Use a different algorithm if the following isn't true; this one will be too slow, or worse.
    assertTrue((numSamplesF / p) < 500.0);
    curTerm = Math.pow(p, numSamplesF); // curTerm = posteriorProbability (k, k, p)
    assertTrue(curTerm > 1e-100); // sanity check for non-use of logarithms
    tot = curTerm;
    m = numSamplesI;
    //accumulate posterior probability mass until it first exceeds delta
    while (tot <= delta) { // this test can fail even the first time
      curTerm = (curTerm * q * (m)) / ((m + 1) - numSamplesI);
      tot += curTerm;
      m += 1;
    }
    // we have reached a state where tot > delta, so back up one
    return (m - 1);
  }

  // The following procedure has very limited applicability.
  // The above remarks about specialNStar() also apply here.
  private static long specialNPrimeB(final long numSamplesI, final double p, final double delta) {
    final double q, numSamplesF, oneMinusDelta;
    double tot, curTerm;
    long m;
    assertTrue(numSamplesI >= 1);
    assertTrue((0.0 < p) && (p < 1.0));
    assertTrue((0.0 < delta) && (delta < 1.0));
    q = 1.0 - p;
    oneMinusDelta = 1.0 - delta;
    numSamplesF = numSamplesI;
    curTerm = Math.pow(p, numSamplesF); // curTerm = posteriorProbability (k, k, p)
    assertTrue(curTerm > 1e-100); // sanity check for non-use of logarithms
    tot = curTerm;
    m = numSamplesI;
    //accumulate until the mass reaches 1 - delta; the loop exit value is the answer
    while (tot < oneMinusDelta) {
      curTerm = (curTerm * q * (m)) / ((m + 1) - numSamplesI);
      tot += curTerm;
      m += 1;
    }
    return (m); // don't need to back up
  }

  //Upper-bound variant: same iteration as specialNPrimeB() but started at numSamplesI + 1.
  private static long specialNPrimeF(final long numSamplesI, final double p, final double delta) {
    // Use a different algorithm if the following isn't true; this one will be too slow, or worse.
    assertTrue(((numSamplesI) / p) < 500.0); //A super-small delta could also make it slow.
    return (specialNPrimeB(numSamplesI + 1, p, delta));
  }

  // The following computes an approximation to the lower bound of
  // a Frequentist confidence interval based on the tails of the Binomial distribution.
  // Dispatches among exact formulas, Gaussian approximations, and the "exact"
  // iterative computation depending on which regime (numSamplesI, theta) falls into.
  private static double computeApproxBinoLB(final long numSamplesI, final double theta,
      final int numSDev) {
    if (theta == 1.0) {
      return (numSamplesI);
    }

    else if (numSamplesI == 0) {
      return (0.0);
    }

    else if (numSamplesI == 1) {
      final double delta = deltaOfNumSDev[numSDev];
      final double rawLB = (Math.log(1.0 - delta)) / (Math.log(1.0 - theta));
      return (Math.floor(rawLB)); // round down
    }

    else if (numSamplesI > 120) {
      // plenty of samples, so gaussian approximation to binomial distribution isn't too bad
      final double rawLB = contClassicLB( numSamplesI, theta, numSDev);
      return (rawLB - 0.5); // fake round down
    }

    // at this point we know 2 <= numSamplesI <= 120

    else if (theta > (1.0 - 1e-5)) { // empirically-determined threshold
      return (numSamplesI);
    }

    else if (theta < ((numSamplesI) / 360.0)) { // empirically-determined threshold
      // here we use the gaussian approximation, but with a modified "numSDev"
      final int index;
      final double rawLB;
      index = (3 * ((int) numSamplesI)) + (numSDev - 1);
      rawLB = contClassicLB(numSamplesI, theta, EquivTables.getLB(index));
      return (rawLB - 0.5); // fake round down
    }

    else { // This is the most difficult range to approximate; we will compute an "exact" LB.
      // We know that est <= 360, so specialNStar() shouldn't be ridiculously slow.
      final double delta = deltaOfNumSDev[numSDev];
      final long nstar = specialNStar(numSamplesI, theta, delta);
      return (nstar); // don't need to round
    }
  }

  // The following computes an approximation to the upper bound of
  // a Frequentist confidence interval based on the tails of the Binomial distribution.
  // Same regime-dispatch structure as computeApproxBinoLB(), mirrored for the upper tail.
  private static double computeApproxBinoUB(final long numSamplesI, final double theta,
      final int numSDev) {
    if (theta == 1.0) {
      return (numSamplesI);
    }

    else if (numSamplesI == 0) {
      final double delta = deltaOfNumSDev[numSDev];
      final double rawUB = (Math.log(delta)) / (Math.log(1.0 - theta));
      return (Math.ceil(rawUB)); // round up
    }

    else if (numSamplesI > 120) {
      // plenty of samples, so gaussian approximation to binomial distribution isn't too bad
      final double rawUB = contClassicUB(numSamplesI, theta, numSDev);
      return (rawUB + 0.5); // fake round up
    }

    // at this point we know 1 <= numSamplesI <= 120

    else if (theta > (1.0 - 1e-5)) { // empirically-determined threshold
      return (numSamplesI + 1);
    }

    else if (theta < ((numSamplesI) / 360.0)) { // empirically-determined threshold
      // here we use the gaussian approximation, but with a modified "numSDev"
      final int index;
      final double rawUB;
      index = (3 * ((int) numSamplesI)) + (numSDev - 1);
      rawUB = contClassicUB(numSamplesI, theta, EquivTables.getUB(index));
      return (rawUB + 0.5); // fake round up
    }

    else { // This is the most difficult range to approximate; we will compute an "exact" UB.
      // We know that est <= 360, so specialNPrimeF() shouldn't be ridiculously slow.
      final double delta = deltaOfNumSDev[numSDev];
      final long nprimef = specialNPrimeF(numSamplesI, theta, delta);
      return (nprimef); // don't need to round
    }
  }

  // The following two procedures enforce some extra rules that help
  // to prevent the return of bounds that might be confusing to users.

  /**
   * Returns the approximate lower bound value
   * @param numSamples the number of samples in the sample set
   * @param theta the sampling probability
   * @param numSDev the number of "standard deviations" from the mean for the tail bounds.
   * This must be an integer value of 1, 2 or 3.
   * @param noDataSeen this is normally false. However, in the case where you have zero samples
   * and a theta < 1.0, this flag enables the distinction between a virgin case when no actual
   * data has been seen and the case where the estimate may be zero but an upper error bound may
   * still exist.
   * @return the approximate lower bound value
   */
  public static double getLowerBound(final long numSamples, final double theta, final int numSDev,
      final boolean noDataSeen) {
    //in earlier code numSamples was called numSamplesI
    if (noDataSeen) { return 0.0; }
    checkArgs(numSamples, theta, numSDev);
    final double lb = computeApproxBinoLB(numSamples, theta, numSDev);
    final double numSamplesF = numSamples;
    final double est = numSamplesF / theta;
    //clamp so the LB is never above the estimate and never below the raw sample count
    return (Math.min(est, Math.max(numSamplesF, lb)));
  }

  /**
   * Returns the approximate upper bound value
   * @param numSamples the number of samples in the sample set
   * @param theta the sampling probability
   * @param numSDev the number of "standard deviations" from the mean for the tail bounds.
   * This must be an integer value of 1, 2 or 3.
   * @param noDataSeen this is normally false. However, in the case where you have zero samples
   * and a theta < 1.0, this flag enables the distinction between a virgin case when no actual
   * data has been seen and the case where the estimate may be zero but an upper error bound may
   * still exist.
   * @return the approximate upper bound value
   */
  public static double getUpperBound(final long numSamples, final double theta, final int numSDev,
      final boolean noDataSeen) {
    //in earlier code numSamples was called numSamplesI
    if (noDataSeen) { return 0.0; }
    checkArgs(numSamples, theta, numSDev);
    final double ub = computeApproxBinoUB(numSamples, theta, numSDev);
    final double numSamplesF = numSamples;
    final double est = numSamplesF / theta;
    //clamp so the UB is never below the estimate
    return (Math.max(est, ub));
  }

  //exposed only for test
  //Validates 1 <= numSDev <= 3, numSamples >= 0, and 0.0 <= theta <= 1.0.
  static void checkArgs(final long numSamples, final double theta, final int numSDev) {
    //branch-free range check: the OR of these terms is negative exactly when
    //numSDev < 1, numSDev > 3, or numSamples < 0
    if ((numSDev | (numSDev - 1) | (3 - numSDev) | numSamples) < 0) {
      throw new SketchesArgumentException(
          "numSDev must only be 1,2, or 3 and numSamples must >= 0: numSDev="
              + numSDev + ", numSamples=" + numSamples);
    }
    //NOTE(review): this test admits theta == 0.0 although the message implies it is
    //excluded — confirm whether theta == 0.0 should be rejected.
    if ((theta < 0.0) || (theta > 1.0)) {
      throw new SketchesArgumentException("0.0 < theta <= 1.0: " + theta);
    }
  }

  //Wrapper so internal sanity checks participate in java -ea assertion checking.
  private static void assertTrue(final boolean truth) {
    assert (truth);
  }

} // end of class "BinomialBoundsN"
| 2,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.