index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/ImmutableArrayList.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.builder.ListBuilder;
import java.util.AbstractList;
import java.util.Collection;
import java.util.List;
/**
* Immutable implementation of the List interface
*
* @author tvaliulin
*
* @param <E>
*/
public class ImmutableArrayList<E> extends AbstractList<E> implements ListBuilder<E> {
protected Object[] elements;
// Builder-style construction: callers must invoke builderInit/builderSet/builderFinish afterwards.
public ImmutableArrayList() {
}
// Eagerly copies the collection's elements into the backing array.
public ImmutableArrayList(Collection<E> collection) {
setElements(collection);
}
// Sizes the backing array to the collection's size and copies its
// elements in iteration order via the ListBuilder protocol.
protected void setElements(Collection<E> collection) {
builderInit(collection.size());
int i = 0;
for (E entry : collection) {
builderSet(i++, entry);
}
builderFinish();
}
@SuppressWarnings("unchecked")
@Override
public E get(int index) {
// Unchecked cast is safe: only builderSet(int, E) writes into elements.
return (E) elements[index];
}
@Override
public int size() {
// Backing array is sized exactly to the element count in builderInit.
return elements.length;
}
@Override
public void builderInit(int size) {
// Allocates the fixed-size backing store for the immutable list.
this.elements = new Object[size];
}
@Override
public void builderSet(int index, E element) {
// Positional write; no bounds/state checks beyond the array's own.
elements[index] = element;
}
@Override
public List<E> builderFinish() {
// No post-processing required for a plain array-backed list.
return this;
}
} | 8,300 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/Utils.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
/**
* Utility methods
*
* @author tvaliulin
*
*/
/**
 * Small shared helpers used by the collection implementations.
 *
 * @author tvaliulin
 */
public class Utils {
    /**
     * Helpers operating on the interleaved key/value arrays used by the
     * map implementations (key of pair i at index 2*i, value at 2*i+1).
     */
    public final static class Array {
        /**
         * Exchanges the key/value pair at logical position x with the
         * pair at logical position y inside an interleaved array.
         */
        public static final void swap(Object[] keysAndValues, int x, int y) {
            int xi = x * 2;
            int yi = y * 2;
            Object tmpKey = keysAndValues[xi];
            Object tmpValue = keysAndValues[xi + 1];
            keysAndValues[xi] = keysAndValues[yi];
            keysAndValues[xi + 1] = keysAndValues[yi + 1];
            keysAndValues[yi] = tmpKey;
            keysAndValues[yi + 1] = tmpValue;
        }
    }
    /**
     * Null-safe equality: two nulls are equal, a single null is not
     * equal to anything, otherwise delegates to {@code o1.equals(o2)}.
     */
    public static final boolean equal(Object o1, Object o2) {
        return (o1 == null) ? (o2 == null) : o1.equals(o2);
    }
}
| 8,301 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/SingletonSortedMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import java.io.Serializable;
import java.util.AbstractMap;
import java.util.Collection;
import java.util.Comparator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
/**
* Implementation of Singleton SortedMap
*
* @author tvaliulin
*
* @param <K>
* @param <V>
*/
public class SingletonSortedMap<K, V> extends AbstractMap<K, V> implements SortedMap<K, V>, Serializable {
private static final long serialVersionUID = 4009578255191820277L;
private final K key;
private final V value;
// Fixes this map's single key/value pair for its lifetime; both may be null.
public SingletonSortedMap(K key, V value) {
this.key = key;
this.value = value;
}
@Override
public boolean containsKey(Object object) {
// Null-safe comparison against the single key.
return Utils.equal(this.key, object);
}
@Override
public boolean containsValue(Object object) {
// Null-safe comparison against the single value.
return Utils.equal(this.value, object);
}
@Override
public V get(Object object) {
// Returns the single value when the key matches; null otherwise.
return Utils.equal(this.key, object) ? this.value : null;
}
@Override
public boolean isEmpty() {
// A singleton map always holds exactly one mapping.
return false;
}
@Override
public Set<K> keySet() {
// Immutable one-element view over the single key.
return java.util.Collections.singleton(this.key);
}
@Override
public int size() {
// Exactly one mapping, by construction.
return 1;
}
@Override
public Collection<V> values() {
// Immutable one-element view over the single value.
return java.util.Collections.singleton(this.value);
}
@Override
@SuppressWarnings({ "unchecked", "rawtypes" })
public Set<java.util.Map.Entry<K, V>> entrySet() {
// Single immutable entry; SimpleImmutableEntry rejects setValue().
return java.util.Collections.singleton((Map.Entry<K, V>) new SimpleImmutableEntry(key, value));
}
/**
 * Returns the comparator used to order the keys of this map. This
 * implementation orders its single key naturally, so per the SortedMap
 * contract it returns {@code null} (natural ordering).
 *
 * Bug fix: the previous body was {@code return comparator();}, an
 * unconditional self-call that threw StackOverflowError on any use.
 */
@Override
public Comparator<? super K> comparator() {
    return null;
}
@Override
public K firstKey() {
// The only key is both first and last.
return this.key;
}
/**
 * Per SortedMap: the portion of this map whose keys are strictly less
 * than {@code toKey} — i.e. this map when its single key precedes
 * toKey, otherwise an empty sorted map.
 *
 * Bug fix: compares via the key's natural ordering instead of calling
 * comparator(), which in this class recursed infinitely. Throws
 * ClassCastException when K is not Comparable, consistent with
 * natural-ordering sorted maps.
 */
@Override
@SuppressWarnings("unchecked")
public SortedMap<K, V> headMap(K toKey) {
    return (((Comparable<? super K>) this.key).compareTo(toKey) < 0) ? this : NetflixCollections.<K, V> emptySortedMap();
}
@Override
public K lastKey() {
// The only key is both first and last.
return this.key;
}
/**
 * Per SortedMap: keys in the half-open range [fromKey, toKey) — i.e.
 * this map when its single key falls in the range, otherwise an empty
 * sorted map.
 *
 * Bug fix: compares via the key's natural ordering instead of calling
 * comparator(), which in this class recursed infinitely.
 */
@Override
@SuppressWarnings("unchecked")
public SortedMap<K, V> subMap(K fromKey, K toKey) {
    Comparable<? super K> k = (Comparable<? super K>) this.key;
    return ((k.compareTo(toKey) < 0) && (k.compareTo(fromKey) >= 0)) ? this : NetflixCollections.<K, V> emptySortedMap();
}
/**
 * Per SortedMap: keys greater than or equal to {@code fromKey} — i.e.
 * this map when its single key is at or after fromKey, otherwise an
 * empty sorted map.
 *
 * Bug fix: compares via the key's natural ordering instead of calling
 * comparator(), which in this class recursed infinitely.
 */
@Override
@SuppressWarnings("unchecked")
public SortedMap<K, V> tailMap(K fromKey) {
    return (((Comparable<? super K>) this.key).compareTo(fromKey) >= 0) ? this : NetflixCollections.<K, V> emptySortedMap();
}
} | 8,302 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/BinarySearchArraySet.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.Comparators;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Set;
/**
* Immutable Binary Search implementation of the Set interface
*
* @author tvaliulin
*
* @param <E>
*/
public class BinarySearchArraySet<E> extends AbstractArraySet<E> {
// Elements kept sorted by hash code (see builderFinish) so that
// contains() can binary search.
protected Object[] elements;
public BinarySearchArraySet() {
super();
}
// Copies the collection through the SetBuilder protocol (AbstractArraySet.setElements).
public BinarySearchArraySet(Collection<E> from) {
super(from);
}
@Override
public int size() {
return elements.length;
}
// Ordering used for both the sort in builderFinish and the lookup in contains.
// NOTE(review): the scan loops below compare hashCode(..) (which applies
// this set's rehash()); this assumes Comparators.hashCodeComparator()
// orders by the same raw hashCode — confirm if rehash() is ever overridden
// in a subclass of this set.
public Comparator<Object> comparator() {
return Comparators.hashCodeComparator();
}
// Binary search locates SOME element with a matching hash; the two scans
// then walk the run of equal-hash neighbors in both directions looking
// for true equality (hash collisions are possible).
@Override
public boolean contains(Object o) {
int hash = hashCode(o);
int index = Arrays.binarySearch(elements, o, comparator());
if (index < 0) {
return false;
}
// going upward
for (int i = index; i >= 0 && hashCode(elements[i]) == hash; i--) {
if (Utils.equal(o, elements[i])) {
return true;
}
}
// going downward
for (int i = index + 1; i < size() && hashCode(elements[i]) == hash; i++) {
if (Utils.equal(o, elements[i])) {
return true;
}
}
return false;
}
@SuppressWarnings("unchecked")
@Override
protected E element(int index) {
// Safe cast: only builderSet(int, E) writes into elements.
return (E) elements[index];
}
@Override
public void builderInit(int size) {
elements = new Object[size];
}
@Override
public void builderSet(int index, E element) {
elements[index] = element;
}
// Sorting by hash code is what makes the binary search in contains() valid.
@Override
public Set<E> builderFinish() {
Arrays.sort(elements, comparator());
return this;
}
}
| 8,303 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/OpenAddressingHashMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.builder.MapBuilder;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
/**
* Open Addressing hash map immutable implementation of the Map interface
*
* @author dkoszevnik
* @author tvaliulin
*
* @param <K>
* @param <V>
*/
public class OpenAddressingHashMap<K, V> extends AbstractArrayMap<K, V> implements MapBuilder<K, V> {
// hashTable will be byte[], short[], or int[], depending on how many entries the Map has.
// hashTable[i] points to the index of a key in the entries[] array which
// hashes to i.
protected Object hashTable;
// entries.length is 2*number of key/value pairs. All keys are located at
// even array indices.
// the value for a given key is located at entries[keyIndex + 1];
protected Object keysAndValues[];
// Count of key/value pairs actually inserted via builderPut.
protected int size;
public OpenAddressingHashMap() {
setMap(Collections.<K, V> emptyMap());
}
public OpenAddressingHashMap(Map<K, V> map) {
setMap(map);
}
public OpenAddressingHashMap(Map.Entry<K, V>[] entries) {
setMap(entries);
}
// Range-copy constructor; copying semantics live in AbstractArrayMap.
public OpenAddressingHashMap(AbstractArrayMap<K, V> map, int start, int end) {
super(map, start, end);
}
// 70% load factor
public float loadFactor() {
return 0.7f;
}
// Allocates the hash index (sized for the load factor) and the
// interleaved key/value store.
@Override
public void builderInit(int numEntries) {
hashTable = OpenAddressing.newHashTable(numEntries, loadFactor());
keysAndValues = new Object[numEntries * 2];
}
// NOTE(review): the index parameter is deliberately ignored — entries are
// appended at the running "size" cursor regardless of the index passed.
@Override
public void builderPut(int index, K key, V value) {
keysAndValues[size * 2] = key;
keysAndValues[(size * 2) + 1] = value;
size++;
}
// Trims unused capacity, then inserts every entry's position into the
// hash index using linear probing.
@Override
public Map<K, V> builderFinish() {
// / Math.abs(x % n) is the same as (x & n-1) when n is a power of 2
int hashModMask = OpenAddressing.hashTableLength(hashTable) - 1;
if(keysAndValues.length > size * 2)
keysAndValues = Arrays.copyOf(keysAndValues, size * 2);
for (int i = 0; i < keysAndValues.length; i += 2) {
int hash = hashCode(keysAndValues[i]);
int bucket = hash & hashModMask;
/// linear probing resolves collisions
while (OpenAddressing.getHashEntry(hashTable, bucket) != -1) {
bucket = (bucket + 1) & hashModMask;
}
OpenAddressing.setHashEntry(hashTable, bucket, i / 2);
}
return this;
}
// Strong bit-mixing matters for open addressing; see OpenAddressing.rehash.
@Override
protected int rehash(int hash) {
return OpenAddressing.rehash(hash);
}
@Override
public int size() {
return size;
}
@Override
@SuppressWarnings("unchecked")
protected K key(int index) {
return (K) keysAndValues[index * 2];
}
@Override
@SuppressWarnings("unchecked")
protected V value(int index) {
return (V) keysAndValues[index * 2 + 1];
}
/**
 * If finish() has already been called on this map, this method returns the
 * value associated with the specified key. If the specified key is not in
 * this map, returns null.
 *
 * If finish() has not been called on this map, this method always returns
 * null.
 */
@Override
@SuppressWarnings("unchecked")
public Object getUndefined(Object key) {
// Math.abs(x % n) is the same as (x & n-1) when n is a power of 2
int hashModMask = OpenAddressing.hashTableLength(hashTable) - 1;
int hash = hashCode(key);
int bucket = hash & hashModMask;
// Entry indices are doubled once here so they address the interleaved array directly.
int hashEntry = OpenAddressing.getHashEntry(hashTable, bucket) * 2;
// We found an entry at this hash position
while (hashEntry >= 0) {
if (Utils.equal(keysAndValues[hashEntry], key)) {
return (V) keysAndValues[hashEntry + 1];
}
// linear probing resolves collisions.
bucket = (bucket + 1) & hashModMask;
hashEntry = OpenAddressing.getHashEntry(hashTable, bucket) * 2;
}
// Probe hit an empty bucket: key is absent. The "undefined" sentinel
// distinguishes absence from a stored null value.
return AbstractArrayMap.undefined;
}
}
| 8,304 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/BinarySearchArrayIndexedSet.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.Comparators;
import com.netflix.zeno.util.collections.algorithms.Sortable;
import com.netflix.zeno.util.collections.algorithms.ArrayQuickSort;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Set;
/**
* Immutable Binary Search implementation of the Set interface with hashCode
* index on the side
*
* @author tvaliulin
*
* @param <E>
*/
public class BinarySearchArrayIndexedSet<E> extends AbstractArraySet<E> implements Sortable<Integer> {
// Side index: hashes[i] == hashCode(elements[i]); both arrays are kept
// sorted by hash (see builderFinish) so contains() can binary search
// the int[] index instead of comparing Objects.
protected int[] hashes;
protected Object[] elements;
public BinarySearchArrayIndexedSet() {
super();
}
public BinarySearchArrayIndexedSet(Collection<E> from) {
super(from);
}
@Override
public int size() {
return elements.length;
}
public Comparator<Object> comparator() {
return Comparators.hashCodeComparator();
}
// Binary search on the int[] hash index locates SOME position with the
// matching hash; the two scans walk the run of equal hashes in both
// directions to resolve collisions via real equality.
@Override
public boolean contains(Object o) {
int hash = hashCode(o);
int index = Arrays.binarySearch(hashes, hash);
if (index < 0) {
return false;
}
// going upward
for (int i = index; i >= 0 && hashes[i] == hash; i--) {
if (Utils.equal(o, element(i))) {
return true;
}
}
// going downward
for (int i = index + 1; i < size() && hashes[i] == hash; i++) {
if (Utils.equal(o, element(i))) {
return true;
}
}
return false;
}
@SuppressWarnings("unchecked")
@Override
protected E element(int index) {
// Safe cast: only builderSet(int, E) writes into elements.
return (E) elements[index];
}
@Override
public void builderInit(int size) {
elements = new Object[size];
}
@Override
public void builderSet(int index, E element) {
elements[index] = element;
}
// Builds the hash index, then co-sorts both arrays by hash; the sort
// calls back into at()/swap() below (Sortable).
@Override
public Set<E> builderFinish() {
hashes = new int[elements.length];
for (int i = 0; i < elements.length; i++) {
hashes[i] = hashCode(elements[i]);
}
ArrayQuickSort.<Integer> sort(this, Comparators.<Integer> comparableComparator());
return this;
}
// Sort key for position index (Sortable callback).
@Override
public Integer at(int index) {
return hashes[index];
}
// Keeps hashes[] and elements[] aligned while sorting.
@Override
public void swap(int x, int y) {
int hashX = hashes[x];
hashes[x] = hashes[y];
hashes[y] = hashX;
Object eX = elements[x];
elements[x] = elements[y];
elements[y] = eX;
}
}
| 8,305 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/BinarySearchArrayIndexedHashMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.Comparators;
import com.netflix.zeno.util.collections.algorithms.Sortable;
import com.netflix.zeno.util.collections.algorithms.ArrayQuickSort;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
/**
* Implementation of the BinarySearch Map with the hashCode search index on a
* side
*
* @author tvaliulin
*
* @param <K>
* @param <V>
*/
public class BinarySearchArrayIndexedHashMap<K, V> extends AbstractArrayMap<K, V> implements Sortable<Integer> {
// Side index: hashes[i] == hashCode(key i). Keys/values are interleaved
// (key of pair i at 2*i, value at 2*i+1); both arrays are co-sorted by
// hash in builderFinish so getUndefined can binary search the int[].
protected int[] hashes = null;
protected Object[] keysAndValues = null;
public BinarySearchArrayIndexedHashMap() {
setMap(Collections.<K, V> emptyMap());
}
public BinarySearchArrayIndexedHashMap(Map<K, V> map) {
setMap(map);
}
public BinarySearchArrayIndexedHashMap(Map.Entry<K, V>[] entries) {
setMap(entries);
}
// Range-copy constructor; copying semantics live in AbstractArrayMap.
public BinarySearchArrayIndexedHashMap(AbstractArrayMap<K, V> map, int start, int end) {
super(map, start, end);
}
@Override
public void builderInit(int size) {
keysAndValues = new Object[size * 2];
}
@Override
public void builderPut(int index, K key, V value) {
keysAndValues[index * 2] = key;
keysAndValues[index * 2 + 1] = value;
}
// Builds the hash index, then co-sorts index and entries by hash; the
// sort calls back into at()/swap() below (Sortable).
@Override
public Map<K, V> builderFinish() {
hashes = new int[keysAndValues.length / 2];
for (int i = 0; i < keysAndValues.length / 2; i++) {
hashes[i] = hashCode(keysAndValues[i * 2]);
}
ArrayQuickSort.<Integer> sort(this, Comparators.<Integer> comparableComparator());
return this;
}
// Sort key for position index (Sortable callback).
@Override
public Integer at(int index) {
return hashes[index];
}
// Keeps the hash index aligned with the interleaved entry array.
@Override
public void swap(int x, int y) {
int hashX = hashes[x];
hashes[x] = hashes[y];
hashes[y] = hashX;
Utils.Array.swap(keysAndValues, x, y);
}
@Override
public int size() {
return hashes.length;
}
@SuppressWarnings("unchecked")
@Override
protected K key(int index) {
return (K) keysAndValues[index * 2];
}
@SuppressWarnings("unchecked")
@Override
protected V value(int index) {
return (V) keysAndValues[index * 2 + 1];
}
// Binary search on the hash index, then a bidirectional scan of the run
// of equal hashes to resolve collisions; returns the "undefined"
// sentinel (not null) when the key is absent.
@Override
public Object getUndefined(Object key) {
int hash = hashCode(key);
int index = Arrays.binarySearch(hashes, hash);
if (index < 0) {
return AbstractArrayMap.undefined;
}
// going upward
for (int i = index; i >= 0 && hashes[i] == hash; i--) {
if (Utils.equal(key, key(i))) {
return value(i);
}
}
// going downward
for (int i = index + 1; i < size() && hashes[i] == hash; i++) {
if (Utils.equal(key, key(i))) {
return value(i);
}
}
return AbstractArrayMap.undefined;
}
}
| 8,306 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/OpenAddressing.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import java.util.Arrays;
/**
* Common functionality for the Open Addressing HashSet and HashMaps.
*
* @author dkoszewnik
*
*/
/**
 * Shared helpers for the open-addressing HashSet and HashMap
 * implementations.
 *
 * The hash table is an opaque {@code Object} that is really a byte[],
 * short[] or int[], chosen by how many bits are needed to address every
 * entry index. byte[]/short[] slots are read back unsigned, with the
 * all-ones bit pattern reserved as the "empty bucket" sentinel (-1).
 * Because the entries array interleaves keys and values, the key for
 * entry n lives at entries[n * 2]; n is what gets stored in the table.
 *
 * @author dkoszewnik
 */
public class OpenAddressing {
    /**
     * Allocates a hash table whose bucket count is the next power of two
     * above numEntries / loadFactor, with every bucket marked empty (-1).
     * Uses the narrowest element type that can hold any entry index:
     * byte[] below 256 entries, short[] below 65536, int[] otherwise.
     */
    public static Object newHashTable(int numEntries, float loadFactor) {
        int required = (int) Math.ceil((float) numEntries / loadFactor);
        int capacity = 1 << (32 - Integer.numberOfLeadingZeros(required)); // next power of 2
        if (numEntries < 256) {
            byte[] table = new byte[capacity];
            Arrays.fill(table, (byte) -1);
            return table;
        } else if (numEntries < 65536) {
            short[] table = new short[capacity];
            Arrays.fill(table, (short) -1);
            return table;
        } else {
            int[] table = new int[capacity];
            Arrays.fill(table, -1);
            return table;
        }
    }

    /** Number of buckets in the (byte[]/short[]/int[]) hash table. */
    public static int hashTableLength(Object hashTable) {
        if (hashTable instanceof byte[]) {
            return ((byte[]) hashTable).length;
        }
        if (hashTable instanceof short[]) {
            return ((short[]) hashTable).length;
        }
        return ((int[]) hashTable).length;
    }

    /**
     * Reads the entry index stored at the given bucket, or -1 when the
     * bucket is empty. byte/short values are widened unsigned so their
     * full positive range is usable; only the all-ones pattern (0xFF /
     * 0xFFFF) means empty.
     */
    public static int getHashEntry(Object hashTable, int bucket) {
        if (hashTable instanceof byte[]) {
            int raw = ((byte[]) hashTable)[bucket] & 0xFF;
            return (raw == 0xFF) ? -1 : raw;
        }
        if (hashTable instanceof short[]) {
            int raw = ((short[]) hashTable)[bucket] & 0xFFFF;
            return (raw == 0xFFFF) ? -1 : raw;
        }
        return ((int[]) hashTable)[bucket];
    }

    /** Stores an entry index into the given bucket, narrowing as needed. */
    public static void setHashEntry(Object hashTable, int bucket, int value) {
        if (hashTable instanceof byte[]) {
            ((byte[]) hashTable)[bucket] = (byte) value;
        } else if (hashTable instanceof short[]) {
            ((short[]) hashTable)[bucket] = (short) value;
        } else {
            ((int[]) hashTable)[bucket] = value;
        }
    }

    /**
     * Thomas Wang's commonly used 32-bit mix function: cheap, and gives
     * a good distribution — important for open-addressed hashing with
     * linear probing like the structures in this package.
     */
    public static int rehash(int h) {
        h = ~h + (h << 15);
        h = h ^ (h >>> 12);
        h = h + (h << 2);
        h = h ^ (h >>> 4);
        h = h * 2057;
        h = h ^ (h >>> 16);
        return h;
    }
}
| 8,307 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/AbstractArraySet.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.builder.SetBuilder;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
* Abstract class which helps people to write Array based immutable
* implementations of the Set interface
*
* @author tvaliulin
*
* @param <E>
*/
public abstract class AbstractArraySet<E> extends AbstractSet<E> implements SetBuilder<E> {
public AbstractArraySet() {
}
public AbstractArraySet(Collection<E> from) {
setElements(from);
}
// Copies the source collection into the subclass's storage via the
// SetBuilder protocol, in iteration order.
protected void setElements(Collection<E> from) {
builderInit(from.size());
int i = 0;
for (E element : from) {
builderSet(i++, element);
}
builderFinish();
}
@Override
public abstract int size();
@Override
public abstract boolean contains(Object o);
@Override
public Iterator<E> iterator() {
return new SetIterator();
}
// Default: sets built through this hierarchy are immutable; subclasses
// that support removal override this.
protected void removeElement(int index) {
throw new UnsupportedOperationException();
}
// Positional element access backing the iterator.
protected abstract E element(int index);
// Index-based iterator modeled on AbstractList's Itr.
private class SetIterator implements Iterator<E> {
int cursor; // index of next element to return
int lastRet = -1; // index of last element returned; -1 if no such
@Override
public boolean hasNext() {
return cursor != AbstractArraySet.this.size();
}
@Override
public E next() {
int i = cursor;
if (i >= AbstractArraySet.this.size())
throw new NoSuchElementException();
cursor = i + 1;
lastRet = i;
return (E) AbstractArraySet.this.element(lastRet);
}
@Override
public void remove() {
if (lastRet < 0)
throw new IllegalStateException();
try {
AbstractArraySet.this.removeElement(lastRet);
cursor = lastRet;
lastRet = -1;
// An out-of-bounds failure here means the set changed underneath us.
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
// Hash of an element after the subclass's rehash(); null hashes to 0,
// matching java.util collection conventions.
protected int hashCode(Object o) {
return o == null ? 0 : rehash(o.hashCode());
}
// Identity by default; open-addressing subclasses override with a
// stronger bit-mix.
protected int rehash(int hash) {
return hash;
}
}
| 8,308 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/BinarySearchArrayHashMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.Comparators;
import java.util.Comparator;
import java.util.Map;
/**
* Immutable BinarySearch map with the hashCodeComparator - this comparator does
* not mandate for the keys to be comparable
*
* @author tvaliulin
*
* @param <K>
* @param <V>
*/
public class BinarySearchArrayHashMap<K, V> extends BinarySearchArrayMap<K, V> {
public BinarySearchArrayHashMap() {
super();
}
public BinarySearchArrayHashMap(Map<K, V> map) {
super(map);
}
public BinarySearchArrayHashMap(Map.Entry<K, V>[] entries) {
super(entries);
}
// Orders keys by hash code rather than natural ordering, so K need not
// be Comparable; the resulting iteration order is hash order, not a
// meaningful key order.
@Override
public Comparator<K> comparator() {
return Comparators.hashCodeComparator();
}
}
| 8,309 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/AbstractArraySortedMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.Comparators;
import com.netflix.zeno.util.collections.algorithms.Sortable;
import com.netflix.zeno.util.collections.algorithms.BinarySearch;
import com.netflix.zeno.util.collections.builder.MapBuilder;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Map;
import java.util.SortedMap;
/**
* Abstract class which helps people to write Array based implementations of the
* immutable SortedMap interface
*
* @author tvaliulin
*
* @param <K>
* @param <V>
*/
public abstract class AbstractArraySortedMap<K, V> extends AbstractArrayMap<K, V> implements SortedMap<K, V>, Sortable<K>, Comparable<AbstractArrayMap<K, V>>, MapBuilder<K, V> {
public AbstractArraySortedMap() {
}
// Range-copy constructor; copying semantics live in AbstractArrayMap.
public AbstractArraySortedMap(AbstractArrayMap<K, V> map, int start, int end) {
super(map, start, end);
}
// Ordering of the key array; subclasses decide (natural, hash-based, ...).
@Override
public abstract Comparator<K> comparator();
// Half-open range [fromKey, toKey). Binary search finds an insertion
// point; the linear scans then widen/narrow it across any run of keys
// that compare equal, so duplicates under the comparator stay inside
// the returned view. A null bound means "unbounded on that side".
@SuppressWarnings("unchecked")
@Override
public SortedMap<K, V> subMap(K fromKey, K toKey) {
int start = 0;
if (fromKey != null) {
start = BinarySearch.binarySearch(this, fromKey, (Comparator<Object>) comparator());
start = ((start >= 0) ? start : (-start - 1));
for (int i = start; i >= 0 && i < size() && comparator().compare(key(i), fromKey) >= 0; i--) {
start = i;
}
}
int end = size();
if (toKey != null) {
end = BinarySearch.binarySearch(this, toKey, (Comparator<Object>) comparator());
end = ((end >= 0) ? end : (-end - 1));
for (int i = end; i >= 0 && i < size() && comparator().compare(key(i), toKey) < 0; i++) {
end = i;
}
}
start = Math.max(start, 0);
end = Math.min(end, size());
return newMap(start, end);
}
// Factory for the range view; subclasses return their own type.
public abstract SortedMap<K, V> newMap(int start, int end);
@Override
public SortedMap<K, V> headMap(K toKey) {
return subMap(null, toKey);
}
@Override
public SortedMap<K, V> tailMap(K fromKey) {
return subMap(fromKey, null);
}
@Override
public K firstKey() {
return key(0);
}
@Override
public K lastKey() {
return key(size() - 1);
}
// Sortable callback: sort key at a position.
@Override
public K at(int index) {
return key(index);
}
@Override
public abstract void swap(int x, int y);
// Entry-by-entry lexicographic comparison: shorter map first, then by
// key (this map's comparator), then by value (natural ordering).
// NOTE(review): the type guard tests "instanceof BinarySearchArrayMap"
// even though this class is AbstractArraySortedMap — other subclasses
// fall into the class-name comparison branch. Confirm that is intended.
@Override
public int compareTo(AbstractArrayMap<K, V> o) {
if (o == this)
return 0;
if (!(o instanceof BinarySearchArrayMap)) {
return getClass().getCanonicalName().compareTo(o.getClass().getCanonicalName());
}
Map<K, V> m = (Map<K, V>) o;
if (size() == 0 && m.size() == 0) {
return 0;
}
Iterator<Map.Entry<K, V>> itSelf = entrySet().iterator();
Iterator<Map.Entry<K, V>> itOther = m.entrySet().iterator();
for (;;) {
boolean selfNext = itSelf.hasNext();
boolean otherNext = itOther.hasNext();
if (!selfNext && !otherNext) {
return 0;
}
if (!selfNext && otherNext) {
return -1;
}
if (selfNext && !otherNext) {
return 1;
}
Map.Entry<K, V> selfEntry = itSelf.next();
Map.Entry<K, V> otherEntry = itOther.next();
int keyCompare = comparator().compare(selfEntry.getKey(), otherEntry.getKey());
if (keyCompare != 0) {
return keyCompare;
}
int valueCompare = Comparators.comparableComparator().compare(selfEntry.getValue(), otherEntry.getValue());
if (valueCompare != 0) {
return valueCompare;
}
}
}
}
| 8,310 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/impl/OpenAddressingSortedHashMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.impl;
import com.netflix.zeno.util.collections.Comparators;
import com.netflix.zeno.util.collections.algorithms.Sortable;
import com.netflix.zeno.util.collections.algorithms.ArrayQuickSort;
import com.netflix.zeno.util.collections.algorithms.BinarySearch;
import com.netflix.zeno.util.collections.builder.MapBuilder;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.Map;
import java.util.SortedMap;
/**
* Open Addressing hash map immutable implementation of the SortedMap interface
*
* @author tvaliulin
* @author dkoszevnik
*
* @param <K>
* @param <V>
*/
public class OpenAddressingSortedHashMap<K, V> extends OpenAddressingHashMap<K, V> implements MapBuilder<K, V>, SortedMap<K, V>, Sortable<K> {

    /** Creates an empty sorted map. */
    public OpenAddressingSortedHashMap() {
        setMap(Collections.<K, V> emptyMap());
    }

    /** Creates a sorted map holding a snapshot of the given map's entries. */
    public OpenAddressingSortedHashMap(Map<K, V> map) {
        setMap(map);
    }

    /** View constructor: exposes entries [start, end) of an already-built map. */
    public OpenAddressingSortedHashMap(AbstractArrayMap<K, V> map, int start, int end) {
        super(map, start, end);
    }

    public OpenAddressingSortedHashMap(Map.Entry<K, V>[] entries) {
        setMap(entries);
    }

    /**
     * Completes building: trims the flat key/value array to exactly 2*size
     * slots (so size() below is exact), sorts the entries by key, then lets
     * the superclass build its hash structures over the sorted entries.
     */
    @Override
    public SortedMap<K, V> builderFinish() {
        if (keysAndValues.length > size * 2)
            keysAndValues = Arrays.copyOf(keysAndValues, size * 2);
        if (comparator() != null) {
            ArrayQuickSort.sort(this, comparator());
        }
        super.builderFinish();
        return this;
    }

    /** Sortable: the sortable element at {@code index} is the index-th key. */
    @Override
    public K at(int index) {
        return key(index);
    }

    /** Sortable: swaps two key/value pairs in the flat backing array. */
    @Override
    public void swap(int x, int y) {
        Utils.Array.swap(keysAndValues, x, y);
    }

    @Override
    public int size() {
        // Keys and values are interleaved in one array, so the entry count is
        // half its length. Exact only after builderFinish() has trimmed it.
        return keysAndValues.length / 2;
    }

    @Override
    public Comparator<K> comparator() {
        // Natural ordering; keys are assumed to be Comparable.
        return Comparators.comparableComparator();
    }

    /** Creates a sub-range view over entries [start, end) of this map. */
    public SortedMap<K, V> newMap(int start, int end) {
        return new OpenAddressingSortedHashMap<K, V>(this, start, end);
    }

    /// SortedMap implementation ///

    /**
     * Returns the view of entries with fromKey <= key < toKey; a null bound
     * means unbounded on that side. Binary search locates each bound's
     * position; the linear scans then defensively adjust to the exact
     * boundary if the search landed on an inexact position.
     */
    @SuppressWarnings("unchecked")
    @Override
    public SortedMap<K, V> subMap(K fromKey, K toKey) {
        int start = 0;
        if (fromKey != null) {
            start = BinarySearch.binarySearch(this, fromKey, (Comparator<Object>) comparator());
            start = ((start >= 0) ? start : (-start - 1));
            for (int i = start; i >= 0 && i < size() && comparator().compare(key(i), fromKey) >= 0; i--) {
                start = i;
            }
        }
        int end = size();
        if (toKey != null) {
            end = BinarySearch.binarySearch(this, toKey, (Comparator<Object>) comparator());
            end = ((end >= 0) ? end : (-end - 1));
            for (int i = end; i >= 0 && i < size() && comparator().compare(key(i), toKey) < 0; i++) {
                end = i;
            }
        }
        start = Math.max(start, 0);
        end = Math.min(end, size());
        return newMap(start, end);
    }

    @Override
    public SortedMap<K, V> headMap(K toKey) {
        return subMap(null, toKey);
    }

    @Override
    public SortedMap<K, V> tailMap(K fromKey) {
        return subMap(fromKey, null);
    }

    /**
     * @throws NoSuchElementException if the map is empty, as required by the
     *         SortedMap contract (previously this threw
     *         ArrayIndexOutOfBoundsException).
     */
    @Override
    public K firstKey() {
        if (size() == 0)
            throw new NoSuchElementException();
        return key(0);
    }

    /**
     * @throws NoSuchElementException if the map is empty, as required by the
     *         SortedMap contract (previously this threw
     *         ArrayIndexOutOfBoundsException).
     */
    @Override
    public K lastKey() {
        if (size() == 0)
            throw new NoSuchElementException();
        return key(size() - 1);
    }
}
| 8,311 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/heapfriendly/PhasedHeapFriendlyHashMap.java | /*
*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.heapfriendly;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
/**
* Implementation of {@link Map} that wraps a {@link HeapFriendlyHashMap}
* and exposes methods for switching in and out of a data swap phase. <p />
*
* The data swap phase is entered by calling {@link PhasedHeapFriendlyHashMap#beginDataSwapPhase(int)} <p />
*
* While in the data swap phase, {@link PhasedHeapFriendlyHashMap#put(Object, Object)}
* can be used to fill the map with data. Data will not be available until the data swap phase is complete. <p />
*
* Calling {@link PhasedHeapFriendlyHashMap#endDataSwapPhase()} will end the
* data swap phase and make the data added available
*
* @author drbathgate
*
* @param <K> Key
* @param <V> Value
*/
public class PhasedHeapFriendlyHashMap<K, V> implements Map<K, V> {

    // Recycler shared across map generations so the backing Object[] segments
    // from two cycles ago are reused instead of reallocated.
    private final HeapFriendlyMapArrayRecycler recycler;
    // The map all read operations are served from.
    private HeapFriendlyHashMap<K, V> currentMap;
    // The map under construction; non-null only while in the data swap phase.
    // NOTE(review): neither field is volatile — this presumably relies on
    // single-threaded use or external synchronization; confirm with callers.
    private HeapFriendlyHashMap<K, V> nextMap;

    public PhasedHeapFriendlyHashMap() {
        this.recycler = new HeapFriendlyMapArrayRecycler();
        // Start with an empty published map so reads work before any swap phase.
        this.currentMap = new HeapFriendlyHashMap<K, V>(0, recycler);
    }

    // --- Read operations: always delegate to the published (current) map ---

    @Override
    public int size() {
        return currentMap.size();
    }

    @Override
    public boolean isEmpty() {
        return currentMap.isEmpty();
    }

    @Override
    public boolean containsKey(Object key) {
        return currentMap.containsKey(key);
    }

    @Override
    public boolean containsValue(Object value) {
        return currentMap.containsValue(value);
    }

    @Override
    public V get(Object key) {
        return currentMap.get(key);
    }

    /**
     * Writes go to the in-progress map and only become visible once
     * {@link #endDataSwapPhase()} publishes it.
     *
     * @throws IllegalStateException if not currently in the data swap phase
     */
    @Override
    public V put(K key, V value) {
        if (nextMap != null) {
            return nextMap.put(key, value);
        }
        throw new IllegalStateException("PhasedHeapFriendlyHashMap.put(K, V) only usable when in the data swap phase");
    }

    @Override
    public V remove(Object key) {
        throw new UnsupportedOperationException("PhasedHeapFriendlyHashMap.remove(Object) not supported");
    }

    @Override
    public void putAll(Map<? extends K, ? extends V> m) {
        throw new UnsupportedOperationException("PhasedHeapFriendlyHashMap.putAll(Map) not supported, please use PhasedHeapFriendlyHashMap.put(Object) instead");
    }

    @Override
    public void clear() {
        throw new UnsupportedOperationException("PhasedHeapFriendlyHashMap.clear() not supported");
    }

    @Override
    public Set<K> keySet() {
        return currentMap.keySet();
    }

    @Override
    public Collection<V> values() {
        return currentMap.values();
    }

    @Override
    public Set<java.util.Map.Entry<K, V>> entrySet() {
        return currentMap.entrySet();
    }

    /**
     * Enters data swap phase<p />
     *
     * While in data swap phase, {@link PhasedHeapFriendlyHashMap#put(Object, Object)} can be used<p />
     *
     * @param numOfNewEntries Number of new entries expected to be added during the data swap phase
     * @throws IllegalStateException if already in the data swap phase
     */
    public void beginDataSwapPhase(int numOfNewEntries){
        if (nextMap != null) {
            throw new IllegalStateException("Cannot call PhasedHeapFriendlyHashMap.beginDataSwapPhase(int), already in data swap phase");
        }
        // Make the segments released two cycles ago available for reuse by the
        // map we are about to build.
        recycler.swapCycleObjectArrays();
        nextMap = new HeapFriendlyHashMap<K, V>(numOfNewEntries, recycler);
    }

    /**
     * Ends the data swap phase<p />
     *
     * While out of the data swap phase, using {@link PhasedHeapFriendlyHashMap#put(Object, Object)}
     * will throw an {@link IllegalStateException} <p />
     *
     * @throws IllegalStateException if not currently in the data swap phase
     */
    public void endDataSwapPhase(){
        if (nextMap == null) {
            throw new IllegalStateException("Cannot call PhasedHeapFriendlyHashMap.endDataSwapPhase(), not currently in data swap phase");
        }
        // Publish the newly built map, then return the old map's segments to
        // the recycler and null their contents so stale references die.
        HeapFriendlyHashMap<K, V> temp = currentMap;
        currentMap = nextMap;
        nextMap = null;
        temp.releaseObjectArrays();
        recycler.clearNextCycleObjectArrays();
    }
}
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/heapfriendly/AbstractHeapFriendlyMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.heapfriendly;
import static com.netflix.zeno.util.collections.heapfriendly.HeapFriendlyMapArrayRecycler.INDIVIDUAL_OBJECT_ARRAY_SIZE;
import java.util.AbstractMap;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
/**
*
* The AbstractHeapFriendlyHashMap is an open-addressing, linear probing hash table. There are two implementations,<p/>
*
* HeapFriendlyHashMap - which uses two segmented arrays for keys and values<br/>
* HeapFriendlyDerivableKeyHashMap - which uses a single segmented array for values, and the keys are trivially derivable from the values.<p/>
*
* The segmented arrays are composed individual Object[] arrays which are each 4096 elements long.
*
* @see HeapFriendlyMapArrayRecycler
*
* @author dkoszewnik
*
*/
public abstract class AbstractHeapFriendlyMap<K, V> extends AbstractMap<K, V> {

    /**
     * Reads the element at logical index {@code bucket} from a segmented array
     * (an array of fixed-size Object[] segments of INDIVIDUAL_OBJECT_ARRAY_SIZE
     * elements each).
     */
    protected Object segmentedGet(Object[][] segmentedArray, int bucket) {
        int arrayIndex = bucket / INDIVIDUAL_OBJECT_ARRAY_SIZE;
        int elementIndex = bucket % INDIVIDUAL_OBJECT_ARRAY_SIZE;
        // Previously cast to (V) before the implicit widening back to Object;
        // the cast (and its @SuppressWarnings) did nothing, so both are removed.
        return segmentedArray[arrayIndex][elementIndex];
    }

    /** Writes {@code value} at logical index {@code bucket} of a segmented array. */
    protected void segmentedSet(Object[][] segmentedArray, int bucket, Object value) {
        int arrayIndex = bucket / INDIVIDUAL_OBJECT_ARRAY_SIZE;
        int elementIndex = bucket % INDIVIDUAL_OBJECT_ARRAY_SIZE;
        segmentedArray[arrayIndex][elementIndex] = value;
    }

    /** Returns every backing Object[] segment to its recycler for reuse. */
    public abstract void releaseObjectArrays();

    protected void releaseObjectArrays(Object[][] segmentedArray, HeapFriendlyMapArrayRecycler recycler) {
        for (int i = 0; i < segmentedArray.length; i++) {
            recycler.returnObjectArray(segmentedArray[i]);
        }
    }

    @Override
    public V remove(Object key) {
        throw new UnsupportedOperationException("VMS error: Cannot remove items from a HeapFriendlyMap");
    }

    @Override
    public void putAll(Map<? extends K, ? extends V> m) {
        throw new UnsupportedOperationException("VMS error: HeapFriendlyMap cannot be added to with a specified key. Please use put(V value).");
    }

    @Override
    public void clear() {
        throw new UnsupportedOperationException("VMS error: Cannot clear a HeapFriendlyMap.");
    }

    /**
     * Iterator over the non-null slots of a segmented array. Skips empty
     * buckets; removal is not supported.
     */
    protected class HeapFriendlyMapIterator<T> implements Iterator<T> {
        protected final Object[][] segmentedArray;
        protected final int numBuckets;
        // Index of the next occupied bucket, or numBuckets when exhausted.
        protected int current = -1;

        protected HeapFriendlyMapIterator(Object[][] segmentedArray, int numBuckets) {
            this.segmentedArray = segmentedArray;
            this.numBuckets = numBuckets;
            moveToNext(); // position on the first occupied bucket
        }

        @Override
        public boolean hasNext() {
            return current < numBuckets;
        }

        @Override
        @SuppressWarnings("unchecked")
        public T next() {
            if (current >= numBuckets)
                throw new NoSuchElementException();
            T val = (T) segmentedGet(segmentedArray, current);
            moveToNext();
            return val;
        }

        /** Advances {@code current} past null slots to the next occupied bucket. */
        protected void moveToNext() {
            current++;
            while (current < numBuckets && segmentedGet(segmentedArray, current) == null) {
                current++;
            }
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException("VMS error: Cannot remove from a HeapFriendlyMapIterator");
        }
    }

    /**
     * Base Map.Entry implementation obeying the Map.Entry equals/hashCode
     * contract; setValue is unsupported (entries are immutable views).
     */
    protected static abstract class AbstractHeapFriendlyMapEntry<K, V> implements Map.Entry<K, V> {

        @Override
        public V setValue(V value) {
            throw new UnsupportedOperationException("Cannot set value for HeapFriendlyMap Entry");
        }

        @Override
        public boolean equals(Object o) {
            // instanceof is null-safe: the previous Entry.class.isAssignableFrom(o.getClass())
            // threw NullPointerException for equals(null), violating the Object.equals contract.
            if (o instanceof Entry) {
                Entry<?, ?> other = (Entry<?, ?>) o;
                return (getKey() == null ?
                        other.getKey() == null : getKey().equals(other.getKey())) &&
                       (getValue() == null ?
                        other.getValue() == null : getValue().equals(other.getValue()));
            }
            return false;
        }

        @Override
        public int hashCode() {
            // Map.Entry contract: key hash XOR value hash.
            return (getKey() == null ? 0 : getKey().hashCode()) ^
                   (getValue() == null ? 0 : getValue().hashCode());
        }

        @Override
        public String toString() {
            return getKey() + "=" + getValue();
        }
    }
}
| 8,313 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/heapfriendly/HeapFriendlyHashMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.heapfriendly;
import static com.netflix.zeno.util.collections.heapfriendly.HeapFriendlyMapArrayRecycler.INDIVIDUAL_OBJECT_ARRAY_SIZE;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
/**
*
* The HeapFriendlyHashMap is an open-addressed, linear probing hash table. It uses two segmented arrays, one to hold the keys
* and one to hold the values.
*
* @author dkoszewnik
*
*/
public class HeapFriendlyHashMap<K, V> extends AbstractHeapFriendlyMap<K, V> {

    // Parallel segmented arrays: the key and value of a bucket live at the
    // same logical index in keys and values.
    private final Object[][] keys;
    private final Object[][] values;
    // Always a power of 2, so (hash & (numBuckets - 1)) masks into range.
    private final int numBuckets;
    // Fixed capacity chosen at construction; put() refuses to grow past it.
    private final int maxSize;
    private final HeapFriendlyMapArrayRecycler recycler;
    private int size;

    public HeapFriendlyHashMap(int numEntries) {
        this(numEntries, HeapFriendlyMapArrayRecycler.get());
    }

    /**
     * @param numEntries maximum number of entries this map will ever hold
     * @param recycler   pool supplying the Object[] segments backing the table
     */
    public HeapFriendlyHashMap(int numEntries, HeapFriendlyMapArrayRecycler recycler) {
        int arraySize = numEntries * 10 / 7; // 70% load factor
        arraySize = 1 << (32 - Integer.numberOfLeadingZeros(arraySize)); // next power of 2
        arraySize = Math.max(arraySize, INDIVIDUAL_OBJECT_ARRAY_SIZE);
        this.numBuckets = arraySize;
        this.maxSize = numEntries;
        this.recycler = recycler;
        this.keys = createSegmentedObjectArray(arraySize);
        this.values = createSegmentedObjectArray(arraySize);
    }

    // Assembles a segmented array covering arraySize slots from recycled segments.
    private Object[][] createSegmentedObjectArray(int arraySize) {
        int numArrays = arraySize / INDIVIDUAL_OBJECT_ARRAY_SIZE;
        Object[][] segmentedArray = new Object[numArrays][];
        for(int i=0;i<numArrays;i++) {
            segmentedArray[i] = recycler.getObjectArray();
        }
        return segmentedArray;
    }

    /**
     * Inserts (or replaces) a mapping using linear probing. Returns the
     * previous value for the key, or null if the key was absent.
     *
     * @throws UnsupportedOperationException if the map is at capacity and the
     *         key is not already present
     * @throws NullPointerException if key or value is null
     */
    @SuppressWarnings("unchecked")
    public V put(K key, V value) {
        if(size >= maxSize && !containsKey(key))
            throw new UnsupportedOperationException("Cannot add more elements than " + maxSize);
        if(key == null || value == null)
            throw new NullPointerException("Null keys / values not supported in HeapFriendlyHashMap");
        int hashCode = rehash(key.hashCode());
        /// numBuckets is a power of 2, so the operation [x & (numBuckets - 1)]
        /// is equivalent to [Math.abs(x % numBuckets)]
        int bucket = hashCode & (numBuckets - 1);
        K foundKey = (K) segmentedGet(keys, bucket);
        // Probe forward until we find this key's bucket or an empty slot.
        while(foundKey != null && !foundKey.equals(key)) {
            bucket = (bucket + 1) & (numBuckets - 1);
            foundKey = (K) segmentedGet(keys, bucket);
        }
        V foundValue = (V) segmentedGet(values, bucket);
        segmentedSet(keys, bucket, key);
        segmentedSet(values, bucket, value);
        if(foundValue == null)
            size++; // new key, not a replacement
        return foundValue;
    }

    /** Returns the value mapped to {@code key}, or null if absent (or key is null). */
    @Override
    @SuppressWarnings("unchecked")
    public V get(Object key) {
        if(key == null)
            return null;
        int hashCode = rehash(key.hashCode());
        /// numBuckets is a power of 2, so the operation [x & (numBuckets - 1)]
        /// is equivalent to [Math.abs(x % numBuckets)]
        int bucket = hashCode & (numBuckets - 1);
        K foundKey = (K) segmentedGet(keys, bucket);
        // Linear probe: an empty slot means the key is not present.
        while(foundKey != null) {
            if(foundKey.equals(key)) {
                return (V) segmentedGet(values, bucket);
            }
            bucket = (bucket + 1) & (numBuckets - 1);
            foundKey = (K) segmentedGet(keys, bucket);
        }
        return null;
    }

    @Override
    public boolean containsKey(Object key) {
        if(key == null)
            return false;
        // Values are never null (put() enforces it), so a null get() means absent.
        return get(key) != null;
    }

    // Bit-mixing finalizer to spread out clustered hashCode values before masking.
    private int rehash(int hash) {
        hash = ~hash + (hash << 15);
        hash = hash ^ (hash >>> 12);
        hash = hash + (hash << 2);
        hash = hash ^ (hash >>> 4);
        hash = hash * 2057;
        hash = hash ^ (hash >>> 16);
        return hash;
    }

    @Override
    public int size() {
        return size;
    }

    @Override
    public boolean isEmpty() {
        return size == 0;
    }

    /** O(n): scans all stored values. */
    @Override
    public boolean containsValue(Object value) {
        for(V foundValue : values()) {
            if(foundValue.equals(value)) {
                return true;
            }
        }
        return false;
    }

    /** Unmodifiable view over the occupied key slots. */
    @Override
    public Set<K> keySet() {
        return new AbstractSet<K>() {
            public Iterator<K> iterator() {
                return new HeapFriendlyMapIterator<K>(keys, numBuckets);
            }

            @Override
            public boolean contains(Object value) {
                return containsKey(value);
            }

            @Override
            public int size() {
                return size;
            }
        };
    }

    /** Unmodifiable view over the occupied value slots. */
    @Override
    public Collection<V> values() {
        return new AbstractSet<V>() {
            @Override
            public Iterator<V> iterator() {
                return new HeapFriendlyMapIterator<V>(values, numBuckets);
            }

            @Override
            public int size() {
                return size;
            }
        };
    }

    /** Unmodifiable view of the entries; Entry objects are created on the fly. */
    @Override
    public Set<Map.Entry<K, V>> entrySet() {
        return new AbstractSet<Map.Entry<K, V>>() {
            @Override
            public Iterator<Map.Entry<K, V>> iterator() {
                // Iterate the key array; pull the matching value from the same bucket.
                return new HeapFriendlyMapIterator<Map.Entry<K,V>>(keys, numBuckets) {
                    @Override
                    @SuppressWarnings("unchecked")
                    public Map.Entry<K, V> next() {
                        if(current >= numBuckets)
                            throw new NoSuchElementException();
                        K key = (K) segmentedGet(segmentedArray, current);
                        Entry<K, V> entry = new Entry<K, V>(key, (V) segmentedGet(values, current));
                        moveToNext();
                        return entry;
                    }
                };
            }

            @Override
            public int size() {
                return size;
            }
        };
    }

    @Override
    public void releaseObjectArrays() {
        releaseObjectArrays(keys, recycler);
        releaseObjectArrays(values, recycler);
    }

    /** Immutable key/value pair snapshot handed out by entrySet(). */
    private static class Entry<K, V> extends AbstractHeapFriendlyMapEntry<K, V> {
        private final K key;
        private final V value;

        public Entry(K key, V value) {
            this.key = key;
            this.value = value;
        }

        @Override
        public K getKey() {
            return key;
        }

        @Override
        public V getValue() {
            return value;
        }
    }
}
| 8,314 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/heapfriendly/HeapFriendlyMapArrayRecycler.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.heapfriendly;
import java.util.Arrays;
import java.util.LinkedList;
/**
* Contains two pools of Object[] arrays.<p/>
*
* When Netflix's Video Metadata Service receives a FastBlob delta update, it applies the delta to it's FastBlobStateEngine, then indexes
* many Objects by their primary keys in hash tables.<p/>
*
* Because ParNew is a stop the world event, and ParNew time is directly proportional to the number of Objects which survive after
* creation and must be copied to survivor spaces / OldGen, we can reduce the GC impact of Map creation if we reuse Objects which
* have already been promoted to OldGen.<p/>
*
* We maintain two pools of Object[] arrays. One is for the "current" cycle, and one is for the "next" cycle.
* On each cycle, we take the Object arrays comprising the current HeapFriendlyHashMap segmented arrays, and
* return them to the "next" cycle pool. When we create new HeapFriendlyHashMap objects, we construct the
* segmented arrays with segments from the "current" cycle pool.<p/>
*
* At the beginning of each update cycle, we swap the pointers to the "current" and "next" cycle pools. This way, we're always
* overwriting the data from 2 cycles ago, and the Object arrays just remain in OldGen.
*
* @author dkoszewnik
*
*/
public class HeapFriendlyMapArrayRecycler {

    /** Size of every individual Object[] segment handed out by this recycler. */
    public static final int INDIVIDUAL_OBJECT_ARRAY_SIZE = 4096;

    /** Process-wide shared recycler. */
    private static final HeapFriendlyMapArrayRecycler theInstance = new HeapFriendlyMapArrayRecycler();

    /** Pool drawn from when building this cycle's maps. */
    private LinkedList<Object[]> currentPool;
    /** Pool receiving arrays released this cycle; becomes current after the next swap. */
    private LinkedList<Object[]> nextPool;

    public HeapFriendlyMapArrayRecycler() {
        currentPool = new LinkedList<Object[]>();
        nextPool = new LinkedList<Object[]>();
    }

    /** Returns the shared singleton recycler. */
    public static HeapFriendlyMapArrayRecycler get() {
        return theInstance;
    }

    /**
     * Hands out a segment from the current-cycle pool, allocating a fresh one
     * only when the pool is empty.
     */
    public Object[] getObjectArray() {
        return currentPool.isEmpty()
                ? new Object[INDIVIDUAL_OBJECT_ARRAY_SIZE]
                : currentPool.removeFirst();
    }

    /** Releases a segment so it can be reused after the next cycle swap. */
    public void returnObjectArray(Object[] toReturn) {
        nextPool.addLast(toReturn);
    }

    /** Nulls out every slot of every released segment so stale references die. */
    public void clearNextCycleObjectArrays() {
        for (Object[] recycled : nextPool) {
            Arrays.fill(recycled, null);
        }
    }

    /** Makes the segments released last cycle available for the new cycle. */
    public void swapCycleObjectArrays() {
        LinkedList<Object[]> previouslyCurrent = currentPool;
        currentPool = nextPool;
        nextPool = previouslyCurrent;
    }

    /** Drops all pooled segments entirely. */
    public void clear() {
        currentPool.clear();
        nextPool.clear();
    }
}
| 8,315 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/heapfriendly/HeapFriendlyDerivableKeyHashMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.heapfriendly;
import static com.netflix.zeno.util.collections.heapfriendly.HeapFriendlyMapArrayRecycler.INDIVIDUAL_OBJECT_ARRAY_SIZE;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;
/**
* The HeapFriendlyDerivableKeyHashMap is an open-addressed, linear probing hash table. It uses a single array to hold only the values,
* and assumes that keys are derivable from values. The HeapFriendlyDerivableKeyHashMap may be used to conserve memory.<p/>
*
* HeapFriendlyDerivableKeyHashMap must be extended to override the deriveKey method.<p/>
*
* The key should be derivable from the value without causing any overhead. For example, if K is a field in V, then
* the implementation may be as simple as "return value.getKey();".<p/>
*
* However, if a compound key is required, or if any non-trivial amount of work must be done to derive the key, the
* HeapFriendlyDerivableKeyHashMap may not be appropriate. Instead see {@link HeapFriendlyHashMap}
*
* @author dkoszewnik
*
*/
public abstract class HeapFriendlyDerivableKeyHashMap<K, V> extends AbstractHeapFriendlyMap<K, V> {

    // Single segmented array of values; keys are derived on demand via deriveKey().
    private final Object[][] values;
    // Always a power of 2, so (hash & (numBuckets - 1)) masks into range.
    private final int numBuckets;
    // Fixed capacity chosen at construction.
    private final int maxSize;
    private final HeapFriendlyMapArrayRecycler recycler;
    private int size;

    protected HeapFriendlyDerivableKeyHashMap(int numEntries) {
        this(numEntries, HeapFriendlyMapArrayRecycler.get());
    }

    /**
     * @param numEntries maximum number of entries this map will ever hold
     * @param recycler   pool supplying the Object[] segments backing the table
     */
    protected HeapFriendlyDerivableKeyHashMap(int numEntries, HeapFriendlyMapArrayRecycler recycler) {
        int arraySize = numEntries * 10 / 7; // 70% load factor
        arraySize = 1 << (32 - Integer.numberOfLeadingZeros(arraySize)); // next power of 2
        arraySize = Math.max(arraySize, INDIVIDUAL_OBJECT_ARRAY_SIZE);
        this.numBuckets = arraySize;
        this.maxSize = numEntries;
        this.recycler = recycler;
        values = createSegmentedObjectArray(arraySize);
    }

    // Assembles a segmented array covering arraySize slots from recycled segments.
    private Object[][] createSegmentedObjectArray(int arraySize) {
        int numArrays = arraySize / INDIVIDUAL_OBJECT_ARRAY_SIZE;
        Object[][] segmentedArray = new Object[numArrays][];
        for (int i = 0; i < numArrays; i++) {
            segmentedArray[i] = recycler.getObjectArray();
        }
        return segmentedArray;
    }

    /**
     * Stores {@code value} under its derived key using linear probing.
     * Returns the previous value mapped to that key, or null if absent.
     *
     * @throws NullPointerException if the derived key is null
     * @throws UnsupportedOperationException if the map is at capacity and the
     *         key is not already present
     */
    @SuppressWarnings("unchecked")
    public V put(V value) {
        // Derive the key once up front: previously it was re-derived for the
        // capacity check, the null check, and on every probe iteration.
        K key = deriveKey(value);
        if (key == null) {
            // Must precede any probing; probing first threw a bare NPE from
            // key.hashCode() instead of this descriptive one.
            throw new NullPointerException("Null keys not allowed in HeapFriendlyDerivableKeyHashMap");
        }
        // ">=" rather than "==" for consistency with HeapFriendlyHashMap and
        // safety should size ever exceed maxSize.
        if (size >= maxSize && !containsKey(key))
            throw new UnsupportedOperationException("Cannot add more elements than " + maxSize);
        int hashCode = rehash(key.hashCode());
        /// numBuckets is a power of 2, so the operation [x & (numBuckets - 1)]
        /// is equivalent to [Math.abs(x % numBuckets)]
        int bucket = hashCode & (numBuckets - 1);
        V foundValue = (V) segmentedGet(values, bucket);
        // Probe forward until we find this key's bucket or an empty slot.
        while (foundValue != null && !deriveKey(foundValue).equals(key)) {
            bucket = (bucket + 1) & (numBuckets - 1);
            foundValue = (V) segmentedGet(values, bucket);
        }
        segmentedSet(values, bucket, value);
        if (foundValue == null)
            size++; // new key, not a replacement
        return foundValue;
    }

    /** Returns the value whose derived key equals {@code key}, or null if absent. */
    @SuppressWarnings("unchecked")
    public V get(Object key) {
        if (key == null)
            return null; // consistent with HeapFriendlyHashMap.get(); previously threw NPE
        int hashCode = rehash(key.hashCode());
        int bucket = hashCode & (numBuckets - 1);
        V foundValue = (V) segmentedGet(values, bucket);
        // Linear probe: an empty slot means the key is not present.
        while (foundValue != null) {
            if (deriveKey(foundValue).equals(key))
                return foundValue;
            bucket = (bucket + 1) & (numBuckets - 1);
            foundValue = (V) segmentedGet(values, bucket);
        }
        return null;
    }

    @Override
    public int size() {
        return size;
    }

    @Override
    public boolean isEmpty() {
        return size == 0;
    }

    @Override
    public boolean containsKey(Object key) {
        return get(key) != null;
    }

    @Override
    @SuppressWarnings("unchecked")
    public boolean containsValue(Object value) {
        if (value == null)
            return false;
        // Relies on the derivable-key invariant: a value is present iff its
        // derived key maps to a (non-null) value.
        return get(deriveKey((V) value)) != null;
    }

    @Override
    public void releaseObjectArrays() {
        releaseObjectArrays(values, recycler);
    }

    /** Unmodifiable key view; keys are derived on the fly during iteration. */
    @Override
    public Set<K> keySet() {
        return new AbstractSet<K>() {
            @Override
            public Iterator<K> iterator() {
                return new HeapFriendlyMapIterator<K>(values, numBuckets) {
                    @Override
                    @SuppressWarnings("unchecked")
                    public K next() {
                        // Exhaustion check was missing: an exhausted iterator threw
                        // ArrayIndexOutOfBoundsException instead of the
                        // NoSuchElementException required by the Iterator contract.
                        if (current >= numBuckets)
                            throw new NoSuchElementException();
                        K key = deriveKey((V) segmentedGet(values, current));
                        moveToNext();
                        return key;
                    }
                };
            }

            @Override
            public boolean contains(Object value) {
                return containsKey(value);
            }

            @Override
            public int size() {
                return size;
            }
        };
    }

    /** Unmodifiable view over the stored values. */
    @Override
    public Collection<V> values() {
        return new AbstractSet<V>() {
            @Override
            public Iterator<V> iterator() {
                return new HeapFriendlyMapIterator<V>(values, numBuckets);
            }

            @Override
            public int size() {
                return size;
            }
        };
    }

    /** Unmodifiable entry view; Entry objects are created on the fly. */
    @Override
    public Set<Entry<K, V>> entrySet() {
        return new AbstractSet<Entry<K, V>>() {
            @Override
            public Iterator<Entry<K, V>> iterator() {
                return new HeapFriendlyMapIterator<Entry<K, V>>(values, numBuckets) {
                    @Override
                    @SuppressWarnings("unchecked")
                    public Entry<K, V> next() {
                        if (current >= numBuckets)
                            throw new NoSuchElementException();
                        Entry<K, V> entry = new DerivableKeyHashMapEntry((V) segmentedGet(segmentedArray, current));
                        moveToNext();
                        return entry;
                    }
                };
            }

            @Override
            public int size() {
                return size;
            }
        };
    }

    /**
     * Each implementation of HeapFriendlyDerivableKeyHashMap must be overridden to implement the deriveKey method.
     *
     * The key should be derivable from the value without causing any overhead. For example, if K is a field in V, then
     * the implementation may be as simple as "return value.getKey();".
     *
     * However, if a compound key is required, or if any non-trivial amount of work must be done to derive the key, the
     * HeapFriendlyDerivableKeyHashMap may not be appropriate. Instead see {@link HeapFriendlyHashMap}
     *
     */
    protected abstract K deriveKey(V value);

    @Override
    public V put(K key, V value) {
        throw new UnsupportedOperationException("VMS error: HeapFriendlyDerivableKeyMap cannot be added to with a specified key. Please use put(V value).");
    }

    // Bit-mixing finalizer to spread out clustered hashCode values before masking.
    private int rehash(int hash) {
        hash = ~hash + (hash << 15);
        hash = hash ^ (hash >>> 12);
        hash = hash + (hash << 2);
        hash = hash ^ (hash >>> 4);
        hash = hash * 2057;
        hash = hash ^ (hash >>> 16);
        return hash;
    }

    /** Entry whose key is derived lazily from the stored value. */
    private class DerivableKeyHashMapEntry extends AbstractHeapFriendlyMapEntry<K, V> {
        private final V value;

        DerivableKeyHashMapEntry(V value) {
            this.value = value;
        }

        @Override
        public K getKey() {
            return deriveKey(value);
        }

        @Override
        public V getValue() {
            return value;
        }
    }
}
| 8,316 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/algorithms/Sortable.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.algorithms;
/**
* Interface to array-like structures. It allow using those structures with
* Binary Search and quick sort algorithms
*
* @author tvaliulin
*
* @param <V>
*/
public interface Sortable<V> {

    /** Returns the element at the given position. */
    V at(int index);

    /** Exchanges the elements at positions {@code i1} and {@code i2}. */
    void swap(int i1, int i2);

    /** Returns the number of elements. */
    int size();
}
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/algorithms/BinarySearch.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.algorithms;
import java.util.Comparator;
/**
*
* Flavor of BinarySearch algorithm which works with Array interface
*
* @author tvaliulin
*
*/
public class BinarySearch {

    /**
     * Validates that {@code [fromIndex, toIndex)} is a legal range for a
     * structure of the given length.
     *
     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
     * @throws ArrayIndexOutOfBoundsException if {@code fromIndex < 0} or
     *         {@code toIndex > length}
     */
    private static void rangeCheck(int length, int fromIndex, int toIndex) {
        if (fromIndex > toIndex)
            throw new IllegalArgumentException("fromIndex(" + fromIndex + ") > toIndex(" + toIndex + ")");
        if (fromIndex < 0)
            throw new ArrayIndexOutOfBoundsException(fromIndex);
        if (toIndex > length)
            throw new ArrayIndexOutOfBoundsException(toIndex);
    }

    /**
     * Binary-searches the range {@code [fromIndex, toIndex)} of {@code a} for
     * {@code key}. The range must already be sorted in ascending order
     * according to {@code c}; otherwise the result is undefined. If several
     * elements compare equal to {@code key}, there is no guarantee which one
     * is found.
     *
     * @param a         the structure to search
     * @param fromIndex index of the first element (inclusive) to search
     * @param toIndex   index of the last element (exclusive) to search
     * @param key       the value to locate
     * @param c         the ordering of the elements; must not be null
     * @return the index of {@code key} if it is present within the range;
     *         otherwise {@code -(insertionPoint) - 1}, where the insertion
     *         point is the index at which {@code key} would be inserted to
     *         keep the range sorted. The result is {@code >= 0} if and only
     *         if the key was found.
     * @throws NullPointerException if {@code c} is null
     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
     * @throws ArrayIndexOutOfBoundsException if {@code fromIndex < 0} or
     *         {@code toIndex > a.size()}
     */
    public static <T> int binarySearch(Sortable<T> a, int fromIndex, int toIndex, T key, Comparator<? super T> c) {
        rangeCheck(a.size(), fromIndex, toIndex);
        return binarySearch0(a, fromIndex, toIndex, key, c);
    }

    /**
     * Searches the entire structure; see
     * {@link #binarySearch(Sortable, int, int, Object, Comparator)}.
     */
    public static <T> int binarySearch(Sortable<T> a, T key, Comparator<? super T> c) {
        return binarySearch(a, 0, a.size(), key, c);
    }

    // Core search loop; assumes the range has already been validated.
    private static <T> int binarySearch0(Sortable<T> a, int fromIndex, int toIndex, T key, Comparator<? super T> c) {
        if (c == null) {
            throw new NullPointerException();
        }
        int lo = fromIndex;
        int hi = toIndex - 1;
        while (lo <= hi) {
            int mid = (lo + hi) >>> 1; // unsigned shift avoids overflow of (lo + hi) / 2
            int cmp = c.compare(a.at(mid), key);
            if (cmp == 0) {
                return mid;       // key found
            } else if (cmp < 0) {
                lo = mid + 1;     // key lies in the upper half
            } else {
                hi = mid - 1;     // key lies in the lower half
            }
        }
        return -(lo + 1);         // key not found: encode insertion point
    }
}
| 8,318 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/algorithms/ArrayQuickSort.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.algorithms;
import java.util.Comparator;
/**
* A simple quicksort implementation.
*
* @author dkoszewnik
*
*/
public class ArrayQuickSort {

    /**
     * Sorts the given {@link Sortable} in place, in the order induced by
     * {@code comparator}.
     * <p>
     * The previous implementation rotated pivot choices through a static
     * mutable counter, which raced when two threads sorted concurrently and
     * leaked state between calls.  Pivot selection is now a pure function of
     * the range being partitioned, so this class holds no shared state and
     * concurrent sorts of independent Sortables are safe.
     *
     * @param arr        the data to sort (mutated in place via {@code swap})
     * @param comparator the ordering to sort by
     */
    public static <E> void sort(Sortable<E> arr, Comparator<E> comparator) {
        quicksort(arr, comparator, 0, arr.size() - 1);
    }

    /** Recursively sorts arr[from..to], both endpoints inclusive. */
    private static <E> void quicksort(Sortable<E> arr, Comparator<E> comparator, int from, int to) {
        if(to > from) {
            // Deterministic, overflow-safe midpoint pivot; no shared state.
            int pivotIndex = from + ((to - from) >>> 1);
            pivotIndex = pivot(arr, comparator, from, to, pivotIndex);
            quicksort(arr, comparator, from, pivotIndex - 1);
            quicksort(arr, comparator, pivotIndex + 1, to);
        }
    }

    /**
     * Partitions arr[from..to] around the element at pivotIndex and returns
     * the pivot's final resting position.
     */
    private static <E> int pivot(Sortable<E> arr, Comparator<E> comparator, int from, int to, int pivotIndex) {
        E pivotValue = arr.at(pivotIndex);

        arr.swap(pivotIndex, to);   // park the pivot at the end of the range

        for(int i=from;i<to;i++) {
            if(comparator.compare(arr.at(i), pivotValue) <= 0) {
                arr.swap(i, from);  // move <= pivot elements to the front
                from++;
            }
        }

        arr.swap(from, to);         // drop the pivot into its final slot

        return from;
    }
}
| 8,319 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/builder/ListBuilder.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.builder;
import java.util.List;
/**
* List builder interface which facilitates creation of Lists in serializer
*
* @author tvaliulin
*
* @param <E>
*/
public interface ListBuilder<E> {
    /** Prepares the builder for a list of exactly {@code size} elements. */
    void builderInit(int size);
    /** Supplies the element for position {@code index}; call after {@link #builderInit(int)}. */
    void builderSet(int index, E element);
    /** Completes building and returns the resulting list. */
    List<E> builderFinish();
}
| 8,320 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/builder/SetBuilder.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.builder;
import java.util.Set;
/**
* Set builder interface which facilitates creation of Sets in serializer
*
* @author tvaliulin
*
* @param <E>
*/
public interface SetBuilder<E> {
    /** Prepares the builder for a set of {@code size} elements. */
    void builderInit(int size);
    /** Supplies one element; {@code index} is its ordinal (implementations may ignore it). */
    void builderSet(int index, E element);
    /** Completes building and returns the resulting set. */
    Set<E> builderFinish();
}
| 8,321 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/builder/Builders.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.builder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
/**
* Java Utils containers builders
*
* @author tvaliulin
*
*/
public class Builders {

    /** {@link MapBuilder} backed by a {@link HashMap}. */
    public static class HashMapBuilder<K, V> implements MapBuilder<K, V> {

        HashMap<K, V> map;

        @Override
        public void builderInit(int size) {
            map = new HashMap<K, V>(size);
        }

        @Override
        public void builderPut(int index, K key, V value) {
            map.put(key, value);
        }

        @Override
        public Map<K, V> builderFinish() {
            return map;
        }
    }

    /** {@link MapBuilder} producing a key-sorted {@link TreeMap}. */
    public static class TreeMapBuilder<K, V> implements MapBuilder<K, V> {

        TreeMap<K, V> map;

        @Override
        public void builderInit(int size) {
            // TreeMap takes no capacity hint, so the size argument is unused.
            map = new TreeMap<K, V>();
        }

        @Override
        public void builderPut(int index, K key, V value) {
            map.put(key, value);
        }

        @Override
        public SortedMap<K, V> builderFinish() {
            return map;
        }
    }

    /** {@link ListBuilder} backed by an {@link ArrayList}. */
    public static class ArrayListBuilder<E> implements ListBuilder<E> {

        List<E> list;

        @Override
        public void builderInit(int size) {
            list = new ArrayList<E>(size);
        }

        @Override
        public void builderSet(int index, E element) {
            // The index is ignored: elements are appended in call order.
            list.add(element);
        }

        @Override
        public List<E> builderFinish() {
            return list;
        }
    }

    /** {@link SetBuilder} backed by a {@link HashSet}. */
    public static class HashSetBuilder<E> implements SetBuilder<E> {

        Set<E> set;

        @Override
        public void builderInit(int size) {
            set = new HashSet<E>(size);
        }

        @Override
        public void builderSet(int index, E element) {
            // The index is ignored: a HashSet has no positional structure.
            set.add(element);
        }

        @Override
        public Set<E> builderFinish() {
            return set;
        }
    }
}
| 8,322 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections | Create_ds/zeno/src/main/java/com/netflix/zeno/util/collections/builder/MapBuilder.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.util.collections.builder;
import java.util.Map;
/**
 * Map builder interface which facilitates creation of Maps in serializer
 *
 * @author tvaliulin
 *
 * @param <K> the key type
 * @param <V> the value type
 */
public interface MapBuilder<K, V> {
    /** Prepares the builder for a map of {@code size} entries. */
    void builderInit(int size);
    /** Supplies one entry; {@code index} is its ordinal (implementations may ignore it). */
    void builderPut(int index, K key, V value);
    /** Completes building and returns the resulting map. */
    Map<K, V> builderFinish();
}
| 8,323 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/hash/HashGenericRecord.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.hash;
import com.netflix.zeno.hash.HashGenericRecordSerializers.Serializer;
import com.netflix.zeno.serializer.NFSerializationRecord;
/**
*
* @author tvaliulin
*
*/
public final class HashGenericRecord extends NFSerializationRecord {

    // Accumulates everything written to this record; order-dependent by default.
    HashAlgorithm hasher;

    /** Creates a record backed by the default order-dependent hash. */
    public HashGenericRecord() {
        this(new HashOrderDependent());
    }

    /** Creates a record backed by the supplied hash accumulator. */
    public HashGenericRecord(HashAlgorithm hasher) {
        this.hasher = hasher;
    }

    /** Reading values back is not supported; this record only accumulates a hash. */
    public Object get(int arg0) {
        throw new UnsupportedOperationException();
    }

    /** Mixes the field index followed by the value into the hash. */
    public void put(int arg0, Object arg1) {
        write(arg0);
        write(arg1);
    }

    /** Mixes the field name, a ":" separator, then the value into the hash. */
    public void put(String arg0, Object arg1) {
        write(arg0);
        write(":");
        write(arg1);
    }

    // Dispatches a single value into the hasher:
    //   null            -> the constant 0
    //   enums           -> their name() only
    //   primitive array -> the matching primitive-array serializer
    //   Object[]        -> each element recursively (no separators between elements)
    //   anything else   -> the per-class serializer registered for its exact type
    // Unknown types and IOExceptions surface as RuntimeException.
    private void write(Object obj) {
        try {
            if (obj == null) {
                hasher.write(0);
                return;
            }
            if (obj.getClass().isEnum()) {
                hasher.write(((Enum<?>) obj).name());
            } else if (obj.getClass().isArray()) {
                if (obj.getClass().getComponentType().isPrimitive()) {
                    Serializer serializer = HashGenericRecordSerializers.getPrimitiveArraySerializer(obj.getClass().getComponentType());
                    if (serializer == null) {
                        throw new RuntimeException("Can't find serializer for array of type:" + obj.getClass());
                    }
                    serializer.serialize(hasher, obj);
                } else {
                    Object[] objects = (Object[]) obj;
                    for (Object object : objects) {
                        write(object);
                    }
                }
            } else {
                Serializer serializer = HashGenericRecordSerializers.getTypeSerializer(obj.getClass());
                if (serializer == null) {
                    throw new RuntimeException("Can't find serializer for type:" + obj.getClass());
                }
                serializer.serialize(hasher, obj);
            }
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

    /** Returns the hash of everything written so far (delegates to the hasher). */
    public byte[] hash() {
        return hasher.bytes();
    }
}
| 8,324 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/hash/HashOrderDependent.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.hash;
import java.io.DataOutputStream;
import java.io.IOException;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import org.apache.commons.io.output.NullOutputStream;
/**
* Implements hashing algorithm which is order dependent
*
* @author tvaliulin
*
*/
public class HashOrderDependent implements HashAlgorithm
{
    // MD5 digest over every byte written through dataOutputStream.
    MessageDigest digest;
    // Routes written bytes into the digest; the bytes themselves are discarded
    // (NullOutputStream), only the digest is retained.
    DigestOutputStream digestOutputStream;
    // Encodes primitives to bytes (DataOutput big-endian encoding) before digesting.
    DataOutputStream dataOutputStream;

    // Sets up the MD5 digest and the stream chain that feeds it.
    public HashOrderDependent(){
        try{
            digest = MessageDigest.getInstance("MD5");
        } catch ( Exception ex ) {
            throw new RuntimeException(ex);
        }
        digestOutputStream = new DigestOutputStream(NullOutputStream.NULL_OUTPUT_STREAM, digest);
        dataOutputStream = new DataOutputStream(digestOutputStream);
    }

    // NOTE(review): this writes only the low 8 bits of the char
    // (OutputStream.write(int)), whereas write(char[]) below writes 2 bytes
    // per char via writeChar -- confirm the asymmetry is intended.  Changing
    // it would change existing hash values.
    @Override
    public void write(char b) throws IOException {
        dataOutputStream.write(b);
    }

    @Override
    public void write(boolean b) throws IOException {
        dataOutputStream.writeBoolean(b);
    }

    @Override
    public void write(long b) throws IOException {
        dataOutputStream.writeLong(b);
    }

    @Override
    public void write(float b) throws IOException {
        dataOutputStream.writeFloat(b);
    }

    @Override
    public void write(double b) throws IOException {
        dataOutputStream.writeDouble(b);
    }

    // Strings are digested in modified-UTF-8 with a 2-byte length prefix.
    @Override
    public void write(String b) throws IOException {
        dataOutputStream.writeUTF(b);
    }

    @Override
    public void write(byte[] b) throws IOException {
        dataOutputStream.write(b);
    }

    // Arrays are digested element by element, with no length prefix.
    @Override
    public void write(char[] b) throws IOException {
        for(char c : b){
            dataOutputStream.writeChar(c);
        }
    }

    @Override
    public void write(boolean[] b) throws IOException {
        for(boolean c : b){
            dataOutputStream.writeBoolean(c);
        }
    }

    @Override
    public void write(short[] b) throws IOException {
        for(short c : b){
            dataOutputStream.writeShort(c);
        }
    }

    @Override
    public void write(int[] b) throws IOException {
        for(int c : b){
            dataOutputStream.writeInt(c);
        }
    }

    @Override
    public void write(long[] b) throws IOException {
        for(long c : b){
            dataOutputStream.writeLong(c);
        }
    }

    @Override
    public void write(float[] b) throws IOException {
        for(float c : b){
            dataOutputStream.writeFloat(c);
        }
    }

    @Override
    public void write(double[] b) throws IOException {
        for(double c : b){
            dataOutputStream.writeDouble(c);
        }
    }

    // Flushes/closes the stream chain and returns the MD5 digest.  No further
    // writes should follow (the underlying streams are closed here).
    @Override
    public byte[] bytes()
    {
        try {
            digestOutputStream.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return digest.digest();
    }
}
| 8,325 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/hash/HashOrderIndependent.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.hash;
import java.io.IOException;
/**
* OrderIndependent implementation of the hashing algorithm
*
* @author tvaliulin
*/
public final class HashOrderIndependent implements HashAlgorithm {

    // TODO : increase capacity of the accumulator by at least one more long
    // 64-bit accumulator.  Each written value is mixed via hash() and then
    // ADDED in; addition is commutative, so the final hash does not depend on
    // the order in which values were written.
    long hashCode = 0;

    /**
     * Creates an order-independent hasher with a zeroed accumulator.
     */
    public HashOrderIndependent() {
        super();
    }

    // Bit-spreading step applied to each value before it is added into the
    // accumulator.
    private static long hash(long h) {
        // This function ensures that hashCodes that differ only by
        // constant multiples at each bit position have a bounded
        // number of collisions (approximately 8 at default load factor).
        h ^= (h >>> 20) ^ (h >>> 12);
        return h ^ (h >>> 7) ^ (h >>> 4);
    }

    // Mixes a "char" type tag, then the character's numeric value.
    @Override
    public void write(char b) throws IOException {
        write("char");
        write((long) b);
    }

    // Mixes a "boolean" type tag, then a distinct sentinel constant for
    // true vs. false (routed through write(long)).
    @Override
    public void write(boolean b) throws IOException {
        write("boolean");
        write((b ? 0xf00bf00b : 0xf81bc437));
    }

    // Mixes a "long" type tag, then adds the spread value into the accumulator.
    @Override
    public void write(long b) throws IOException {
        write("long");
        hashCode = hashCode + hash(b);
    }

    // Mixes a "float" type tag, then the float's raw bit pattern.
    @Override
    public void write(float b) throws IOException {
        write("float");
        write(Float.floatToIntBits(b));
    }

    // Mixes a "double" type tag, then the double's raw bit pattern.
    @Override
    public void write(double b) throws IOException {
        write("double");
        write(Double.doubleToLongBits(b));
    }

    // Mixes a String-style (31x + c) rolling code of the characters.
    // NOTE(review): unlike every other writer, no "String" type tag is mixed
    // in first -- confirm intended.  Changing it would change existing hashes.
    @Override
    public void write(String b) throws IOException {
        long code = 0;
        for (int i = 0; i < b.length(); i++) {
            code = 31 * code + b.charAt(i);
        }
        hashCode += hash(code);
    }

    // Array writers: type tag, then a single (31x + element) rolling code over
    // the whole array -- so element ORDER matters within one array, but the
    // position of the array among other written values does not.
    @Override
    public void write(byte[] b) throws IOException {
        write("byte[]");
        long code = 0;
        for (int i = 0; i < b.length; i++) {
            code = 31 * code + b[i];
        }
        write(code);
    }

    @Override
    public void write(char[] b) throws IOException {
        write("char[]");
        long code = 0;
        for (int i = 0; i < b.length; i++) {
            code = 31 * code + b[i];
        }
        write(code);
    }

    // Booleans contribute 2 for true, 1 for false.
    @Override
    public void write(boolean[] b) throws IOException {
        write("boolean[]");
        long code = 0;
        for (int i = 0; i < b.length; i++) {
            code = 31 * code + (b[i] ? 2 : 1);
        }
        write(code);
    }

    @Override
    public void write(short[] b) throws IOException {
        write("short[]");
        long code = 0;
        for (int i = 0; i < b.length; i++) {
            code = 31 * code + b[i];
        }
        write(code);
    }

    @Override
    public void write(int[] b) throws IOException {
        write("int[]");
        long code = 0;
        for (int i = 0; i < b.length; i++) {
            code = 31 * code + b[i];
        }
        write(code);
    }

    @Override
    public void write(long[] b) throws IOException {
        write("long[]");
        long code = 0;
        for (int i = 0; i < b.length; i++) {
            code = 31 * code + b[i];
        }
        write(code);
    }

    @Override
    public void write(float[] b) throws IOException {
        write("float[]");
        long code = 0;
        for (int i = 0; i < b.length; i++) {
            code = 31 * code + Float.floatToIntBits(b[i]);
        }
        write(code);
    }

    @Override
    public void write(double[] b) throws IOException {
        write("double[]");
        long code = 0;
        for (int i = 0; i < b.length; i++) {
            code = 31 * code + Double.doubleToLongBits(b[i]);
        }
        write(code);
    }

    // Big-endian serialization of the 64-bit accumulator.
    @Override
    public byte[] bytes() {
        return new byte[] { (byte) ((hashCode >> 56) & 0xff), (byte) ((hashCode >> 48) & 0xff), (byte) ((hashCode >> 40) & 0xff), (byte) ((hashCode >> 32) & 0xff), (byte) ((hashCode >> 24) & 0xff), (byte) ((hashCode >> 16) & 0xff), (byte) ((hashCode >> 8) & 0xff), (byte) ((hashCode >> 0) & 0xff), };
    }
}
| 8,326 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/hash/HashAlgorithm.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.hash;
import java.io.IOException;
/**
* Hashing algorithm interface
*
* @author tvaliulin
*/
public interface HashAlgorithm {
    /** Mixes a single character into the hash. */
    public abstract void write(char b) throws IOException;
    /** Mixes a boolean into the hash. */
    public abstract void write(boolean b) throws IOException;
    /** Mixes a long (also used for narrower integral types) into the hash. */
    public abstract void write(long b) throws IOException;
    /** Mixes a float into the hash. */
    public abstract void write(float b) throws IOException;
    /** Mixes a double into the hash. */
    public abstract void write(double b) throws IOException;
    /** Mixes a String into the hash. */
    public abstract void write(String b) throws IOException;
    /** Mixes a byte array into the hash. */
    public abstract void write(byte[] b) throws IOException;
    /** Mixes a char array into the hash. */
    public abstract void write(char[] b) throws IOException;
    /** Mixes a boolean array into the hash. */
    public abstract void write(boolean[] b) throws IOException;
    /** Mixes a short array into the hash. */
    public abstract void write(short[] b) throws IOException;
    /** Mixes an int array into the hash. */
    public abstract void write(int[] b) throws IOException;
    /** Mixes a long array into the hash. */
    public abstract void write(long[] b) throws IOException;
    /** Mixes a float array into the hash. */
    public abstract void write(float[] b) throws IOException;
    /** Mixes a double array into the hash. */
    public abstract void write(double[] b) throws IOException;
    /**
     * @return the hash of the previously written entities
     */
    public abstract byte[] bytes();
}
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/hash/HashGenericRecordSerializers.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.hash;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* A bunch of primitive serializers with corresponding array serializers
* @author tvaliulin
*/
class HashGenericRecordSerializers {

    /** Writes one (already type-matched) value into the supplied hash accumulator. */
    public static interface Serializer {
        void serialize(HashAlgorithm hasher, Object obj) throws IOException;
    }

    // Scalar serializers, keyed by the boxed class and (where one exists) the primitive TYPE.
    static Map<Class<?>, Serializer> serializers = new HashMap<Class<?>, Serializer>();
    // Serializers for arrays of primitives, keyed by the primitive component TYPE.
    static Map<Class<?>, Serializer> primitiveArraySerializers = new HashMap<Class<?>, Serializer>();

    // Constructing static serializers per type
    static {
        // Scalar serializers: each casts to its boxed type and delegates to the
        // matching HashAlgorithm.write overload.
        {
            Serializer stringSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((String) obj);
                }
            };
            serializers.put(String.class, stringSerializer);
            Serializer doubleSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((Double) obj);
                }
            };
            serializers.put(Double.class, doubleSerializer);
            serializers.put(Double.TYPE, doubleSerializer);
            Serializer floatSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((Float) obj);
                }
            };
            serializers.put(Float.class, floatSerializer);
            serializers.put(Float.TYPE, floatSerializer);
            Serializer longSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((Long) obj);
                }
            };
            serializers.put(Long.class, longSerializer);
            serializers.put(Long.TYPE, longSerializer);
            Serializer integerSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((Integer) obj);
                }
            };
            serializers.put(Integer.class, integerSerializer);
            serializers.put(Integer.TYPE, integerSerializer);
            Serializer shortSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((Short) obj);
                }
            };
            serializers.put(Short.class, shortSerializer);
            serializers.put(Short.TYPE, shortSerializer);
            Serializer byteSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((Byte) obj);
                }
            };
            serializers.put(Byte.class, byteSerializer);
            serializers.put(Byte.TYPE, byteSerializer);
            Serializer booleanSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((Boolean) obj);
                }
            };
            serializers.put(Boolean.class, booleanSerializer);
            serializers.put(Boolean.TYPE, booleanSerializer);
            Serializer characterSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((Character) obj);
                }
            };
            serializers.put(Character.class, characterSerializer);
            serializers.put(Character.TYPE, characterSerializer);
        }
        // Primitive-array serializers: each casts to the primitive array type
        // and delegates to the matching HashAlgorithm.write array overload.
        {
            Serializer doubleSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((double[]) obj);
                }
            };
            primitiveArraySerializers.put(Double.TYPE, doubleSerializer);
            Serializer floatSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((float[]) obj);
                }
            };
            primitiveArraySerializers.put(Float.TYPE, floatSerializer);
            Serializer longSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((long[]) obj);
                }
            };
            primitiveArraySerializers.put(Long.TYPE, longSerializer);
            Serializer integerSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((int[]) obj);
                }
            };
            primitiveArraySerializers.put(Integer.TYPE, integerSerializer);
            Serializer shortSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((short[]) obj);
                }
            };
            primitiveArraySerializers.put(Short.TYPE, shortSerializer);
            Serializer byteSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((byte[]) obj);
                }
            };
            primitiveArraySerializers.put(Byte.TYPE, byteSerializer);
            Serializer booleanSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((boolean[]) obj);
                }
            };
            primitiveArraySerializers.put(Boolean.TYPE, booleanSerializer);
            Serializer characterSerializer = new Serializer() {
                @Override
                public void serialize(HashAlgorithm hasher, Object obj) throws IOException {
                    hasher.write((char[]) obj);
                }
            };
            primitiveArraySerializers.put(Character.TYPE, characterSerializer);
        }
    }

    /** Returns the scalar serializer registered for the given class, or null. */
    public static Serializer getTypeSerializer(Class<?> type) {
        return serializers.get(type);
    }

    /** Returns the serializer for arrays whose component is the given primitive TYPE, or null. */
    public static Serializer getPrimitiveArraySerializer(Class<?> type) {
        return primitiveArraySerializers.get(type);
    }
}
| 8,328 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/hash/HashFrameworkSerializer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.hash;
import com.netflix.zeno.serializer.FrameworkSerializer;
import com.netflix.zeno.serializer.NFTypeSerializer;
import com.netflix.zeno.serializer.SerializationFramework;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
/**
* @author tvaliulin
*
*/
public class HashFrameworkSerializer extends FrameworkSerializer<HashGenericRecord> {

    // Package-private: instances are created by HashSerializationFramework.
    HashFrameworkSerializer(SerializationFramework framework) {
        super(framework);
    }

    // Mixes fieldName:value into the record's hash; nulls contribute nothing.
    @Override
    public void serializePrimitive(HashGenericRecord rec, String fieldName, Object value) {
        if (value == null) {
            return;
        }
        HashGenericRecord record = rec;
        record.put(fieldName, value);
    }

    @Override
    public void serializeBytes(HashGenericRecord rec, String fieldName, byte[] value) {
        serializePrimitive(rec, fieldName, value);
    }

    /**
     * @deprecated use {@link #serializeObject(HashGenericRecord, String, Object)} instead.
     */
    @SuppressWarnings({ "unchecked" })
    @Override
    public void serializeObject(HashGenericRecord rec, String fieldName, String typeName, Object obj) {
        if (obj == null) {
            return;
        }
        getSerializer(typeName).serialize(obj, rec);
    }

    // Resolves the field's type name from the record's schema, then delegates.
    @Override
    public void serializeObject(HashGenericRecord rec, String fieldName, Object obj) {
        serializeObject(rec, fieldName, rec.getSchema().getObjectType(fieldName), obj);
    }

    // Lists hash order-DEPENDENTLY: "[", each element in iteration order, "]".
    // The null fieldName passed to put() contributes a constant marker.
    @Override
    public <T> void serializeList(HashGenericRecord rec, String fieldName, String typeName, Collection<T> list) {
        if (list == null) {
            return;
        }
        rec.put(null, "[");
        for (T t : list) {
            serializeObject(rec, fieldName, typeName, t);
        }
        rec.put(null, "]");
    }

    // Sets hash order-INDEPENDENTLY: each element is hashed into its own
    // order-dependent record, and those per-element hashes are combined by an
    // order-independent accumulator, so the set's iteration order is irrelevant.
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Override
    public <T> void serializeSet(HashGenericRecord rec, String fieldName, String typeName, Set<T> set) {
        if (set == null) {
            return;
        }
        rec.put(null, "<");
        NFTypeSerializer elementSerializer = (NFTypeSerializer) (framework.getSerializer(typeName));
        HashGenericRecord independent = new HashGenericRecord(new HashOrderIndependent());
        for (T t : set) {
            HashGenericRecord dependent = new HashGenericRecord(new HashOrderDependent());
            elementSerializer.serialize(t, dependent);
            independent.put(null, dependent.hash());
        }
        rec.put(null, independent.hash());
        rec.put(null, ">");
    }

    // Maps follow the same scheme as sets: each key+value pair is hashed
    // order-dependently, pair hashes are combined order-independently.
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Override
    public <K, V> void serializeMap(HashGenericRecord rec, String fieldName, String keyTypeName, String valueTypeName, Map<K, V> map) {
        if (map == null) {
            return;
        }
        rec.put(null, "{");
        NFTypeSerializer keySerializer = (NFTypeSerializer) (framework.getSerializer(keyTypeName));
        NFTypeSerializer valueSerializer = (NFTypeSerializer) (framework.getSerializer(valueTypeName));
        HashGenericRecord independent = new HashGenericRecord(new HashOrderIndependent());
        for (Map.Entry<K, V> entry : map.entrySet()) {
            HashGenericRecord dependent = new HashGenericRecord(new HashOrderDependent());
            keySerializer.serialize(entry.getKey(), dependent);
            valueSerializer.serialize(entry.getValue(), dependent);
            independent.put(null, dependent.hash());
        }
        rec.put(null, independent.hash());
        rec.put(null, "}");
    }
}
| 8,329 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/hash/HashSerializationFramework.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.hash;
import com.netflix.zeno.serializer.NFTypeSerializer;
import com.netflix.zeno.serializer.SerializationFramework;
import com.netflix.zeno.serializer.SerializerFactory;
/**
*
* @author tvaliulin
*
*/
public class HashSerializationFramework extends SerializationFramework
{
    public HashSerializationFramework(SerializerFactory factory) {
        super(factory);
        frameworkSerializer = new HashFrameworkSerializer(this);
    }

    /**
     * Serializes {@code object} (registered under {@code objectType}) into a
     * fresh hash record and returns the resulting hash bytes.
     */
    public <T> byte[] getHash(String objectType, T object) {
        HashGenericRecord record = new HashGenericRecord();
        NFTypeSerializer<T> typeSerializer = getSerializer(objectType);
        typeSerializer.serialize(object, record);
        return record.hash();
    }
}
| 8,330 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/FastBlobFrameworkDeserializer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob;
import static com.netflix.zeno.fastblob.FastBlobFrameworkSerializer.NULL_DOUBLE_BITS;
import static com.netflix.zeno.fastblob.FastBlobFrameworkSerializer.NULL_FLOAT_BITS;
import com.netflix.zeno.fastblob.record.ByteData;
import com.netflix.zeno.fastblob.record.FastBlobDeserializationRecord;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.state.FastBlobTypeDeserializationState;
import com.netflix.zeno.serializer.FrameworkDeserializer;
import com.netflix.zeno.serializer.NFTypeSerializer;
import com.netflix.zeno.util.collections.CollectionImplementation;
import com.netflix.zeno.util.collections.MinimizedUnmodifiableCollections;
import com.netflix.zeno.util.collections.builder.ListBuilder;
import com.netflix.zeno.util.collections.builder.MapBuilder;
import com.netflix.zeno.util.collections.builder.SetBuilder;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
/**
*
* Defines the operations necessary to decode each of the "Zeno native" elements from FastBlob record fields.
*
* @author dkoszewnik
*
*/
public class FastBlobFrameworkDeserializer extends FrameworkDeserializer<FastBlobDeserializationRecord> {

    // Controls which concrete collection implementations are produced when
    // deserializing lists, sets, and maps (defaults to the java.util variants).
    private MinimizedUnmodifiableCollections minimizedCollections = new MinimizedUnmodifiableCollections(CollectionImplementation.JAVA_UTIL);

    public FastBlobFrameworkDeserializer(FastBlobStateEngine framework) {
        super(framework);
    }

    /**
     * Choose the collection implementation used for subsequently deserialized
     * lists, sets, and maps.
     */
    public void setCollectionImplementation(CollectionImplementation impl) {
        minimizedCollections = new MinimizedUnmodifiableCollections(impl);
    }

    /**
     * Read a boolean as a single byte. Might be null.
     *
     * @return {@code null} if the field is absent ({@code fieldPosition == -1})
     *         or was serialized as an explicit null marker byte.
     */
    @Override
    public Boolean deserializeBoolean(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        // Serialized as (byte) 1 for true, (byte) 0 for false.
        return byteData.get(fieldPosition) == (byte) 1 ? Boolean.TRUE : Boolean.FALSE;
    }

    /**
     * Read a boolean as a single byte. Assumes the field is present and non-null.
     */
    @Override
    public boolean deserializePrimitiveBoolean(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        return byteData.get(fieldPosition) == (byte) 1;
    }

    /**
     * Read an integer as a variable-byte sequence. After read, the value must be zig-zag decoded. Might be null.
     */
    @Override
    public Integer deserializeInteger(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        int value = VarInt.readVInt(byteData, fieldPosition);

        // Zig-zag decode: undoes the (value << 1) ^ (value >> 31) applied at serialization time.
        return Integer.valueOf((value >>> 1) ^ ((value << 31) >> 31));
    }

    /**
     * Read an integer as a variable-byte sequence. After read, the value must be zig-zag decoded.
     * Assumes the field is present and non-null.
     */
    @Override
    public int deserializePrimitiveInt(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        int value = VarInt.readVInt(byteData, fieldPosition);

        // Zig-zag decode.
        return (value >>> 1) ^ ((value << 31) >> 31);
    }

    /**
     * Read a long as a variable-byte sequence. After read, the value must be zig-zag decoded. Might be null.
     */
    @Override
    public Long deserializeLong(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        long value = VarInt.readVLong(byteData, fieldPosition);

        // Zig-zag decode (64-bit variant).
        return Long.valueOf((value >>> 1) ^ ((value << 63) >> 63));
    }

    /**
     * Read a long as a variable-byte sequence. After read, the value must be zig-zag decoded.
     * Assumes the field is present and non-null.
     */
    @Override
    public long deserializePrimitiveLong(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        long value = VarInt.readVLong(byteData, fieldPosition);

        // Zig-zag decode (64-bit variant).
        return (value >>> 1) ^ ((value << 63) >> 63);
    }

    /**
     * Read a float as a fixed-length sequence of 4 bytes. Might be null.
     *
     * @return {@code null} if the field is absent or the bytes equal the
     *         {@link FastBlobFrameworkSerializer#NULL_FLOAT_BITS} sentinel.
     */
    @Override
    public Float deserializeFloat(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1)
            return null;

        int intBits = readIntBits(byteData, fieldPosition);

        if(intBits == NULL_FLOAT_BITS)
            return null;

        return Float.valueOf(Float.intBitsToFloat(intBits));
    }

    /**
     * Read a float as a fixed-length sequence of 4 bytes. Assumes the field is present and non-null.
     */
    @Override
    public float deserializePrimitiveFloat(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        int intBits = readIntBits(byteData, fieldPosition);

        return Float.intBitsToFloat(intBits);
    }

    // Reads 4 big-endian bytes into an int (the raw IEEE 754 bits of a float).
    private int readIntBits(ByteData byteData, long fieldPosition) {
        int intBits = (byteData.get(fieldPosition++) & 0xFF) << 24;
        intBits |= (byteData.get(fieldPosition++) & 0xFF) << 16;
        intBits |= (byteData.get(fieldPosition++) & 0xFF) << 8;
        intBits |= (byteData.get(fieldPosition) & 0xFF);
        return intBits;
    }

    /**
     * Read a double as a fixed-length sequence of 8 bytes. Might be null.
     *
     * @return {@code null} if the field is absent or the bytes equal the
     *         {@link FastBlobFrameworkSerializer#NULL_DOUBLE_BITS} sentinel.
     */
    @Override
    public Double deserializeDouble(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1)
            return null;

        long longBits = readLongBits(byteData, fieldPosition);

        if(longBits == NULL_DOUBLE_BITS)
            return null;

        return Double.valueOf(Double.longBitsToDouble(longBits));
    }

    /**
     * Read a double as a fixed-length sequence of 8 bytes. Assumes the field is present and non-null.
     */
    @Override
    public double deserializePrimitiveDouble(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        long longBits = readLongBits(byteData, fieldPosition);

        return Double.longBitsToDouble(longBits);
    }

    // Reads 8 big-endian bytes into a long (the raw IEEE 754 bits of a double).
    // The casts to long are only required for shifts of 32 or more; the low three
    // bytes combine as non-negative ints before widening.
    private long readLongBits(ByteData byteData, long fieldPosition) {
        long longBits = (long) (byteData.get(fieldPosition++) & 0xFF) << 56;
        longBits |= (long) (byteData.get(fieldPosition++) & 0xFF) << 48;
        longBits |= (long) (byteData.get(fieldPosition++) & 0xFF) << 40;
        longBits |= (long) (byteData.get(fieldPosition++) & 0xFF) << 32;
        longBits |= (long) (byteData.get(fieldPosition++) & 0xFF) << 24;
        longBits |= (byteData.get(fieldPosition++) & 0xFF) << 16;
        longBits |= (byteData.get(fieldPosition++) & 0xFF) << 8;
        longBits |= (byteData.get(fieldPosition) & 0xFF);
        return longBits;
    }

    /**
     * Read a String as a series of VarInt-encoded characters. The total byte length
     * is encoded as a variable-byte integer preceding the character data. Might be null.
     */
    @Override
    public String deserializeString(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);

        return readString(byteData, fieldPosition, length);
    }

    /**
     * Read a sequence of bytes directly from the stream. The length is encoded as a
     * variable-byte integer preceding the data. Might be null.
     */
    @Override
    public byte[] deserializeBytes(FastBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);

        byte data[] = new byte[length];

        for(int i=0;i<length;i++) {
            data[i] = byteData.get(fieldPosition++);
        }

        return data;
    }

    /**
     * Read an Object's ordinal reference as a variable-byte integer. Use the framework
     * to look up the Object by ordinal. The referenced type name is taken from the
     * record's schema for this field.
     */
    @Override
    public <T> T deserializeObject(FastBlobDeserializationRecord rec, String fieldName, Class<T> clazz) {
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1)
            return null;
        return deserializeObject(rec, fieldPosition, rec.getObjectType(fieldName));
    }

    /**
     * @deprecated use instead deserializeObject(FastBlobDeserializationRecord rec, String fieldName, Class&lt;T&gt; clazz);
     *
     * Read an Object's ordinal reference as a variable-byte integer. Use the framework to look up the Object by ordinal.
     */
    @Deprecated
    @Override
    public <T> T deserializeObject(FastBlobDeserializationRecord rec, String fieldName, String typeName, Class<T> clazz) {
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1)
            return null;
        return deserializeObject(rec, fieldPosition, typeName);
    }

    // Shared implementation: reads the ordinal at fieldPosition and resolves it
    // against the deserialization state for the given type.
    private <T> T deserializeObject(FastBlobDeserializationRecord rec, long fieldPosition, String typeName) {
        ByteData byteData = rec.getByteData();
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        int ordinal = VarInt.readVInt(byteData, fieldPosition);

        FastBlobTypeDeserializationState<T> deserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(typeName);

        return deserializationState.get(ordinal);
    }

    /**
     * Read a List as a sequence of ordinals encoded as variable-byte integers. Use the framework to look up each Object by it's ordinals.
     */
    @Override
    public <T> List<T> deserializeList(FastBlobDeserializationRecord rec, String fieldName, NFTypeSerializer<T> itemSerializer) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        // The field stores its total byte length, then one varint (or null byte) per element.
        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);

        int numElements = VarInt.countVarIntsInRange(byteData, fieldPosition, length);

        if(numElements == 0)
            return Collections.emptyList();

        ListBuilder<T> list = minimizedCollections.createListBuilder();
        list.builderInit(numElements);

        FastBlobTypeDeserializationState<T> elementDeserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(itemSerializer.getName());

        for(int i=0;i<numElements;i++) {
            if(VarInt.readVNull(byteData, fieldPosition)) {
                // Explicitly-serialized null element.
                list.builderSet(i, null);
                fieldPosition += 1;
            } else {
                int ordinal = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(ordinal);
                T element = elementDeserializationState.get(ordinal);
                // Ordinals with no resolvable object leave the slot at its initial value.
                if(element != null)
                    list.builderSet(i, element);
            }
        }

        return minimizedCollections.minimizeList(list.builderFinish());
    }

    /**
     * Read a Set as a sequence of ordinals encoded as gap-encoded variable-byte integers. Use the framework to look up each Object by it's ordinals.
     */
    @Override
    public <T> Set<T> deserializeSet(FastBlobDeserializationRecord rec, String fieldName, NFTypeSerializer<T> itemSerializer) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);

        int numElements = VarInt.countVarIntsInRange(byteData, fieldPosition, length);

        if(numElements == 0)
            return Collections.emptySet();

        SetBuilder<T> set = minimizedCollections.createSetBuilder();
        set.builderInit(numElements);

        FastBlobTypeDeserializationState<T> elementDeserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(itemSerializer.getName());

        // Set ordinals are serialized in ascending order as deltas from the previous
        // ordinal, so each decoded value is added to the running total.
        int previousOrdinal = 0;

        for(int i=0;i<numElements;i++) {
            if(VarInt.readVNull(byteData, fieldPosition)) {
                fieldPosition++;
                set.builderSet(i, null);
            } else {
                int ordinal = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(ordinal);
                ordinal += previousOrdinal;
                previousOrdinal = ordinal;
                T element = elementDeserializationState.get(ordinal);
                if(element != null)
                    set.builderSet(i, element);
            }
        }

        return minimizedCollections.minimizeSet(set.builderFinish());
    }

    /**
     * Read a Map as a sequence of key/value pairs encoded as variable-byte integers (value are gap-encoded). Use the framework to look up each Object by it's ordinals.
     */
    @Override
    public <K, V> Map<K, V> deserializeMap(FastBlobDeserializationRecord rec, String fieldName, NFTypeSerializer<K> keySerializer, NFTypeSerializer<V> valueSerializer) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);

        // Each entry contributes two varints (key ordinal, then gap-encoded value ordinal).
        int numElements = VarInt.countVarIntsInRange(byteData, fieldPosition, length);
        numElements /= 2;

        if(numElements == 0)
            return Collections.emptyMap();

        MapBuilder<K, V> map = minimizedCollections.createMapBuilder();
        map.builderInit(numElements);

        FastBlobTypeDeserializationState<K> keyDeserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(keySerializer.getName());
        FastBlobTypeDeserializationState<V> valueDeserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(valueSerializer.getName());

        populateMap(byteData, fieldPosition, numElements, map, keyDeserializationState, valueDeserializationState);

        return minimizedCollections.minimizeMap(map.builderFinish());
    }

    /**
     * Read a SortedMap as a sequence of key/value pairs encoded as variable-byte integers (value are gap-encoded). Use the framework to look up each Object by it's ordinals.
     */
    @Override
    public <K, V> SortedMap<K, V> deserializeSortedMap(FastBlobDeserializationRecord rec, String fieldName, NFTypeSerializer<K> keySerializer, NFTypeSerializer<V> valueSerializer) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);

        if(fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;

        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);

        int numElements = VarInt.countVarIntsInRange(byteData, fieldPosition, length);
        numElements /= 2;

        if(numElements == 0)
            return minimizedCollections.emptySortedMap();

        MapBuilder<K, V> map = minimizedCollections.createSortedMapBuilder();
        map.builderInit(numElements);

        FastBlobTypeDeserializationState<K> keyDeserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(keySerializer.getName());
        FastBlobTypeDeserializationState<V> valueDeserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(valueSerializer.getName());

        populateMap(byteData, fieldPosition, numElements, map, keyDeserializationState, valueDeserializationState);

        return minimizedCollections.minimizeSortedMap( (SortedMap<K, V>) map.builderFinish() );
    }

    // Decodes numElements key/value ordinal pairs starting at fieldPosition and puts
    // the resolved objects into mapToPopulate.  Value ordinals are gap-encoded
    // (deltas from the previous value ordinal).  Entries whose key or value ordinal
    // cannot be resolved from the deserialization state are dropped; entries whose
    // key or value was serialized as an explicit null are kept with a null key/value.
    private <K, V> void populateMap(ByteData byteData, long fieldPosition, int numElements, MapBuilder<K, V> mapToPopulate, FastBlobTypeDeserializationState<K> keyState, FastBlobTypeDeserializationState<V> valueState) {
        int previousValueOrdinal = 0;

        for(int i=0;i<numElements;i++) {
            K key = null;
            V value = null;
            boolean undefinedKeyOrValue = false;

            if(VarInt.readVNull(byteData, fieldPosition)) {
                fieldPosition++;
            } else {
                int keyOrdinal = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(keyOrdinal);
                key = keyState.get(keyOrdinal);
                if(key == null)
                    undefinedKeyOrValue = true;
            }

            if(VarInt.readVNull(byteData, fieldPosition)) {
                fieldPosition++;
            } else {
                int valueOrdinal = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(valueOrdinal);
                valueOrdinal += previousValueOrdinal;
                previousValueOrdinal = valueOrdinal;
                value = valueState.get(valueOrdinal);
                if(value == null)
                    undefinedKeyOrValue = true;
            }

            if(!undefinedKeyOrValue)
                mapToPopulate.builderPut(i, key, value);
        }
    }

    // Reusable per-thread scratch buffer for string decoding, avoiding a fresh
    // char[] allocation on every call to readString.
    private final ThreadLocal<char[]> chararr = new ThreadLocal<char[]>();

    /**
     * Decode a String from a series of VarInts, one per character.
     *
     * @param data     the underlying byte data
     * @param position the position of the first character varint
     * @param length   the number of bytes (not characters) to decode
     * @return the decoded String
     */
    protected String readString(ByteData data, long position, int length) {
        long endPosition = position + length;
        char chararr[] = getCharArray();

        // length (bytes) is always >= the character count, so it bounds the buffer size.
        if(length > chararr.length)
            chararr = new char[length];

        int count = 0;

        while(position < endPosition) {
            int c = VarInt.readVInt(data, position);
            chararr[count++] = (char)c;
            position += VarInt.sizeOfVInt(c);
        }

        // The number of chars may be fewer than the number of bytes in the serialized data
        return new String(chararr, 0, count);
    }

    // Lazily creates the per-thread scratch buffer (initial capacity 100 chars).
    private char[] getCharArray() {
        char ch[] = chararr.get();
        if(ch == null) {
            ch = new char[100];
            chararr.set(ch);
        }
        return ch;
    }
}
| 8,331 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/FastBlobFrameworkSerializer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.FastBlobSerializationRecord;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
import com.netflix.zeno.fastblob.state.FastBlobTypeSerializationState;
import com.netflix.zeno.serializer.FrameworkSerializer;
/**
* Defines the binary serialized representation for each of the "Zeno native" elements in a FastBlob
*
* @author dkoszewnik
*
*/
public class FastBlobFrameworkSerializer extends FrameworkSerializer<FastBlobSerializationRecord> {

    // Sentinel bit patterns used to serialize null Float / Double values.  These
    // are the canonical NaN bits plus one -- a pattern Float.floatToIntBits /
    // Double.doubleToLongBits never produces, so it cannot collide with a real value.
    public static final int NULL_FLOAT_BITS = Float.floatToIntBits(Float.NaN) + 1;
    public static final long NULL_DOUBLE_BITS = Double.doubleToLongBits(Double.NaN) + 1;

    public FastBlobFrameworkSerializer(FastBlobStateEngine framework) {
        super(framework);
    }

    /**
     * Serialize a primitive element.  Dispatches on the runtime type of the boxed
     * value; null values are silently skipped (absent fields represent null).
     *
     * @throws RuntimeException if the value is not a supported primitive wrapper,
     *         String, or byte[]
     */
    @Override
    public void serializePrimitive(FastBlobSerializationRecord rec, String fieldName, Object value) {
        if (value == null) {
            return;
        }

        if (value instanceof Integer) {
            serializePrimitive(rec, fieldName, ((Integer) value).intValue());
        } else if (value instanceof Long) {
            serializePrimitive(rec, fieldName, ((Long) value).longValue());
        } else if (value instanceof Float) {
            serializePrimitive(rec, fieldName, ((Float) value).floatValue());
        } else if (value instanceof Double) {
            serializePrimitive(rec, fieldName, ((Double) value).doubleValue());
        } else if (value instanceof Boolean) {
            serializePrimitive(rec, fieldName, ((Boolean) value).booleanValue());
        } else if (value instanceof String) {
            serializeString(rec, fieldName, (String) value);
        } else if (value instanceof byte[]){
            serializeBytes(rec, fieldName, (byte[]) value);
        } else {
            throw new RuntimeException("Primitive type " + value.getClass().getSimpleName() + " not supported!");
        }
    }

    /**
     * Serialize a String as one VarInt per character (see {@link #writeString}).
     * A null value is skipped entirely.
     *
     * @throws IllegalArgumentException if the schema does not declare this field as STRING
     */
    public void serializeString(FastBlobSerializationRecord rec, String fieldName, String value) {
        if(value == null)
            return;

        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.STRING)
            throw new IllegalArgumentException("Attempting to serialize a String as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        writeString(value, fieldBuffer);
    }

    /**
     * Serialize an integer, use zig-zag encoding to (probably) get a small positive value, then encode the result as a variable-byte integer.
     *
     * @throws IllegalArgumentException if the schema field is neither INT nor LONG
     */
    @Override
    public void serializePrimitive(FastBlobSerializationRecord rec, String fieldName, int value) {
        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.INT && fieldType != FieldType.LONG)
            throw new IllegalArgumentException("Attempting to serialize an int as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        // zig zag encoding
        VarInt.writeVInt(fieldBuffer, (value << 1) ^ (value >> 31));
    }

    /**
     * Serialize a long, use zig-zag encoding to (probably) get a small positive value, then encode the result as a variable-byte long.
     *
     * @throws IllegalArgumentException if the schema field is not LONG
     */
    @Override
    public void serializePrimitive(FastBlobSerializationRecord rec, String fieldName, long value) {
        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.LONG)
            throw new IllegalArgumentException("Attempting to serialize a long as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        // zig zag encoding
        VarInt.writeVLong(fieldBuffer, (value << 1) ^ (value >> 63));
    }

    /**
     * Serialize a float into 4 consecutive bytes (big-endian IEEE 754 bits).
     *
     * @throws IllegalArgumentException if the schema field is not FLOAT
     */
    @Override
    public void serializePrimitive(FastBlobSerializationRecord rec, String fieldName, float value) {
        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.FLOAT) {
            throw new IllegalArgumentException("Attempting to serialize a float as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");
        }

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        int intBits = Float.floatToIntBits(value);

        writeFixedLengthInt(fieldBuffer, intBits);
    }

    /**
     * Serialize a special 4-byte long sequence indicating a null Float value.
     */
    public static void writeNullFloat(final ByteDataBuffer fieldBuffer) {
        writeFixedLengthInt(fieldBuffer, NULL_FLOAT_BITS);
    }

    /**
     * Write 4 consecutive bytes, most significant byte first.
     */
    private static void writeFixedLengthInt(ByteDataBuffer fieldBuffer, int intBits) {
        fieldBuffer.write((byte) (intBits >>> 24));
        fieldBuffer.write((byte) (intBits >>> 16));
        fieldBuffer.write((byte) (intBits >>> 8));
        fieldBuffer.write((byte) (intBits));
    }

    /**
     * Serialize a double into 8 consecutive bytes (big-endian IEEE 754 bits).
     *
     * @throws IllegalArgumentException if the schema field is not DOUBLE
     */
    @Override
    public void serializePrimitive(FastBlobSerializationRecord rec, String fieldName, double value) {
        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.DOUBLE)
            throw new IllegalArgumentException("Attempting to serialize a double as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        long intBits = Double.doubleToLongBits(value);

        writeFixedLengthLong(fieldBuffer, intBits);
    }

    /**
     * Serialize a special 8-byte long sequence indicating a null Double value.
     */
    public static void writeNullDouble(ByteDataBuffer fieldBuffer) {
        writeFixedLengthLong(fieldBuffer, NULL_DOUBLE_BITS);
    }

    /**
     * Write 8 consecutive bytes, most significant byte first.
     */
    private static void writeFixedLengthLong(ByteDataBuffer fieldBuffer, long intBits) {
        fieldBuffer.write((byte) (intBits >>> 56));
        fieldBuffer.write((byte) (intBits >>> 48));
        fieldBuffer.write((byte) (intBits >>> 40));
        fieldBuffer.write((byte) (intBits >>> 32));
        fieldBuffer.write((byte) (intBits >>> 24));
        fieldBuffer.write((byte) (intBits >>> 16));
        fieldBuffer.write((byte) (intBits >>> 8));
        fieldBuffer.write((byte) (intBits));
    }

    /**
     * Serialize a boolean as a single byte: (byte) 1 for true, (byte) 0 for false.
     *
     * @throws IllegalArgumentException if the schema field is not BOOLEAN
     */
    @Override
    public void serializePrimitive(FastBlobSerializationRecord rec, String fieldName, boolean value) {
        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.BOOLEAN)
            throw new IllegalArgumentException("Attempting to serialize a boolean as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        byte byteValue = value ? (byte) 1 : (byte) 0;

        fieldBuffer.write(byteValue);
    }

    /**
     * Serialize a sequence of bytes.  A null array is skipped entirely.
     *
     * @throws IllegalArgumentException if the schema field is not BYTES
     */
    @Override
    public void serializeBytes(FastBlobSerializationRecord rec, String fieldName, byte[] value) {
        if(value == null)
            return;

        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.BYTES)
            throw new IllegalArgumentException("Attempting to serialize a byte array as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        for (int i = 0; i < value.length; i++) {
            fieldBuffer.write(value[i]);
        }
    }

    /**
     * Recursively call the framework to serialize the specified Object, then serialize the resulting ordinal as a variable-byte integer.
     *
     * @deprecated the type name is now derived from the record's schema; use
     *             {@link #serializeObject(FastBlobSerializationRecord, String, Object)}
     */
    @Deprecated
    @Override
    public void serializeObject(FastBlobSerializationRecord rec, String fieldName, String typeName, Object obj) {
        int position = rec.getSchema().getPosition(fieldName);
        validateField(fieldName, position);
        serializeObject(rec, position, fieldName, typeName, obj);
    }

    // Guards against schema lookups for fields that do not exist.
    private void validateField(String fieldName, int position) {
        if(position == -1) {
            throw new IllegalArgumentException("Attempting to serialize non existent field " + fieldName + ".");
        }
    }

    // Adds obj to the serialization state for typeName (recursively serializing it)
    // and writes the resulting ordinal as a varint.  Null objects are skipped.
    protected void serializeObject(FastBlobSerializationRecord rec, int position, String fieldName, String typeName, Object obj) {
        if(obj == null)
            return;

        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.OBJECT)
            throw new IllegalArgumentException("Attempting to serialize an Object as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        FastBlobTypeSerializationState<Object> typeSerializationState = ((FastBlobStateEngine) framework).getTypeSerializationState(typeName);
        int ordinal = typeSerializationState.add(obj, rec.getImageMembershipsFlags());

        VarInt.writeVInt(fieldBuffer, ordinal);
    }

    /**
     * Recursively call the framework to serialize the specified Object, then serialize
     * the resulting ordinal as a variable-byte integer.  The referenced type name is
     * taken from the record's schema for this field.
     */
    @Override
    public void serializeObject(FastBlobSerializationRecord rec, String fieldName, Object obj) {
        int position = rec.getSchema().getPosition(fieldName);
        validateField(fieldName, position);
        serializeObject(rec, position, fieldName, rec.getObjectType(fieldName), obj);
    }

    /**
     * Serialize a list.
     *
     * The framework is used to recursively serialize each of the list's elements, then
     * the ordinals are encoded as a sequence of variable-byte integers.  Null elements
     * are written as an explicit null marker byte.  A null collection is skipped entirely.
     */
    @Override
    public <T> void serializeList(FastBlobSerializationRecord rec, String fieldName, String typeName, Collection<T> collection) {
        if(collection == null)
            return;

        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.LIST && fieldType != FieldType.COLLECTION)
            throw new IllegalArgumentException("Attempting to serialize a List as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        FastBlobTypeSerializationState<Object> typeSerializationState = ((FastBlobStateEngine) framework).getTypeSerializationState(typeName);

        for (T obj : collection) {
            if(obj == null) {
                VarInt.writeVNull(fieldBuffer);
            } else {
                int ordinal = typeSerializationState.add(obj, rec.getImageMembershipsFlags());
                VarInt.writeVInt(fieldBuffer, ordinal);
            }
        }
    }

    /**
     * Serialize a set.
     *
     * The framework is used to recursively serialize each of the set's elements, then
     * the ordinals are sorted and encoded as a sequence of gap-encoded variable-byte
     * integers (each ordinal is written as the delta from the previous one).  Null
     * elements (ordinal -1, which sorts first) are written as a null marker byte and
     * do not advance the gap baseline.  A null set is skipped entirely.
     */
    @Override
    public <T> void serializeSet(FastBlobSerializationRecord rec, String fieldName, String typeName, Set<T> set) {
        if(set == null)
            return;

        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.SET && fieldType != FieldType.COLLECTION)
            throw new IllegalArgumentException("Attempting to serialize a Set as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        FastBlobTypeSerializationState<Object> typeSerializationState = ((FastBlobStateEngine) framework).getTypeSerializationState(typeName);

        int setOrdinals[] = new int[set.size()];

        int i = 0;
        for (T obj : set) {
            if(obj == null) {
                setOrdinals[i++] = -1;
            } else {
                setOrdinals[i++] = typeSerializationState.add(obj, rec.getImageMembershipsFlags());
            }
        }

        // Defensive: i always equals set.size() here, so this copy never triggers.
        if(setOrdinals.length > i)
            setOrdinals = Arrays.copyOf(setOrdinals, i);

        Arrays.sort(setOrdinals);

        int currentOrdinal = 0;

        for (i = 0; i < setOrdinals.length; i++) {
            if(setOrdinals[i] == -1) {
                VarInt.writeVNull(fieldBuffer);
            } else {
                VarInt.writeVInt(fieldBuffer, setOrdinals[i] - currentOrdinal);
                currentOrdinal = setOrdinals[i];
            }
        }
    }

    /**
     * Serialize a Map.
     *
     * The framework is used to recursively serialize the map's keys and values, then
     * the Map's entries are each encoded as a variable-byte integer for the key's ordinal, and a gap-encoded variable-byte integer for the value's ordinal.
     * Null keys/values (ordinal -1) are written as a null marker byte.  A null map is skipped entirely.
     */
    @Override
    public <K, V> void serializeMap(FastBlobSerializationRecord rec, String fieldName, String keyTypeName, String valueTypeName, Map<K, V> map) {
        if(map == null)
            return;

        int position = rec.getSchema().getPosition(fieldName);
        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.MAP)
            throw new IllegalArgumentException("Attempting to serialize a Map as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        FastBlobTypeSerializationState<K> keySerializationState = ((FastBlobStateEngine) framework).getTypeSerializationState(keyTypeName);
        FastBlobTypeSerializationState<V> valueSerializationState = ((FastBlobStateEngine) framework).getTypeSerializationState(valueTypeName);

        long mapEntries[] = new long[map.size()];

        int i = 0;
        for (Map.Entry<K, V> entry : map.entrySet()) {
            int keyOrdinal = -1;
            int valueOrdinal = -1;

            if(entry.getKey() != null)
                keyOrdinal = keySerializationState.add(entry.getKey(), rec.getImageMembershipsFlags());
            if(entry.getValue() != null)
                valueOrdinal = valueSerializationState.add(entry.getValue(), rec.getImageMembershipsFlags());

            // Pack the value ordinal in the high 32 bits so sorting below orders the
            // entries by value ordinal, which makes the value-gap encoding possible.
            mapEntries[i++] = ((long)valueOrdinal << 32) | (keyOrdinal & 0xFFFFFFFFL);
        }

        // Defensive: i always equals map.size() here, so this copy never triggers.
        if(mapEntries.length > i)
            mapEntries = Arrays.copyOf(mapEntries, i);

        Arrays.sort(mapEntries);

        int currentValueOrdinal = 0;

        for (i = 0; i < mapEntries.length ; i++) {
            int keyOrdinal = (int) mapEntries[i];
            int valueOrdinal = (int) (mapEntries[i] >> 32);

            if(keyOrdinal == -1)
                VarInt.writeVNull(fieldBuffer);
            else
                VarInt.writeVInt(fieldBuffer, keyOrdinal);

            if(valueOrdinal == -1) {
                VarInt.writeVNull(fieldBuffer);
            } else {
                VarInt.writeVInt(fieldBuffer, valueOrdinal - currentValueOrdinal);
                currentValueOrdinal = valueOrdinal;
            }
        }
    }

    /**
     * Encode a String as a series of VarInts, one per character.
     *
     * @param str the String to encode
     * @param out the buffer to write the encoded characters into
     */
    protected void writeString(String str, ByteDataBuffer out) {
        for(int i=0;i<str.length();i++) {
            VarInt.writeVInt(out, str.charAt(i));
        }
    }
}
| 8,332 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/FastBlobImageUtils.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob;
/**
 * Utils for packing boolean arrays into a long bitmask; this is typically used for
 * passing in image membership information.
 *
 * @author timurua
 *
 */
public class FastBlobImageUtils {

    /**
     * Packs a boolean array into a long bitmask. The boolean at index i maps to
     * bit i, so the booleans with greater indices occupy the higher-order bits.
     *
     * @param a the flags to pack; must contain fewer than 64 elements
     * @return the packed bitmask
     * @throws IllegalArgumentException if 64 or more flags are supplied
     */
    public static long toLong(boolean... a) {
        if (a.length >= 64) {
            throw new IllegalArgumentException("while packing boolean array in long, the array length should be less than 64");
        }
        // Accumulate in a long (not an int): with up to 63 flags, an int
        // accumulator would overflow/sign-extend for any array longer than 31.
        long n = 0;
        for (int i = (a.length - 1); i >= 0; --i) {
            n = (n << 1) | (a[i] ? 1L : 0L);
        }
        return n;
    }

    /** Mask for a single-image engine: only bit 0 set. */
    public static final long ONE_TRUE = toLong(true);

    /// ALL_TRUE_PRIVATE[i] is the mask with the low i bits set, precomputed for i in [0, 63]
    private static final long[] ALL_TRUE_PRIVATE = new long[64];

    static {
        for (int i = 0; i < 64; i++) {
            boolean[] a = new boolean[i];
            for (int j = 0; j < i; j++) {
                a[j] = true;
            }
            ALL_TRUE_PRIVATE[i] = toLong(a);
        }
    }

    /**
     * Returns the mask which corresponds to a boolean array of the specified
     * length where every element is set to true.
     *
     * @param count the number of flags; must be less than 64
     * @return a mask with the low {@code count} bits set
     * @throws IllegalArgumentException if {@code count} is 64 or greater
     */
    public static final long getAllTrue(int count) {
        if (count >= 64) {
            throw new IllegalArgumentException("while packing boolean array in long, the array length should be less than 64");
        }
        return ALL_TRUE_PRIVATE[count];
    }
}
| 8,333 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/OrdinalMapping.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class OrdinalMapping {

    /// per-type ordinal remappings, keyed by the type's unique serializer name
    Map<String, StateOrdinalMapping> stateOrdinalMappings;

    public OrdinalMapping() {
        this.stateOrdinalMappings = new ConcurrentHashMap<String, StateOrdinalMapping>();
    }

    /**
     * Creates and registers a fresh ordinal mapping for the given type,
     * replacing any mapping previously registered under the same name.
     *
     * @param type the unique name of the type
     * @param maxOriginalOrdinal the largest ordinal present in the source state
     * @return the newly registered mapping
     */
    public StateOrdinalMapping createStateOrdinalMapping(String type, int maxOriginalOrdinal) {
        StateOrdinalMapping mapping = new StateOrdinalMapping(maxOriginalOrdinal);
        stateOrdinalMappings.put(type, mapping);
        return mapping;
    }

    /**
     * @return the mapping previously registered for the given type, or null if none exists
     */
    public StateOrdinalMapping getStateOrdinalMapping(String type) {
        return stateOrdinalMappings.get(type);
    }
}
| 8,334 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/FastBlobHeapFriendlyClientFrameworkSerializer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.FastBlobSerializationRecord;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
import com.netflix.zeno.fastblob.state.FastBlobTypeDeserializationState;
/**
* Rather than adding objects to a serialization state and having the ByteArrayOrdinalMap assign ordinals,
* during a double snapshot refresh, we must determine the ordinal of an object based on its deserialization state's
* == mapping. <p/>
*
* This class overrides the appropriate methods to inject this functionality.
*
* @author dkoszewnik
*
*/
public class FastBlobHeapFriendlyClientFrameworkSerializer extends FastBlobFrameworkSerializer {

    /// when true, findObject() records whether any referenced object could not be located
    private boolean checkSerializationIntegrity = false;
    private boolean serializationIntegrityFlawed = false;

    public FastBlobHeapFriendlyClientFrameworkSerializer(FastBlobStateEngine framework) {
        super(framework);
    }

    /**
     * Serialize an object reference by looking up its ordinal in the type's
     * deserialization state (instead of assigning one via the ordinal map).
     */
    @Override
    protected void serializeObject(FastBlobSerializationRecord rec, int position, String fieldName, String typeName, Object obj) {
        if(obj == null)
            return;

        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.OBJECT)
            throw new IllegalArgumentException("Attempting to serialize an Object as " + fieldType + " in field " + fieldName + ". Carefully check your schema for type " + rec.getSchema().getName() + ".");

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        FastBlobTypeDeserializationState<Object> deserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(typeName);
        int ordinal = findObject(deserializationState, obj, fieldName);

        VarInt.writeVInt(fieldBuffer, ordinal);
    }

    /**
     * Serialize a list/collection: one VarInt ordinal per element, in iteration
     * order; null elements are written as VNull.
     */
    @Override
    public <T> void serializeList(FastBlobSerializationRecord rec, String fieldName, String typeName, Collection<T> collection) {
        if(collection == null)
            return;

        int position = rec.getSchema().getPosition(fieldName);

        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.LIST && fieldType != FieldType.COLLECTION)
            throw new IllegalArgumentException("Attempting to serialize a List as " + fieldType + " in field " + fieldName);

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        FastBlobTypeDeserializationState<Object> deserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(typeName);

        for (T obj : collection) {
            if(obj == null) {
                VarInt.writeVNull(fieldBuffer);
            } else {
                int ordinal = findObject(deserializationState, obj, fieldName);
                VarInt.writeVInt(fieldBuffer, ordinal);
            }
        }
    }

    /**
     * Serialize a set: element ordinals are sorted, then delta-encoded (each
     * VarInt is the difference from the previously written ordinal).
     */
    @Override
    public <T> void serializeSet(FastBlobSerializationRecord rec, String fieldName, String typeName, Set<T> set) {
        if(set == null)
            return;

        int position = rec.getSchema().getPosition(fieldName);

        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.SET && fieldType != FieldType.COLLECTION)
            throw new IllegalArgumentException("Attempting to serialize a Set as " + fieldType + " in field " + fieldName);

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        FastBlobTypeDeserializationState<Object> deserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(typeName);

        int setOrdinals[] = new int[set.size()];

        int i = 0;
        for (T obj : set) {
            setOrdinals[i++] = findObject(deserializationState, obj, fieldName);
        }

        Arrays.sort(setOrdinals);

        int currentOrdinal = 0;

        for (i = 0; i < setOrdinals.length; i++) {
            VarInt.writeVInt(fieldBuffer, setOrdinals[i] - currentOrdinal);
            currentOrdinal = setOrdinals[i];
        }
    }

    /**
     * Serialize a map: entries are packed as (valueOrdinal << 32) | keyOrdinal,
     * sorted, then written as (key ordinal, delta-encoded value ordinal) pairs.
     * Null keys/values are written as VNull.
     */
    @Override
    public <K, V> void serializeMap(FastBlobSerializationRecord rec, String fieldName, String keyTypeName, String valueTypeName, Map<K, V> map) {
        if(map == null)
            return;

        int position = rec.getSchema().getPosition(fieldName);

        FieldType fieldType = rec.getSchema().getFieldType(position);
        if(fieldType != FieldType.MAP)
            throw new IllegalArgumentException("Attempting to serialize a Map as " + fieldType + " in field " + fieldName);

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(position);

        FastBlobTypeDeserializationState<Object> keyDeserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(keyTypeName);
        FastBlobTypeDeserializationState<Object> valueDeserializationState = ((FastBlobStateEngine) framework).getTypeDeserializationState(valueTypeName);

        long mapEntries[] = new long[map.size()];

        int i = 0;
        for (Map.Entry<K, V> entry : map.entrySet()) {
            int keyOrdinal = -1;
            int valueOrdinal = -1;
            if(entry.getKey() != null)
                keyOrdinal = findObject(keyDeserializationState, entry.getKey(), fieldName + "(key)");
            if(entry.getValue() != null)
                valueOrdinal = findObject(valueDeserializationState, entry.getValue(), fieldName + "(value)");

            mapEntries[i++] = ((long)valueOrdinal << 32) | (keyOrdinal & 0xFFFFFFFFL);
        }

        Arrays.sort(mapEntries);

        int currentValueOrdinal = 0;

        for (i = 0; i < mapEntries.length; i++) {
            int keyOrdinal = (int) mapEntries[i];
            int valueOrdinal = (int) (mapEntries[i] >> 32);

            if(keyOrdinal == -1)
                VarInt.writeVNull(fieldBuffer);
            else
                VarInt.writeVInt(fieldBuffer, keyOrdinal);

            if(valueOrdinal == -1) {
                VarInt.writeVNull(fieldBuffer);
            } else {
                VarInt.writeVInt(fieldBuffer, valueOrdinal - currentValueOrdinal);
                // Only advance the delta base for non-null values. Advancing it
                // unconditionally would set the base to -1 after a null value,
                // producing bytes that differ from FastBlobFrameworkSerializer's
                // output for the same map.
                currentValueOrdinal = valueOrdinal;
            }
        }
    }

    /// look up the ordinal of obj in the deserialization state; -1 means not found
    private <T> int findObject(FastBlobTypeDeserializationState<Object> deserializationState, T obj, String fieldName) {
        int ordinal = deserializationState.find(obj);
        if(checkSerializationIntegrity && ordinal < 0) {
            serializationIntegrityFlawed = true;
        }
        return ordinal;
    }

    public void setCheckSerializationIntegrity(boolean warn) {
        this.checkSerializationIntegrity = warn;
    }

    public boolean isSerializationIntegrityFlawed() {
        return serializationIntegrityFlawed;
    }

    public void clearSerializationIntegrityFlawedFlag() {
        serializationIntegrityFlawed = false;
    }
}
| 8,335 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/FastBlobStateEngine.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.state.ByteArrayOrdinalMap;
import com.netflix.zeno.fastblob.state.FastBlobTypeDeserializationState;
import com.netflix.zeno.fastblob.state.FastBlobTypeSerializationState;
import com.netflix.zeno.fastblob.state.TypeDeserializationStateListener;
import com.netflix.zeno.serializer.NFTypeSerializer;
import com.netflix.zeno.serializer.SerializationFramework;
import com.netflix.zeno.serializer.SerializerFactory;
import com.netflix.zeno.util.SimultaneousExecutor;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
/**
* This is the SerializationFramework for the second-generation blob.<p/>
*
* The blob is a serialized representation of all data conforming to an object model (defined by a {@link SerializerFactory})
* in a single binary file.<p/>
*
* This class is the main interface for both serialization, as well as deserialization, of FastBlob data. For detailed
* usage of the FastBlobStateEngine, please see <a href="https://github.com/Netflix/zeno/wiki">the Zeno documentation</a><p/>
*
* This class holds references to the "TypeSerializationStates", which are responsible for assigning and maintaining the mappings between
* serialized representations of Objects and "ordinals" (@see {@link ByteArrayOrdinalMap}).<p/>
*
* This class also holds references to "TypeDeserializationStates", which are responsible for assigning and maintaining the reverse mapping
* between ordinals and Objects.<p/>
*
* This class also maintains an ordered list of SerializationStateConfiguration objects, which define the specifications for object
* membership within images.<p/>
*
* <a href="https://docs.google.com/presentation/d/1GOIsGUpPVpRX_rY2GzHVCJ2lmf2eb42N3iqlKKcL5wc/edit?usp=sharing">Original presentation for the blob:</a><p/>
*
* This class has a lifecycle during which it alternates between two states:<p/>
* <ol>
* <li>Safe to add objects, but not safe to write contained objects to a stream.</li>
* <li>Not safe to add objects, but safe to write contained objects to a stream.</li>
* </ol>
*
* Initially the object will be in state (1).<br/>
* From state (1), if prepareForWrite() is called, it will be transitioned to state (2).<br/>
* From state (2), calling prepareForNextCycle() will transition back to state (1).<br/>
*
* @see https://github.com/Netflix/zeno/wiki
*
* @author dkoszewnik
*
*/
public class FastBlobStateEngine extends SerializationFramework {

    /// all serialization and deserialization states, keyed by their unique names
    private final Map<String, FastBlobTypeSerializationState<?>> serializationTypeStates;
    private final Map<String, FastBlobTypeDeserializationState<?>> deserializationTypeStates;

    /// The serialization states, ordered such that all dependencies come *before* their dependents
    public final List<FastBlobTypeSerializationState<?>> orderedSerializationStates;

    private final boolean shouldUseObjectIdentityOrdinalCaching;
    private final int numberOfConfigurations;

    private String latestVersion;
    private Map<String,String> headerTags = new HashMap<String, String>();

    /// length in bytes of the largest single serialized object, determined during prepareForWrite()
    private int maxSingleObjectLength;

    /// precomputed image-membership flags meaning "include in every image"
    private final long addToAllImagesFlags;

    public FastBlobStateEngine(SerializerFactory factory) {
        this(factory, 1, true);
    }

    public FastBlobStateEngine(SerializerFactory factory, int numberOfConfigurations) {
        this(factory, numberOfConfigurations, true);
    }

    public FastBlobStateEngine(SerializerFactory factory, int numberOfConfigurations, boolean shouldUseObjectIdentityOrdinalCaching) {
        super(factory);
        this.shouldUseObjectIdentityOrdinalCaching = shouldUseObjectIdentityOrdinalCaching;
        this.frameworkSerializer = new FastBlobFrameworkSerializer(this);
        this.frameworkDeserializer = new FastBlobFrameworkDeserializer(this);
        this.serializationTypeStates = new HashMap<String, FastBlobTypeSerializationState<?>>();
        this.deserializationTypeStates = new HashMap<String, FastBlobTypeDeserializationState<?>>();
        this.orderedSerializationStates = new ArrayList<FastBlobTypeSerializationState<?>>();
        this.numberOfConfigurations = numberOfConfigurations;
        addToAllImagesFlags = FastBlobImageUtils.getAllTrue(numberOfConfigurations);

        createSerializationStates();
    }

    /// create one serialization and one deserialization state per serializer, in dependency order
    protected void createSerializationStates() {
        for(NFTypeSerializer<?> serializer : getOrderedSerializers()) {
            createSerializationState(serializer);
        }
    }

    private <T> void createSerializationState(NFTypeSerializer<T> serializer) {
        FastBlobTypeSerializationState<T> serializationState = new FastBlobTypeSerializationState<T>(serializer, numberOfConfigurations,
                shouldUseObjectIdentityOrdinalCaching);
        serializationTypeStates.put(serializer.getName(), serializationState);
        orderedSerializationStates.add(serializationState);
        deserializationTypeStates.put(serializer.getName(), new FastBlobTypeDeserializationState<T>(serializer));
    }

    /**
     * Returns the images which can be generated from this FastBlobStateEngine.  The ordering here is important.<p/>
     *
     * The index at which a SerializationStateConfiguration is returned must be used to specify whether or not
     * each object added to the FastBlobStateEngine is included in that image (see add()).
     *
     */
    public int getNumberOfConfigurations() {
        return numberOfConfigurations;
    }

    /**
     * Add an object to this state engine.  This object will be added to all images.
     */
    public void add(String type, Object obj) {
        add(type, obj, addToAllImagesFlags);
    }

    /**
     * Add an object to this state engine. The images to which this object
     * should be added are specified with the addToImageFlags[] array of
     * booleans.
     * <p/>
     *
     * For example, if the FastBlobStateEngine can produce 3 images,
     * getImageConfigurations() will return a List of size 3.
     * <p/>
     *
     * If an object added to this state engine should be contained in the images
     * at index 1, but not at index 0 and 2, then the boolean[] passed into this
     * method should be {false, true, false}.
     *
     * @deprecated use {@link #add(String, Object, long)} with flags packed via FastBlobImageUtils
     */
    @Deprecated
    public void add(String type, Object obj, boolean[] addToImageFlags) {
        add(type, obj, FastBlobImageUtils.toLong(addToImageFlags));
    }

    /**
     * Add an object to this state engine.  The images to which this object should be added are specified with the addToImageFlags[] array of booleans.<p/>
     *
     * For example, if the FastBlobStateEngine can produce 3 images, getImageConfigurations() will return a List of size 3.<p/>
     *
     * If an object added to this state engine should be contained in the images at index 1, but not at index 0 and 2,
     * then the boolean[] passed into this method should be {false, true, false}.
     *
     * @throws RuntimeException if no serializer is registered under the given type name
     */
    public void add(String type, Object obj, long addToImageFlags) {
        FastBlobTypeSerializationState<Object> typeSerializationState = getTypeSerializationState(type);
        if(typeSerializationState == null) {
            throw new RuntimeException("Unable to find type.  Ensure there exists an NFTypeSerializer with the name: " + type);
        }
        typeSerializationState.add(obj, addToImageFlags);
    }

    /**
     * Add a {@link TypeDeserializationStateListener} to the specified type
     *
     * @throws RuntimeException if no serializer is registered under the given type name
     */
    public <T> void setTypeDeserializationStateListener(String type, TypeDeserializationStateListener<T> listener) {
        FastBlobTypeDeserializationState<T> typeState = getTypeDeserializationState(type);
        if(typeState == null) {
            throw new RuntimeException("Unable to find type.  Ensure there exists an NFTypeSerializer with the name: " + type);
        }
        typeState.setListener(listener);
    }

    /**
     * @return the FastBlobSerializationStates in the order in which they should appear in the FastBlob stream.<p/>
     *
     * See https://docs.google.com/presentation/d/1G98w4W0Nb8MzBvglVCwd698aUli4NOFEin60lGZeJos/edit?usp=sharing for a
     * detailed explanation of why this ordering exists and how it is derived.
     */
    public List<FastBlobTypeSerializationState<?>> getOrderedSerializationStates() {
        return orderedSerializationStates;
    }

    /**
     * @return The unmodifiableSet of names
     */
    public Set<String> getSerializerNames() {
        return Collections.unmodifiableSet(serializationTypeStates.keySet());
    }

    @SuppressWarnings("unchecked")
    public <T> FastBlobTypeSerializationState<T> getTypeSerializationState(String name) {
        return (FastBlobTypeSerializationState<T>) serializationTypeStates.get(name);
    }

    @SuppressWarnings("unchecked")
    public <T> FastBlobTypeDeserializationState<T> getTypeDeserializationState(String name) {
        return (FastBlobTypeDeserializationState<T>) deserializationTypeStates.get(name);
    }

    /**
     * Create a lookup array (from ordinal to serialized byte data) for each FastBlobSerializationState.<p/>
     *
     * Determines and remembers the maximum single object length, in bytes.
     */
    public void prepareForWrite() {
        maxSingleObjectLength = 0;

        for(FastBlobTypeSerializationState<?> state : orderedSerializationStates) {
            int stateMaxLength = state.prepareForWrite();
            if(stateMaxLength > maxSingleObjectLength) {
                maxSingleObjectLength = stateMaxLength;
            }
        }
    }

    /// transition back to the "safe to add objects" lifecycle state
    public void prepareForNextCycle() {
        for(FastBlobTypeSerializationState<?> state : orderedSerializationStates) {
            state.prepareForNextCycle();
        }
    }

    public int getMaxSingleObjectLength() {
        return maxSingleObjectLength;
    }

    public String getLatestVersion() {
        return latestVersion;
    }

    public void setLatestVersion(String latestVersion) {
        this.latestVersion = latestVersion;
    }

    public Map<String,String> getHeaderTags() {
        return headerTags;
    }

    public void addHeaderTags(Map<String,String> headerTags) {
        this.headerTags.putAll(headerTags);
    }

    public void addHeaderTag(String tag, String value) {
        this.headerTags.put(tag, value);
    }

    public String getHeaderTag(String tag) {
        return this.headerTags.get(tag);
    }

    /// arbitrary version number.  Change this when incompatible modifications are made to the state engine
    /// serialization format.
    private static final int STATE_ENGINE_SERIALIZATION_FORMAT_VERSION = 999996;

    /**
     * Serialize a previous serialization state from the stream.  The deserialized state engine will be in exactly the same state as the serialized state engine.
     */
    public void serializeTo(OutputStream os) throws IOException {
        DataOutputStream dos = new DataOutputStream(os);

        dos.writeInt(STATE_ENGINE_SERIALIZATION_FORMAT_VERSION);

        dos.writeUTF(latestVersion);

        dos.writeShort(headerTags.size());
        for(Map.Entry<String,String> headerTag : headerTags.entrySet()) {
            dos.writeUTF(headerTag.getKey());
            dos.writeUTF(headerTag.getValue());
        }

        VarInt.writeVInt(dos, numberOfConfigurations);

        VarInt.writeVInt(dos, orderedSerializationStates.size());

        for(FastBlobTypeSerializationState<?> typeState : orderedSerializationStates) {
            dos.writeUTF(typeState.getSchema().getName());
            typeState.serializeTo(dos);
        }
    }

    /**
     * Reinstantiate a StateEngine from the stream.
     *
     * @throws RuntimeException if the stream was written with an incompatible format version
     */
    public void deserializeFrom(InputStream is) throws IOException {
        DataInputStream dis = new DataInputStream(is);

        if(dis.readInt() != STATE_ENGINE_SERIALIZATION_FORMAT_VERSION) {
            throw new RuntimeException("Refusing to reinstantiate FastBlobStateEngine due to serialized version mismatch.");
        }

        latestVersion = dis.readUTF();

        int numHeaderTagEntries = dis.readShort();
        // discard any previously held tags before repopulating from the stream
        headerTags.clear();
        for(int i=0;i<numHeaderTagEntries;i++) {
            headerTags.put(dis.readUTF(), dis.readUTF());
        }

        int numConfigs = VarInt.readVInt(dis);

        int numStates = VarInt.readVInt(dis);

        for(int i=0;i<numStates;i++) {
            String typeName = dis.readUTF();
            FastBlobTypeSerializationState<?> typeState = serializationTypeStates.get(typeName);
            if(typeState != null) {
                typeState.deserializeFrom(dis, numConfigs);
            } else {
                // unknown type in the stream: skip over its serialized state
                FastBlobTypeSerializationState.discardSerializedTypeSerializationState(dis, numConfigs);
            }
        }
    }

    /**
     * Copy all serialization states (except those specified) into the provided State Engine.<p>
     *
     * This is used during FastBlobStateEngine combination.<p/>
     *
     * The "ignoreSerializers" parameter is used for types which must be combined using business logic, instead
     * of a pass-through copy<p>
     *
     * Thread safety:  This cannot be safely called concurrently with add() operations to *this* state engine.<p>
     *
     * @param otherStateEngine
     * @param ignoreSerializers
     *
     * @return the OrdinalMapping between this FastBlobStateEngine and the state engine to which this was copied.
     */
    public OrdinalMapping copySerializationStatesTo(FastBlobStateEngine otherStateEngine, Collection<String> ignoreSerializers) {
        OrdinalMapping ordinalMapping = new OrdinalMapping();

        for(FastBlobTypeSerializationState<?> serializationState : getOrderedSerializationStates()) {
            String serializerName = serializationState.serializer.getName();
            if(!ignoreSerializers.contains(serializerName)) {
                serializationState.copyTo(otherStateEngine.getTypeSerializationState(serializerName), ordinalMapping);
            }
        }

        return ordinalMapping;
    }

    /**
     * Copy only the specified serialization states, in the specified order, into the provided State Engine.<p>
     *
     * For those types which are referenced by the specified serializers, but not combined
     * in this operation, use the provided OrdinalMapping.<p>
     *
     * The provided ordinal mapping will be updated with the new mappings created by this operation<p>
     *
     * This is used during FastBlobStateEngine combination, for those types which reference states that
     * must be combined using business logic (instead of a pass-through copy).<p>
     *
     * @param otherStateEngine
     * @param whichSerializers
     * @param ordinalMapping
     */
    public void copySpecificSerializationStatesTo(FastBlobStateEngine otherStateEngine, List<String> whichSerializers, OrdinalMapping ordinalMapping) {
        for(String serializerName : whichSerializers) {
            FastBlobTypeSerializationState<?> serializationState = getTypeSerializationState(serializerName);
            serializationState.copyTo(otherStateEngine.getTypeSerializationState(serializerName), ordinalMapping);
        }
    }

    /*
     * Copy all the serialization states to provided state engine
     */
    public void copyTo(FastBlobStateEngine otherStateEngine) {
        copyTo(otherStateEngine, Collections.<String> emptyList());
    }

    /*
     * Copy serialization states whose serializer's name doesn't match the ones provided in the ignore collection
     */
    public void copyTo(FastBlobStateEngine otherStateEngine, Collection<String> topLevelSerializersToIgnore) {
        fillDeserializationStatesFromSerializedData();

        SimultaneousExecutor executor = new SimultaneousExecutor(4.0d);

        List<String> topLevelSerializersToCopy = new ArrayList<String>();
        for(NFTypeSerializer<?> serializer : getTopLevelSerializers()) {
            String serializerName = serializer.getName();
            if(!topLevelSerializersToIgnore.contains(serializerName)) {
                topLevelSerializersToCopy.add(serializer.getName());
            }
        }

        // each top-level serializer fans out into one runnable per executor thread
        CountDownLatch latch = new CountDownLatch(executor.getMaximumPoolSize() * topLevelSerializersToCopy.size());
        for(String serializerName : topLevelSerializersToCopy) {
            executor.submit(getFillSerializationStateRunnable(otherStateEngine, serializerName, executor, latch));
        }

        try {
            latch.await();
        } catch (InterruptedException ie) {
            // restore the interrupt status so callers can observe the interruption
            Thread.currentThread().interrupt();
            ie.printStackTrace();
        }
        executor.shutdown();
    }

    private Runnable getFillSerializationStateRunnable(final FastBlobStateEngine otherStateEngine,
            final String serializerName, final SimultaneousExecutor executor, final CountDownLatch latch) {
        return new Runnable() {
            @Override
            public void run() {
                fillSerializationState(otherStateEngine, serializerName, executor, latch);
            }
        };
    }

    /// fan the copy work for one serializer out across all executor threads
    private void fillSerializationState(FastBlobStateEngine otherStateEngine,
            String serializerName, final SimultaneousExecutor executor, CountDownLatch latch) {
        int threadsSize = executor.getMaximumPoolSize();
        for(int i=0;i<threadsSize;i++) {
            executor.submit(getFillSerializationStatesRunnable(otherStateEngine, serializerName, threadsSize, latch, i));
        }
    }

    private Runnable getFillSerializationStatesRunnable(final FastBlobStateEngine otherStateEngine,
            final String serializerName, final int numThreads, final CountDownLatch latch, final int threadNumber) {
        return new Runnable() {
            @Override
            public void run() {
                copyObjects(otherStateEngine, serializerName, numThreads, threadNumber);
                latch.countDown();
            }
        };
    }

    /**
     * Explode the data from the serialization states into the deserialization states.<p/>
     *
     * This is used during FastBlobStateEngine combination.<p/>
     *
     */
    public void fillDeserializationStatesFromSerializedData() {
        for(FastBlobTypeSerializationState<?> serializationState : getOrderedSerializationStates()) {
            String serializer = serializationState.getSchema().getName();
            serializationState.fillDeserializationState(getTypeDeserializationState(serializer));
        }
    }

    /**
     * Explode the data from the serialization states into the deserialization states for the specified serializers.<p/>
     *
     * This is used during FastBlobStateEngine combination.<p/>
     *
     * @param includeSerializers
     */
    public void fillDeserializationStatesFromSerializedData(Collection<String> includeSerializers) {
        for(FastBlobTypeSerializationState<?> serializationState : getOrderedSerializationStates()) {
            String serializer = serializationState.getSchema().getName();
            if(includeSerializers.contains(serializer)) {
                serializationState.fillDeserializationState(getTypeDeserializationState(serializer));
            }
        }
    }

    /// re-add all deserialized top-level objects to this engine's serialization states
    public void fillSerializationStatesFromDeserializedData() {
        for(NFTypeSerializer<?> serializer : getTopLevelSerializers()) {
            FastBlobTypeDeserializationState<?> state = getTypeDeserializationState(serializer.getName());
            state.fillSerializationState(this);
        }
    }

    /// swap in the serializer which resolves ordinals via the deserialization states' identity maps
    public void prepareForDoubleSnapshotRefresh() {
        this.frameworkSerializer = new FastBlobHeapFriendlyClientFrameworkSerializer(this);
    }

    public void cleanUpAfterDoubleSnapshotRefresh() {
        for(FastBlobTypeDeserializationState<?> state : deserializationTypeStates.values()) {
            state.clearIdentityOrdinalMap();
        }
    }

    /// copy every numThreads-th object (starting at threadNumber) of one type into the other engine,
    /// preserving each object's per-image membership flags
    private void copyObjects(final FastBlobStateEngine otherStateEngine, final String serializerName,
            final int numThreads, final int threadNumber) {
        FastBlobTypeDeserializationState<?> typeDeserializationState = getTypeDeserializationState(serializerName);
        int maxOrdinal = typeDeserializationState.maxOrdinal() + 1;
        if(maxOrdinal < threadNumber) {
            return;
        }

        FastBlobTypeSerializationState<?> typeSerializationState = getTypeSerializationState(serializerName);
        boolean imageMembershipsFlags[] = new boolean[numberOfConfigurations];

        for(int i=threadNumber;i<maxOrdinal;i+=numThreads) {
            Object obj = typeDeserializationState.get(i);
            if(obj != null) {
                for(int imageIndex=0;imageIndex<numberOfConfigurations;imageIndex++) {
                    imageMembershipsFlags[imageIndex] = typeSerializationState.getImageMembershipBitSet(imageIndex).get(i);
                }
                otherStateEngine.add(typeSerializationState.getSchema().getName(), obj, FastBlobImageUtils.toLong(imageMembershipsFlags));
            }
        }
    }
}
| 8,336 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/StateOrdinalMapping.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob;
public class StateOrdinalMapping {

    /** Index = ordinal in the original state; value = ordinal in the destination state. */
    private final int[] mappedOrdinals;

    /**
     * @param maxOriginalOrdinal the largest ordinal that will be mapped; sized +1
     *        so that this ordinal is itself a valid index
     */
    public StateOrdinalMapping(int maxOriginalOrdinal) {
        this.mappedOrdinals = new int[maxOriginalOrdinal + 1];
    }

    /** Records the destination-state ordinal for the given original ordinal. */
    public void setMappedOrdinal(int originalOrdinal, int mappedOrdinal) {
        mappedOrdinals[originalOrdinal] = mappedOrdinal;
    }

    /** Returns the destination-state ordinal recorded for the given original ordinal (0 if never set). */
    public int getMappedOrdinal(int originalOrdinal) {
        return mappedOrdinals[originalOrdinal];
    }
}
| 8,337 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/ByteDataBuffer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record;
/**
* Writes data to a SegmentedByteArray, tracking the index to which it writes.
*
* @author dkoszewnik
*
*/
public class ByteDataBuffer {

    /** Backing storage; grows as bytes are written. */
    private final SegmentedByteArray buf;
    /** Index of the next byte to write; doubles as the logical length of the buffer. */
    private long position;

    public ByteDataBuffer() {
        this(256);
    }

    /**
     * @param startSize initial capacity hint; rounded up to a power of two to
     *        derive the segment size exponent.
     *        NOTE(review): startSize == 1 makes log2OfSize 0 and passes -1 to the
     *        SegmentedByteArray constructor -- confirm that is tolerated.
     */
    public ByteDataBuffer(int startSize) {
        int log2OfSize = 32 - Integer.numberOfLeadingZeros(startSize - 1);
        buf = new SegmentedByteArray(log2OfSize - 1);
    }

    /** Appends a single byte at the current position. */
    public void write(byte b) {
        buf.set(position, b);
        position++;
    }

    /** Rewinds the write position to the start; previously written bytes remain in the backing array. */
    public void reset() {
        position = 0;
    }

    public void setPosition(long position) {
        this.position = position;
    }

    /** @return the number of bytes written so far (the current position). */
    public long length() {
        return position;
    }

    /** Appends this buffer's contents to the end of {@code other}. */
    public void copyTo(ByteDataBuffer other) {
        long length = position;
        other.buf.copy(buf, 0, other.position, length);
        other.position += length;
    }

    /** Appends the contents of {@code other} to the end of this buffer. */
    public void copyFrom(ByteDataBuffer other) {
        long length = other.position;
        buf.copy(other.buf, 0, position, length);
        position += length;
    }

    /** Appends {@code length} bytes of {@code data}, starting at {@code startPosition}, to this buffer. */
    public void copyFrom(ByteData data, long startPosition, int length) {
        buf.copy(data, startPosition, position, length);
        position += length;
    }

    /** Appends {@code length} bytes of {@code data}, starting at {@code startPosition}, to this buffer. */
    public void copyFrom(SegmentedByteArray data, long startPosition, int length) {
        buf.copy(data, startPosition, position, length);
        position += length;
    }

    /** Random-access read of a previously written byte. */
    public byte get(long index) {
        return buf.get(index);
    }

    public SegmentedByteArray getUnderlyingArray() {
        return buf;
    }
}
| 8,338 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/StreamingByteData.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record;
import java.io.IOException;
import java.io.InputStream;
/**
* This class buffers data from an InputStream. The buffered data can be accessed randomly within
* a predefined window before or after the greatest previously accessed byte index.<p/>
*
* When using this class, some bytes before and after the maximum byte previously accessed
* are available via the get() method inherited from ByteData.<p/>
*
* Specifically, 2^(log2OfBufferSegmentLength) bytes, both before and after the maximum byte
* previously accessed by either the stream's read() or ByteData's get() method are available.<p/>
*
* This is useful when reading the FastBlob. Although records are pulled from the FastBlob
* stream one at a time, the FastBlobDeserializationRecord requires random access to the bytes
* in the record.<p/>
*
* The FastBlobWriter records the ceil(log2(maxLength)) of the individual records contained in the FastBlob.
* Upon deserialization, this value is read and passed to the constructor of this class to set the buffer length.
* This guarantees that the reader can access the entire record while it is being read (because the maximum byte
* accessed while deserializing the record will at most be the last byte of the record).
*
* @author dkoszewnik
*
*/
public class StreamingByteData extends InputStream implements ByteData {

    private final InputStream underlyingStream;

    // buffer geometry: four rotating segments, each 2^log2OfBufferSegmentLength bytes
    private final int bufferSegmentLength;
    private final int log2OfBufferSegmentLength;
    private final int bufferSegmentLengthMask;

    // absolute stream index of the first byte currently held in buf[0]
    private long bufferStartPosition;
    private final byte buf[][];

    // absolute stream index just past the last available byte, once EOF has been seen
    private long eofPosition = Long.MAX_VALUE;
    private long currentStreamPosition;

    public StreamingByteData(InputStream in, int log2OfBufferSegmentLength) {
        this.underlyingStream = in;
        this.log2OfBufferSegmentLength = log2OfBufferSegmentLength;
        this.bufferSegmentLength = 1 << log2OfBufferSegmentLength;
        this.bufferSegmentLengthMask = bufferSegmentLength - 1;
        this.buf = new byte[4][];

        // eagerly fill all four segments, stopping early if the stream ends
        for(int i=0;i<4;i++) {
            buf[i] = new byte[bufferSegmentLength];
            if(eofPosition == Long.MAX_VALUE)
                fillArray(buf[i], (bufferSegmentLength * i));
        }
    }

    /**
     * This method provides random-access to the stream data. To guarantee availability, the position should be no less than
     * the greatest previously accessed byte (via either get() or read()) minus 2^log2OfBufferSegmentLength, and no more
     * than the greatest previously accessed byte plus 2^log2OfBufferSegmentLength.
     *
     * @param position is the index into the stream data.
     * @return the byte at position.
     */
    @Override
    public byte get(long position) {
        // translate the absolute stream position into an offset within the buffer window
        position -= bufferStartPosition;

        // if this position reads from the last buffer segment, slide the window
        // forward one segment so that bytes just beyond it remain reachable.
        if(position >= (bufferSegmentLength * 3)) {
            fillNewBuffer();
            position -= bufferSegmentLength;
        }

        // return the appropriate byte out of the buffer
        return buf[(int)(position >>> log2OfBufferSegmentLength)][(int)(position & bufferSegmentLengthMask)];
    }

    @Override
    public int read() throws IOException {
        // if there are no more bytes, return -1
        if(currentStreamPosition >= eofPosition)
            return -1;

        // Mask to an unsigned value: InputStream.read() must return 0-255 for data
        // bytes.  The previous sign-extending cast made any byte >= 0x80
        // indistinguishable from the end-of-stream marker (-1) to callers.
        return get(currentStreamPosition++) & 0xFF;
    }

    /**
     * @return the index of the next byte which will be returned from read().
     */
    public long currentStreamPosition() {
        return currentStreamPosition;
    }

    /**
     * If bytes should be accessed via the get() method only, this method
     * can be used to skip them in the stream (not return from read()).
     *
     * @param incrementBy how many bytes to "skip", or omit from calls to read()
     */
    public void incrementStreamPosition(int incrementBy) {
        currentStreamPosition += incrementBy;
    }

    /**
     * Close the underlying stream
     */
    @Override
    public void close() throws IOException {
        underlyingStream.close();
    }

    /**
     * Discards the oldest buffer segment, shifts the remaining segments down,
     * and fills the vacated segment with the next bytes from the stream.
     */
    private void fillNewBuffer() {
        byte temp[] = buf[0];
        buf[0] = buf[1];
        buf[1] = buf[2];
        buf[2] = buf[3];
        buf[3] = temp;

        bufferStartPosition += bufferSegmentLength;

        if(eofPosition == Long.MAX_VALUE)
            fillArray(buf[3], bufferStartPosition + (bufferSegmentLength * 3));
    }

    /**
     * Fills a byte array with data from the underlying stream.  If the stream is
     * exhausted mid-segment, records the absolute position at which it ended.
     */
    private void fillArray(byte arr[], long segmentStartByte) {
        try {
            int n = 0;
            while (n < arr.length) {
                int count = underlyingStream.read(arr, n, arr.length - n);

                // if we have reached the end of the stream, record the byte position at which the stream ends.
                if (count < 0) {
                    eofPosition = segmentStartByte + n;
                    return;
                }
                n += count;
            }
        } catch (IOException e) {
            throw new RuntimeException("Unable to read from stream", e);
        }
    }
}
| 8,339 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/ByteData.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record;
/**
* Interface implemented by data structures which may be used to access FastBlob record data:
*
* {@link SegmentedByteArray}
* {@link StreamingByteData}
*
* @author dkoszewnik
*
*/
public interface ByteData {
    /**
     * @param position absolute byte index into the underlying data
     * @return the byte stored at that position
     */
    byte get(long position);
}
| 8,340 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/SegmentedByteArray.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.util.Arrays;
/**
* A segmented byte array can grow without allocating successively larger blocks and copying memory.<p/>
*
* Segment length is always a power of two so that the location of a given index can be found with mask and shift operations.<p/>
*
* Conceptually this can be thought of as a single byte array of undefined length. The currently allocated buffer will always be
* a multiple of the size of the segments. The buffer will grow automatically when a byte is written to an index greater than the
* currently allocated buffer.
*
* @author dkoszewnik
*
*/
public class SegmentedByteArray implements ByteData {
private byte[][] segments;
private final int log2OfSegmentSize;
private final int bitmask;
public SegmentedByteArray(int log2OfSegmentSize) {
this.segments = new byte[2][];
this.log2OfSegmentSize = log2OfSegmentSize;
this.bitmask = (1 << log2OfSegmentSize) - 1;
}
/**
* Set the byte at the given index to the specified value
*/
public void set(long index, byte value) {
int segmentIndex = (int)(index >> log2OfSegmentSize);
ensureCapacity(segmentIndex);
segments[segmentIndex][(int)(index & bitmask)] = value;
}
/**
* Get the value of the byte at the specified index.
*/
public byte get(long index) {
return segments[(int)(index >>> log2OfSegmentSize)][(int)(index & bitmask)];
}
/**
* Copy bytes from another ByteData to this array.
*
* @param src the source data
* @param srcPos the position to begin copying from the source data
* @param destPos the position to begin writing in this array
* @param length the length of the data to copy
*/
public void copy(ByteData src, long srcPos, long destPos, long length) {
for(long i=0;i<length;i++) {
set(destPos++, src.get(srcPos++));
}
}
/**
* For a SegmentedByteArray, this is a faster copy implementation.
*
* @param src
* @param srcPos
* @param destPos
* @param length
*/
public void copy(SegmentedByteArray src, long srcPos, long destPos, long length) {
int segmentLength = 1 << log2OfSegmentSize;
int currentSegment = (int)(destPos >>> log2OfSegmentSize);
int segmentStartPos = (int)(destPos & bitmask);
int remainingBytesInSegment = segmentLength - segmentStartPos;
while(length > 0) {
int bytesToCopyFromSegment = (int)Math.min(remainingBytesInSegment, length);
ensureCapacity(currentSegment);
int copiedBytes = src.copy(srcPos, segments[currentSegment], segmentStartPos, bytesToCopyFromSegment);
srcPos += copiedBytes;
length -= copiedBytes;
segmentStartPos = 0;
remainingBytesInSegment = segmentLength;
currentSegment++;
}
}
/**
* copies exactly data.length bytes from this SegmentedByteArray into the provided byte array
*
* @param index
* @param data
* @return the number of bytes copied
*/
public int copy(long srcPos, byte[] data, int destPos, int length) {
int segmentSize = 1 << log2OfSegmentSize;
int remainingBytesInSegment = (int)(segmentSize - (srcPos & bitmask));
int dataPosition = destPos;
while(length > 0) {
byte[] segment = segments[(int)(srcPos >>> log2OfSegmentSize)];
int bytesToCopyFromSegment = Math.min(remainingBytesInSegment, length);
System.arraycopy(segment, (int)(srcPos & bitmask), data, dataPosition, bytesToCopyFromSegment);
dataPosition += bytesToCopyFromSegment;
srcPos += bytesToCopyFromSegment;
remainingBytesInSegment = segmentSize - (int)(srcPos & bitmask);
length -= bytesToCopyFromSegment;
}
return dataPosition - destPos;
}
public void readFrom(RandomAccessFile file, long pointer, int length) throws IOException {
file.seek(pointer);
int segmentSize = 1 << log2OfSegmentSize;
int segment = 0;
while(length > 0) {
ensureCapacity(segment);
int bytesToCopy = Math.min(segmentSize, length);
int bytesCopied = 0;
while(bytesCopied < bytesToCopy){
bytesCopied += file.read(segments[segment], bytesCopied, (bytesToCopy - bytesCopied));
}
segment++;
length -= bytesCopied;
}
}
/**
* Write a portion of this data to an OutputStream.
*/
public void writeTo(OutputStream os, long startPosition, long len) throws IOException {
int segmentSize = 1 << log2OfSegmentSize;
int remainingBytesInSegment = segmentSize - (int)(startPosition & bitmask);
long remainingBytesInCopy = len;
while(remainingBytesInCopy > 0) {
long bytesToCopyFromSegment = Math.min(remainingBytesInSegment, remainingBytesInCopy);
os.write(segments[(int)(startPosition >>> log2OfSegmentSize)], (int)(startPosition & bitmask), (int)bytesToCopyFromSegment);
startPosition += bytesToCopyFromSegment;
remainingBytesInSegment = segmentSize - (int)(startPosition & bitmask);
remainingBytesInCopy -= bytesToCopyFromSegment;
}
}
/**
* Ensures that the segment at segmentIndex exists
*
* @param segmentIndex
*/
private void ensureCapacity(int segmentIndex) {
while(segmentIndex >= segments.length) {
segments = Arrays.copyOf(segments, segments.length * 3 / 2);
}
if(segments[segmentIndex] == null) {
segments[segmentIndex] = new byte[1 << log2OfSegmentSize];
}
}
}
| 8,341 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/FastBlobDeserializationRecord.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
import com.netflix.zeno.serializer.NFDeserializationRecord;
/**
* Produces a set of offsets into the fields for a specific object.<p/>
*
* The schema tells us how to read the data encoded in the SegmentedByteArray.<p/>
*
* When we position to a specific object instance's offset, we use the schema to guide us to set the pointers for each field in that instance.
*
* @author dkoszewnik
*
*/
public class FastBlobDeserializationRecord extends NFDeserializationRecord {

    /** the serialized byte data in which this record's fields live */
    private final ByteData data;

    /** offset of each field's first byte, indexed by field position in the schema */
    private final long[] fieldOffsets;

    public FastBlobDeserializationRecord(FastBlobSchema schema, ByteData byteData) {
        super(schema);
        this.data = byteData;
        this.fieldOffsets = new long[schema.numFields()];
    }

    /** @return the offset of the first field, i.e. where this record currently begins */
    public long position() {
        return fieldOffsets[0];
    }

    /**
     * Position this record to the byte at index <code>objectBeginOffset</code>.
     *
     * @param objectBeginOffset
     * @return The length of the object's data, in bytes.
     */
    public int position(long objectBeginOffset) {
        long cursor = objectBeginOffset;

        for(int fieldIdx = 0; fieldIdx < fieldOffsets.length; fieldIdx++) {
            fieldOffsets[fieldIdx] = cursor;
            cursor += fieldLength(cursor, getSchema().getFieldType(fieldIdx));
        }

        return (int)(cursor - objectBeginOffset);
    }

    /**
     * Get the underlying byte data where this record is contained.
     */
    public ByteData getByteData() {
        return data;
    }

    /**
     * Get the offset into the byte data for the named field, or -1 if the
     * schema contains no such field.
     */
    public long getPosition(String fieldName) {
        int fieldIdx = getSchema().getPosition(fieldName);
        return fieldIdx == -1 ? -1 : fieldOffsets[fieldIdx];
    }

    /**
     * Get the length, in bytes, of the named field for this record.
     */
    public int getFieldLength(String fieldName) {
        int fieldIdx = getSchema().getPosition(fieldName);
        return fieldLength(fieldOffsets[fieldIdx], getSchema().getFieldType(fieldIdx));
    }

    /** Number of bytes occupied by the field that begins at startPosition. */
    private int fieldLength(long startPosition, FieldType type) {
        if(type.startsWithVarIntEncodedLength()) {
            if(VarInt.readVNull(data, startPosition))
                return 1; // a lone null sentinel byte

            int dataLength = VarInt.readVInt(data, startPosition);
            return VarInt.sizeOfVInt(dataLength) + dataLength;
        }

        if(type.getFixedLength() != -1)
            return type.getFixedLength();

        // variable-length numeric field: either a null sentinel or a single varint
        if(VarInt.readVNull(data, startPosition))
            return 1;

        return VarInt.sizeOfVLong(VarInt.readVLong(data, startPosition));
    }
}
| 8,342 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/SegmentedByteArrayHasher.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record;
/**
* This class performs a fast murmurhash3 on a sequence of bytes.<p/>
*
* MurmurHash is a high quality hash algorithm for byte data:<p/>
*
* http://en.wikipedia.org/wiki/MurmurHash
*/
public class SegmentedByteArrayHasher {

    // fixed seed so that identical byte sequences always produce identical hashes
    private static final int SEED = 0xeab524b9;

    /** Hash the full contents of the supplied buffer (bytes 0 through length-1). */
    public static int hashCode(ByteDataBuffer data) {
        return hashCode(data.getUnderlyingArray(), 0, (int)data.length());
    }

    /**
     * MurmurHash3. Adapted from:<p/>
     *
     * https://github.com/yonik/java_util/blob/master/src/util/hash/MurmurHash3.java<p/>
     *
     * On 11/19/2013 the license for this file read:<p/>
     *
     *  The MurmurHash3 algorithm was created by Austin Appleby. This java port was authored by
     *  Yonik Seeley and is placed into the public domain. The author hereby disclaims copyright
     *  to this source code.
     *  <p>
     *  This produces exactly the same hash values as the final C++
     *  version of MurmurHash3 and is thus suitable for producing the same hash values across
     *  platforms.
     *  <p>
     *  The 32 bit x86 version of this hash should be the fastest variant for relatively short keys like ids.
     *  <p>
     *  Note - The x86 and x64 versions do _not_ produce the same results, as the
     *  algorithms are optimized for their respective platforms.
     *  <p>
     *  See http://github.com/yonik/java_util for future updates to this file.
     *
     * @param data the bytes to hash
     * @param offset index of the first byte to include in the hash
     * @param len number of bytes to hash
     * @return the 32-bit MurmurHash3 (x86 variant) of the given range
     */
    public static int hashCode(ByteData data, long offset, int len) {
        final int c1 = 0xcc9e2d51;
        final int c2 = 0x1b873593;

        int h1 = SEED;
        long roundedEnd = offset + (len & 0xfffffffffffffffcL); // round down to a whole number of 4-byte blocks

        // body: mix in one little-endian 4-byte block at a time
        for (long i = offset; i < roundedEnd; i += 4) {
            // little endian load order
            int k1 = (data.get(i) & 0xff) | ((data.get(i + 1) & 0xff) << 8) | ((data.get(i + 2) & 0xff) << 16) | (data.get(i + 3) << 24);
            k1 *= c1;
            k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
            k1 *= c2;

            h1 ^= k1;
            h1 = (h1 << 13) | (h1 >>> 19); // ROTL32(h1,13);
            h1 = h1 * 5 + 0xe6546b64;
        }

        // tail: mix in the remaining 1-3 bytes (cases intentionally fall through)
        int k1 = 0;

        switch (len & 0x03) {
        case 3:
            k1 = (data.get(roundedEnd + 2) & 0xff) << 16;
            // fallthrough
        case 2:
            k1 |= (data.get(roundedEnd + 1) & 0xff) << 8;
            // fallthrough
        case 1:
            k1 |= (data.get(roundedEnd) & 0xff);
            k1 *= c1;
            k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
            k1 *= c2;
            h1 ^= k1;
        }

        // finalization
        h1 ^= len;

        // fmix(h1): avalanche the bits so every input bit affects every output bit
        h1 ^= h1 >>> 16;
        h1 *= 0x85ebca6b;
        h1 ^= h1 >>> 13;
        h1 *= 0xc2b2ae35;
        h1 ^= h1 >>> 16;

        return h1;
    }
}
| 8,343 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/FastBlobSerializationRecord.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record;
import com.netflix.zeno.fastblob.FastBlobFrameworkSerializer;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
import com.netflix.zeno.fastblob.state.FastBlobTypeSerializationState;
import com.netflix.zeno.serializer.NFSerializationRecord;
/**
* An NFSerializationRecord for the FastBlobStateEngine serialization framework.<p/>
*
* This is the write record for the FastBlob. It conforms to a FastBlobSchema.<p/>
*
* Each field in the schema is assigned a ByteDataBuffer to which the FastBlobFrameworkSerializer will
* write the bytes for the serialized representation for that field.<p/>
*
* Once all of the fields for the object are written, the fields can be concatenated via the writeDataTo() method
* to some other ByteDataBuffer (in the normal server setup, this will be the ByteDataBuffer in the ByteArrayOrdinalMap).<p/>
*
* This class also retains the image membership information. When an object is added to the FastBlobStateEngine, it
* is specified which images it should be added to with a boolean array (see {@link FastBlobTypeSerializationState}.add()).
* This information needs to be propagated down, during traversal for serialization, to each child object which is referenced
* by the top level object. A handle to this image membership information is also retained in this record for this purpose.
*
* @author dkoszewnik
*
*/
public class FastBlobSerializationRecord extends NFSerializationRecord {

    /** one scratch buffer per schema field, reused across records */
    private final ByteDataBuffer fieldBuffers[];

    /** whether each field has been written for the record currently being serialized */
    private final boolean fieldPopulated[];

    /** flags indicating which images the current object belongs to */
    private long imageMembershipsFlags;

    /**
     * Create a new FastBlobSerializationRecord which conforms to the given FastBlobSchema.
     */
    public FastBlobSerializationRecord(FastBlobSchema schema) {
        int numFields = schema.numFields();
        this.fieldBuffers = new ByteDataBuffer[numFields];
        this.fieldPopulated = new boolean[numFields];

        for (int fieldIdx = 0; fieldIdx < numFields; fieldIdx++)
            fieldBuffers[fieldIdx] = new ByteDataBuffer(32);

        setSchema(schema);
    }

    /**
     * Returns the buffer which should be used to serialize the data for the given field.
     *
     * @param field
     * @return
     */
    public ByteDataBuffer getFieldBuffer(String field) {
        return getFieldBuffer(getSchema().getPosition(field));
    }

    /**
     * Returns the buffer which should be used to serialize the data for the field at the given position in the schema.<p/>
     *
     * The field is marked as populated and its buffer is cleared, ready for writing.
     *
     * @param fieldPosition
     * @return
     */
    public ByteDataBuffer getFieldBuffer(int fieldPosition) {
        fieldPopulated[fieldPosition] = true;
        fieldBuffers[fieldPosition].reset();
        return fieldBuffers[fieldPosition];
    }

    /**
     * Concatenates all fields, in order, to the ByteDataBuffer supplied. This concatenation is the
     * verbatim serialized representation in the FastBlob.
     *
     * @param buf
     */
    public void writeDataTo(ByteDataBuffer buf) {
        for (int fieldIdx = 0; fieldIdx < fieldBuffers.length; fieldIdx++) {
            FieldType fieldType = getSchema().getFieldType(fieldIdx);

            if (!fieldPopulated[fieldIdx]) {
                appendNull(buf, fieldType);
                continue;
            }

            // length-prefixed types record their byte count ahead of the payload
            if (fieldType.startsWithVarIntEncodedLength())
                VarInt.writeVInt(buf, (int) fieldBuffers[fieldIdx].length());

            fieldBuffers[fieldIdx].copyTo(buf);
        }
    }

    /** Write the type-appropriate null representation for an unpopulated field. */
    private static void appendNull(ByteDataBuffer buf, FieldType fieldType) {
        if (fieldType == FieldType.FLOAT) {
            FastBlobFrameworkSerializer.writeNullFloat(buf);
        } else if (fieldType == FieldType.DOUBLE) {
            FastBlobFrameworkSerializer.writeNullDouble(buf);
        } else {
            VarInt.writeVNull(buf);
        }
    }

    /**
     * Reset the populated flags so the record can be reused for the next object.
     */
    public void reset() {
        for (int fieldIdx = 0; fieldIdx < fieldPopulated.length; fieldIdx++)
            fieldPopulated[fieldIdx] = false;
    }

    /**
     * This is the image membership information for the object represented by this record.<p/>
     *
     * It is contained here so that it may be passed down by the FastBlobFrameworkSerializer when
     * making the call to serialize child objects which are referenced by this object.
     *
     * @param imageMembershipsFlags
     */
    public void setImageMembershipsFlags(long imageMembershipsFlags) {
        this.imageMembershipsFlags = imageMembershipsFlags;
    }

    public long getImageMembershipsFlags() {
        return imageMembershipsFlags;
    }
}
| 8,344 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/VarInt.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* Variable-byte integer encoding and decoding logic
*
* @author dkoszewnik
*/
/**
 * Variable-byte integer encoding and decoding logic.<p/>
 *
 * Values are encoded 7 bits per byte, high-order bits first; the top bit of
 * each byte is a continuation flag, set on every byte except the last.  A
 * single byte of 0x80 (a continuation flag followed by no terminating byte)
 * is reserved as the encoding of "null".
 *
 * @author dkoszewnik
 */
public class VarInt {

    /**
     * Write the single-byte null sentinel (0x80) to the buffer.
     */
    public static void writeVNull(ByteDataBuffer buf) {
        buf.write((byte)0x80);
    }

    /**
     * Write a variable-byte encoded long.  Non-negative values occupy between
     * 1 and 9 bytes; negative values always occupy the full 10 bytes.
     */
    public static void writeVLong(ByteDataBuffer buf, long value) {
        if(value < 0) buf.write((byte)0x81); // the 64th (sign) bit, with the continuation flag

        if(value > 0xFFFFFFFFFFFFFFL || value < 0) buf.write((byte)(0x80 | ((value >>> 56) & 0x7FL)));
        if(value > 0x1FFFFFFFFFFFFL || value < 0)  buf.write((byte)(0x80 | ((value >>> 49) & 0x7FL)));
        if(value > 0x3FFFFFFFFFFL || value < 0)    buf.write((byte)(0x80 | ((value >>> 42) & 0x7FL)));
        if(value > 0x7FFFFFFFFL || value < 0)      buf.write((byte)(0x80 | ((value >>> 35) & 0x7FL)));
        if(value > 0xFFFFFFFL || value < 0)        buf.write((byte)(0x80 | ((value >>> 28) & 0x7FL)));
        if(value > 0x1FFFFFL || value < 0)         buf.write((byte)(0x80 | ((value >>> 21) & 0x7FL)));
        if(value > 0x3FFFL || value < 0)           buf.write((byte)(0x80 | ((value >>> 14) & 0x7FL)));
        if(value > 0x7FL || value < 0)             buf.write((byte)(0x80 | ((value >>> 7) & 0x7FL)));

        buf.write((byte)(value & 0x7FL));
    }

    /**
     * Write a variable-byte encoded long to an OutputStream.  Same encoding as
     * {@link #writeVLong(ByteDataBuffer, long)}.
     */
    public static void writeVLong(OutputStream out, long value) throws IOException {
        if(value < 0) out.write((byte)0x81); // the 64th (sign) bit, with the continuation flag

        if(value > 0xFFFFFFFFFFFFFFL || value < 0) out.write((byte)(0x80 | ((value >>> 56) & 0x7FL)));
        if(value > 0x1FFFFFFFFFFFFL || value < 0)  out.write((byte)(0x80 | ((value >>> 49) & 0x7FL)));
        if(value > 0x3FFFFFFFFFFL || value < 0)    out.write((byte)(0x80 | ((value >>> 42) & 0x7FL)));
        if(value > 0x7FFFFFFFFL || value < 0)      out.write((byte)(0x80 | ((value >>> 35) & 0x7FL)));
        if(value > 0xFFFFFFFL || value < 0)        out.write((byte)(0x80 | ((value >>> 28) & 0x7FL)));
        if(value > 0x1FFFFFL || value < 0)         out.write((byte)(0x80 | ((value >>> 21) & 0x7FL)));
        if(value > 0x3FFFL || value < 0)           out.write((byte)(0x80 | ((value >>> 14) & 0x7FL)));
        if(value > 0x7FL || value < 0)             out.write((byte)(0x80 | ((value >>> 7) & 0x7FL)));

        out.write((byte)(value & 0x7FL));
    }

    /**
     * Write a variable-byte encoded int.  Non-negative values occupy between
     * 1 and 5 bytes; negative values always occupy 5 bytes.
     */
    public static void writeVInt(ByteDataBuffer buf, int value) {
        if(value > 0x0FFFFFFF || value < 0) buf.write((byte)(0x80 | ((value >>> 28))));
        if(value > 0x1FFFFF || value < 0)   buf.write((byte)(0x80 | ((value >>> 21) & 0x7F)));
        if(value > 0x3FFF || value < 0)     buf.write((byte)(0x80 | ((value >>> 14) & 0x7F)));
        if(value > 0x7F || value < 0)       buf.write((byte)(0x80 | ((value >>> 7) & 0x7F)));

        buf.write((byte)(value & 0x7F));
    }

    /**
     * Write a variable-byte encoded int to an OutputStream.  Same encoding as
     * {@link #writeVInt(ByteDataBuffer, int)}.
     */
    public static void writeVInt(OutputStream out, int value) throws IOException {
        if(value > 0x0FFFFFFF || value < 0) out.write((byte)(0x80 | ((value >>> 28))));
        if(value > 0x1FFFFF || value < 0)   out.write((byte)(0x80 | ((value >>> 21) & 0x7F)));
        if(value > 0x3FFF || value < 0)     out.write((byte)(0x80 | ((value >>> 14) & 0x7F)));
        if(value > 0x7F || value < 0)       out.write((byte)(0x80 | ((value >>> 7) & 0x7F)));

        out.write((byte)(value & 0x7F));
    }

    /**
     * @return true if the byte at the given position is the null sentinel (0x80).
     */
    public static boolean readVNull(ByteData arr, long position) {
        return arr.get(position) == (byte)0x80;
    }

    /**
     * Read a variable-byte encoded int beginning at the given position.
     *
     * @throws RuntimeException if the bytes at position encode null
     */
    public static int readVInt(ByteData arr, long position) {
        byte b = arr.get(position++);

        if(b == (byte) 0x80)
            throw new RuntimeException("Attempting to read null value as int");

        int value = b & 0x7F;
        while ((b & 0x80) != 0) {
            b = arr.get(position++);
            value <<= 7;
            value |= (b & 0x7F);
        }

        return value;
    }

    /**
     * Read a variable-byte encoded int from an InputStream.
     *
     * @throws IOException if the stream ends before the varint terminates
     * @throws RuntimeException if the bytes encode null
     */
    public static int readVInt(InputStream in) throws IOException {
        byte b = readByte(in);

        if(b == (byte) 0x80)
            throw new RuntimeException("Attempting to read null value as int");

        int value = b & 0x7F;
        while ((b & 0x80) != 0) {
            b = readByte(in);
            value <<= 7;
            value |= (b & 0x7F);
        }

        return value;
    }

    /**
     * Read a variable-byte encoded long beginning at the given position.
     *
     * @throws RuntimeException if the bytes at position encode null
     */
    public static long readVLong(ByteData arr, long position) {
        byte b = arr.get(position++);

        if(b == (byte) 0x80)
            throw new RuntimeException("Attempting to read null value as long");

        long value = b & 0x7F;
        while ((b & 0x80) != 0) {
            b = arr.get(position++);
            value <<= 7;
            value |= (b & 0x7F);
        }

        return value;
    }

    /**
     * Read a variable-byte encoded long from an InputStream.
     *
     * @throws IOException if the stream ends before the varint terminates
     * @throws RuntimeException if the bytes encode null
     */
    public static long readVLong(InputStream in) throws IOException {
        byte b = readByte(in);

        if(b == (byte) 0x80)
            throw new RuntimeException("Attempting to read null value as long");

        long value = b & 0x7F;
        while ((b & 0x80) != 0) {
            b = readByte(in);
            value <<= 7;
            value |= (b & 0x7F);
        }

        return value;
    }

    /**
     * Read one byte, failing loudly if the stream is exhausted.  The previous
     * code cast in.read()'s -1 EOF marker to a byte (0xFF), whose continuation
     * bit is set -- so a truncated stream caused an infinite read loop.
     */
    private static byte readByte(InputStream in) throws IOException {
        int b = in.read();
        if(b < 0)
            throw new IOException("Unexpected end of stream while reading a varint");
        return (byte)b;
    }

    /**
     * @return the number of bytes {@link #writeVInt} will emit for this value.
     */
    public static int sizeOfVInt(int value) {
        if(value < 0)
            return 5;

        if(value < 0x80)
            return 1;
        if(value < 0x4000)
            return 2;
        if(value < 0x200000)
            return 3;
        if(value < 0x10000000)
            return 4;
        return 5;
    }

    /**
     * @return the number of bytes {@link #writeVLong} will emit for this value.
     */
    public static int sizeOfVLong(long value) {
        if(value < 0L)
            return 10;

        if(value < 0x80L)
            return 1;
        if(value < 0x4000L)
            return 2;
        if(value < 0x200000L)
            return 3;
        if(value < 0x10000000L)
            return 4;
        if(value < 0x800000000L)
            return 5;
        if(value < 0x40000000000L)
            return 6;
        if(value < 0x2000000000000L)
            return 7;
        if(value < 0x100000000000000L)
            return 8;
        return 9;
    }

    /**
     * Count how many varints (including null sentinels) are packed into the
     * given range of bytes.
     */
    public static int countVarIntsInRange(ByteData byteData, long fieldPosition, int length) {
        int numInts = 0;
        boolean insideInt = false;

        for(int i=0;i<length;i++) {
            byte b = byteData.get(fieldPosition + i);
            if((b & 0x80) == 0) {
                // terminating byte: a complete varint ends here
                numInts++;
                insideInt = false;
            } else if(!insideInt && b == (byte)0x80) {
                // a lone 0x80 at the start of a value is the null sentinel
                numInts++;
            } else {
                insideInt = true;
            }
        }

        return numInts;
    }
}
| 8,345 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/schema/FastBlobSchema.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.record.schema;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
/**
* A schema for a record contained in a FastBlob.<p/>
*
* The fields each have a position, which is the order in which they will appear in a FastBlob serialized representation.<p/>
*
* The schema is a hash table of Strings (field name) to field position.<p/>
*
* Schemas are flat lists of fields, each specified by (fieldName, fieldType, objectType). objectType will be null for primitive types.
*
* @author dkoszewnik
*
*/
public class FastBlobSchema {
private final String schemaName;
private final int hashedPositionArray[];
private final String fieldNames[];
private final FieldDefinition fieldDefinitions[];
private int size;
    public FastBlobSchema(String schemaName, int numFields) {
        this.schemaName = schemaName;
        // Hash table sized to the next power of two >= numFields * 10/7, keeping the
        // load factor at or below ~70% so linear probes in getPosition() stay short.
        this.hashedPositionArray = new int[1 << (32 - Integer.numberOfLeadingZeros(numFields * 10 / 7))];
        this.fieldNames = new String[numFields];
        this.fieldDefinitions = new FieldDefinition[numFields];
        Arrays.fill(hashedPositionArray, -1); // -1 marks an empty bucket
    }
    /**
     * @return the name of this schema, as supplied at construction time.
     */
    public String getName() {
        return schemaName;
    }
/**
* Add a field into this <code>FastBlobSchema</code>.
*
* @return the position of the field.
*
* @deprecated use addField with a new FieldDefinition instead.
*/
    @Deprecated
    public int addField(String fieldName, FieldType fieldType) {
        // wraps the bare type in a FieldDefinition; the positioning logic is shared
        return addField(fieldName, new FieldDefinition(fieldType));
    }
/**
* Add a field into this <code>FastBlobSchema</code>.
*
* The position of the field is hashed into the <code>hashedPositionArray</code> by the hashCode of the fieldName.<p/>
*
* Create a new FieldDefinition for your type as follows:<p/>
*
* <ul>
* <li>For a primitive value, use new FieldDefinition(...)</li>
* <li>For an OBJECT, LIST, or SET, use new TypedFieldDefinition(...)</li>
* <li>For a MAP, use new MapFieldDefinition(...)</li>
* </ul>
*
* @return the position of the field.
*/
public int addField(String fieldName, FieldDefinition fieldDefinition) {
fieldNames[size] = fieldName;
fieldDefinitions[size] = fieldDefinition;
hashPositionIntoArray(size);
return size++;
}
/**
* Returns the position of a field previously added to the map, or -1 if the field has not been added to the map.
*
* The positions of the fields are hashed into the <code>hashedPositionArray</code> by the hashCode of the fieldName.
*/
public int getPosition(String fieldName) {
int hash = hashInt(fieldName.hashCode());
int bucket = hash % hashedPositionArray.length;
int position = hashedPositionArray[bucket];
while(position != -1) {
if(fieldNames[position].equals(fieldName))
return position;
bucket = (bucket + 1) % hashedPositionArray.length;
position = hashedPositionArray[bucket];
}
return -1;
}
/**
* @return The name of the field at the specified position
*/
public String getFieldName(int fieldPosition) {
return fieldNames[fieldPosition];
}
/**
* @return The type of the field with the given name
*/
public FieldType getFieldType(String fieldName) {
int position = getPosition(fieldName);
if(position == -1)
throw new IllegalArgumentException("Field name " + fieldName + " does not exist in schema " + schemaName);
return fieldDefinitions[position].getFieldType();
}
/**
* @return The type of the field at the specified position
*/
public FieldType getFieldType(int fieldPosition) {
return fieldDefinitions[fieldPosition].getFieldType();
}
/**
* @return The type of the field with the given name
*/
public FieldDefinition getFieldDefinition(String fieldName) {
int position = getPosition(fieldName);
if(position == -1)
throw new IllegalArgumentException("Field name " + fieldName + " does not exist in schema " + schemaName);
return getFieldDefinition(position);
}
/**
* @return The FieldDefinition at the specified position
*/
public FieldDefinition getFieldDefinition(int fieldPosition) {
return fieldDefinitions[fieldPosition];
}
/**
* @return the object type of the field with the given name
*/
public String getObjectType(String fieldName) {
int position = getPosition(fieldName);
if(position == -1)
throw new IllegalArgumentException("Field name " + fieldName + " does not exist in schema " + schemaName);
return getObjectType(position);
}
/**
* @return The object type at the specified position
*/
public String getObjectType(int fieldPosition) {
if(fieldDefinitions[fieldPosition] instanceof TypedFieldDefinition) {
return ((TypedFieldDefinition)fieldDefinitions[fieldPosition]).getSubType();
}
return null;
}
/**
* @return The number of fields in this schema.
*/
public int numFields() {
return size;
}
private void hashPositionIntoArray(int ordinal) {
int hash = hashInt(fieldNames[ordinal].hashCode());
int bucket = hash % hashedPositionArray.length;
while(hashedPositionArray[bucket] != -1) {
bucket = (bucket + 1) % hashedPositionArray.length;
}
hashedPositionArray[bucket] = ordinal;
}
private int hashInt(int key) {
key = ~key + (key << 15);
key = key ^ (key >>> 12);
key = key + (key << 2);
key = key ^ (key >>> 4);
key = key * 2057;
key = key ^ (key >>> 16);
return key & Integer.MAX_VALUE;
}
@Override
public boolean equals(Object other) {
if(other instanceof FastBlobSchema) {
FastBlobSchema otherSchema = (FastBlobSchema) other;
if(otherSchema.schemaName.equals(schemaName)) {
if(otherSchema.size == size) {
for(int i=0;i<otherSchema.size;i++) {
if(!otherSchema.getFieldName(i).equals(getFieldName(i))) {
return false;
}
if(!otherSchema.getFieldDefinition(i).equals(getFieldDefinition(i))) {
return false;
}
}
return true;
}
}
}
return false;
}
/**
* Write this FastBlobSchema to a stream.
*/
public void writeTo(DataOutputStream dos) throws IOException {
dos.writeUTF(schemaName);
dos.writeShort(size);
for(int i=0;i<size;i++) {
dos.writeUTF(fieldNames[i]);
writeFieldDefinition(dos, fieldDefinitions[i]);
}
}
private void writeFieldDefinition(DataOutputStream dos, FieldDefinition def)throws IOException {
FieldType fieldType = def.getFieldType();
dos.writeUTF(fieldType.name());
if(fieldType == FieldType.OBJECT || fieldType == FieldType.LIST || fieldType == FieldType.SET) {
if(def instanceof TypedFieldDefinition) {
dos.writeUTF(((TypedFieldDefinition)def).getSubType());
} else {
dos.writeUTF("");
}
} else if(fieldType == FieldType.MAP) {
if(def instanceof MapFieldDefinition) {
MapFieldDefinition mfd = (MapFieldDefinition)def;
dos.writeUTF(mfd.getKeyType());
dos.writeUTF(mfd.getValueType());
} else {
dos.writeUTF("");
dos.writeUTF("");
}
}
}
/**
* Read a FastBlobSchema from a stream.
*/
public static FastBlobSchema readFrom(DataInputStream dis) throws IOException {
String name = dis.readUTF();
int size = dis.readShort();
FastBlobSchema schema = new FastBlobSchema(name, size);
for(int i=0;i<size;i++) {
String fieldName = dis.readUTF();
FieldDefinition def = readFieldDefinition(dis);
schema.addField(fieldName, def);
}
return schema;
}
private static FieldDefinition readFieldDefinition(DataInputStream dis) throws IOException {
FieldType fieldType = Enum.valueOf(FieldType.class, dis.readUTF());
if(fieldType == FieldType.OBJECT || fieldType == FieldType.LIST || fieldType == FieldType.SET) {
String subType = dis.readUTF();
if(!subType.isEmpty())
return new TypedFieldDefinition(fieldType, subType);
} else if(fieldType == FieldType.MAP) {
String keyType = dis.readUTF();
String valueType = dis.readUTF();
if(!keyType.isEmpty())
return new MapFieldDefinition(keyType, valueType);
}
return new FieldDefinition(fieldType);
}
/**
* All allowable field types.
*/
public static enum FieldType {
OBJECT(-1, false),
BOOLEAN(1, false),
INT(-1, false),
LONG(-1, false),
FLOAT(4, false),
DOUBLE(8, false),
STRING(-1, true),
BYTES(-1, true),
LIST(-1, true),
SET(-1, true),
@Deprecated
/**
* @deprecated Use SET or LIST instead of COLLECTION
*/
COLLECTION(-1, true),
MAP(-1, true);
private final int fixedLength;
private final boolean varIntEncodesLength;
private FieldType(int fixedLength, boolean varIntEncodesLength) {
this.fixedLength = fixedLength;
this.varIntEncodesLength = varIntEncodesLength;
}
public int getFixedLength() {
return fixedLength;
}
public boolean startsWithVarIntEncodedLength() {
return varIntEncodesLength;
}
}
}
| 8,346 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/schema/FieldDefinition.java | package com.netflix.zeno.fastblob.record.schema;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
/**
 * Describes a schema field carrying only its {@code FieldType}.  Subclasses
 * ({@code TypedFieldDefinition}, {@code MapFieldDefinition}) add sub-type
 * information for fields which reference other named types.
 */
public class FieldDefinition {

    private final FieldType fieldType;

    public FieldDefinition(FieldType fieldType) {
        this.fieldType = fieldType;
    }

    public FieldType getFieldType() {
        return fieldType;
    }

    @Override
    public int hashCode() {
        // Equivalent to the conventional prime-accumulator formula for a
        // single field: 31 * 1 + fieldType.hashCode().
        return 31 + (fieldType == null ? 0 : fieldType.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        // getClass() comparison (rather than instanceof) keeps equality
        // symmetric across the FieldDefinition subclass hierarchy.
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return fieldType == ((FieldDefinition) obj).fieldType;
    }
}
| 8,347 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/schema/TypedFieldDefinition.java | package com.netflix.zeno.fastblob.record.schema;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
/**
 * Field definition for fields which reference another named type
 * (OBJECT, LIST and SET fields); carries the name of that sub-type.
 */
public class TypedFieldDefinition extends FieldDefinition {

    private final String subType;

    public TypedFieldDefinition(FieldType fieldType, String subType) {
        super(fieldType);
        this.subType = subType;
    }

    public String getSubType() {
        return subType;
    }

    @Override
    public int hashCode() {
        return 31 * super.hashCode() + (subType == null ? 0 : subType.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        // super.equals already performs the null and getClass() checks.
        if (!super.equals(obj)) {
            return false;
        }
        TypedFieldDefinition that = (TypedFieldDefinition) obj;
        return subType == null ? that.subType == null : subType.equals(that.subType);
    }
}
| 8,348 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/record/schema/MapFieldDefinition.java | package com.netflix.zeno.fastblob.record.schema;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
public class MapFieldDefinition extends FieldDefinition {
private final String keyType;
private final String valueType;
public MapFieldDefinition(String keyType, String valueType) {
super(FieldType.MAP);
this.keyType = keyType;
this.valueType = valueType;
}
public String getKeyType() {
return keyType;
}
public String getValueType() {
return valueType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + ((keyType == null) ? 0 : keyType.hashCode());
result = prime * result
+ ((valueType == null) ? 0 : valueType.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (!super.equals(obj))
return false;
if (getClass() != obj.getClass())
return false;
MapFieldDefinition other = (MapFieldDefinition) obj;
if (keyType == null) {
if (other.keyType != null)
return false;
} else if (!keyType.equals(other.keyType))
return false;
if (valueType == null) {
if (other.valueType != null)
return false;
} else if (!valueType.equals(other.valueType))
return false;
return true;
}
} | 8,349 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/FastBlobReader.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
import com.netflix.zeno.fastblob.FastBlobHeapFriendlyClientFrameworkSerializer;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.FastBlobDeserializationRecord;
import com.netflix.zeno.fastblob.record.FastBlobSerializationRecord;
import com.netflix.zeno.fastblob.record.StreamingByteData;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.fastblob.state.ByteArrayOrdinalMap;
import com.netflix.zeno.fastblob.state.FastBlobTypeDeserializationState;
import com.netflix.zeno.serializer.NFTypeSerializer;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
/**
* Reads FastBlob snapshots and deltas from streams.<p/>
*
* The modifications will be applied to the FastBlobStateEngine supplied in the constructor.
*/
public class FastBlobReader {

    private final FastBlobStateEngine stateEngine;

    /// Parses the blob header; pluggable via setFastBlobHeaderReader, defaults to the Zeno format.
    private FastBlobHeaderReader headerReader;
    /// Optional observer of per-type add/remove/reuse counts; no events are fired when null.
    private FastBlobReaderEventHandler eventHandler = null;

    public FastBlobReader(FastBlobStateEngine stateEngine) {
        this.stateEngine = stateEngine;
        this.headerReader = new ZenoFastBlobHeaderReader();
    }

    public void setFastBlobHeaderReader(FastBlobHeaderReader headerReader) {
        this.headerReader = headerReader;
    }

    public void setEventHandler(FastBlobReaderEventHandler eventHandler) {
        this.eventHandler = eventHandler;
    }

    /**
     * Read a snapshot from the specified stream. Apply the snapshot to the FastBlobStateEngine supplied in the constructor of this class.
     */
    public void readSnapshot(InputStream is) throws IOException {
        FastBlobHeader header = readHeader(is);
        StreamingByteData byteData = getStreamingByteData(is, header.getDeserializationBufferSizeHint());
        DataInputStream dis = new DataInputStream(byteData);
        int numTypes = header.getNumberOfTypes();
        /// A null latest version means no state is currently populated, so a plain load suffices.
        /// Otherwise perform the "heap-friendly" double snapshot refresh, which reuses
        /// previously-deserialized objects whose serialized representation is unchanged.
        if(stateEngine.getLatestVersion() == null) {
            readSnapshotTypes(byteData, dis, numTypes);
        } else {
            readSnapshotTypesDoubleSnapshotRefresh(byteData, dis, numTypes);
        }
        ///The version must be set *after* the changes are applied. This will protect against
        ///bad data in the event of an Exception midway through parsing.
        stateEngine.setLatestVersion(header.getVersion());
    }

    /**
     * Read a snapshot with no current states populated.
     *
     * Each type is encoded as: one flags byte, the serialized FastBlobSchema,
     * then the type's object records.
     */
    private void readSnapshotTypes(StreamingByteData byteData, DataInputStream dis, int numTypes) throws IOException {
        for(int i=0;i<numTypes;i++) {
            /// type flags byte -- reserved for later use
            dis.read();
            FastBlobSchema schema = FastBlobSchema.readFrom(dis);
            readTypeStateObjects(byteData, schema);
        }
    }

    /**
     * Read a snapshot with a state currently populated. This is the "heap-friendly" version.
     *
     * A single ByteArrayOrdinalMap is reused (and cleared) across types to hold the
     * previous state's serialized representations for object-reuse lookups.
     */
    private void readSnapshotTypesDoubleSnapshotRefresh(StreamingByteData byteData, DataInputStream dis, int numTypes) throws IOException {
        ByteArrayOrdinalMap serializedRepresentationMap = new ByteArrayOrdinalMap();
        stateEngine.prepareForDoubleSnapshotRefresh();
        for(int i=0;i<numTypes;i++) {
            /// type flags byte -- reserved for later use
            dis.read();
            FastBlobSchema schema = FastBlobSchema.readFrom(dis);
            readTypeStateObjectsDoubleSnapshotRefresh(byteData, schema, serializedRepresentationMap);
            serializedRepresentationMap.clear();
        }
        stateEngine.cleanUpAfterDoubleSnapshotRefresh();
    }

    /**
     * Read a delta from the specified stream. Apply the delta to the FastBlobStateEngine supplied in the constructor of this class.<p/>
     *
     * This method performs no validation of the data. It is assumed that the data currently represented in the FastBlobStateEngine is
     * in the state which the server was in when it was produced this delta. Otherwise, the results are undefined.
     */
    public void readDelta(InputStream is) throws IOException {
        FastBlobHeader header = readHeader(is);
        StreamingByteData byteData = getStreamingByteData(is, header.getDeserializationBufferSizeHint());
        DataInputStream dis = new DataInputStream(byteData);
        int numTypes = header.getNumberOfTypes();
        /// For each type a delta carries removals first, then additions.
        for(int i=0;i<numTypes;i++) {
            /// type flags byte -- reserved for later use
            dis.read();
            FastBlobSchema schema = FastBlobSchema.readFrom(dis);
            readTypeStateRemovals(byteData, schema);
            readTypeStateObjects(byteData, schema);
        }
        ///The version must be set *after* the changes are applied. This will protect against
        ///bad data in the event of an Exception midway through parsing.
        stateEngine.setLatestVersion(header.getVersion());
    }

    /**
     * Read the header, record its header tags on the state engine, and return it.
     */
    private FastBlobHeader readHeader(InputStream is) throws IOException {
        FastBlobHeader header = headerReader.readHeader(is);
        stateEngine.addHeaderTags(header.getHeaderTags());
        return header;
    }

    private StreamingByteData getStreamingByteData(InputStream is, int deserializationBufferSizeHint) throws IOException {
        StreamingByteData byteData = new StreamingByteData(is, deserializationBufferSizeHint);
        return byteData;
    }

    /**
     * Reads a gap-encoded list of removed ordinals for one type and removes each
     * from the type's deserialization state (if the state engine tracks this type).
     */
    private void readTypeStateRemovals(StreamingByteData byteData, FastBlobSchema schema) throws IOException {
        FastBlobTypeDeserializationState<?> typeDeserializationState = stateEngine.getTypeDeserializationState(schema.getName());
        int numRemovals = VarInt.readVInt(byteData);
        int currentRemoval = 0;
        if(numRemovals != 0 && eventHandler != null) {
            eventHandler.removedObjects(schema.getName(), numRemovals);
        }
        /// Ordinals are gap-encoded: each VarInt is the delta from the previous removed ordinal.
        for(int i=0;i<numRemovals;i++) {
            currentRemoval += VarInt.readVInt(byteData);
            if(typeDeserializationState != null) {
                typeDeserializationState.remove(currentRemoval);
            }
        }
    }

    /**
     * Reads the added object records for one type and adds each to the type's
     * deserialization state.  When the state engine does not track this type,
     * the records are still consumed (stream position is advanced) but discarded.
     */
    private void readTypeStateObjects(StreamingByteData byteData,FastBlobSchema schema) throws IOException {
        FastBlobDeserializationRecord rec = new FastBlobDeserializationRecord(schema, byteData);
        FastBlobTypeDeserializationState<?> typeDeserializationState = stateEngine.getTypeDeserializationState(schema.getName());
        int numObjects = VarInt.readVInt(byteData);
        if(numObjects != 0 && eventHandler != null) {
            eventHandler.addedObjects(schema.getName(), numObjects);
        }
        int currentOrdinal = 0;
        for(int j=0;j<numObjects;j++) {
            /// Ordinals are gap-encoded: each VarInt is the delta from the previous ordinal.
            int currentOrdinalDelta = VarInt.readVInt(byteData);
            currentOrdinal += currentOrdinalDelta;
            /// rec.position(...) points the record at the current object and returns its size,
            /// which is then used to skip the stream past the record.
            int objectSize = rec.position(byteData.currentStreamPosition());
            byteData.incrementStreamPosition(objectSize);
            if(typeDeserializationState != null) {
                typeDeserializationState.add(currentOrdinal, rec);
            }
        }
    }

    /**
     * Double-snapshot-refresh variant of readTypeStateObjects.<p/>
     *
     * Each incoming record is deserialized and then re-serialized; if the resulting
     * bytes match an object from the previous state (looked up in {@code map}) and
     * the serialization round-trip was clean, the previous object instance is reused
     * instead of keeping the new one -- reducing garbage during a full refresh.
     */
    private <T> void readTypeStateObjectsDoubleSnapshotRefresh(StreamingByteData byteData, FastBlobSchema schema, ByteArrayOrdinalMap map) throws IOException{
        FastBlobHeapFriendlyClientFrameworkSerializer frameworkSerializer = (FastBlobHeapFriendlyClientFrameworkSerializer)stateEngine.getFrameworkSerializer();
        FastBlobDeserializationRecord rec = new FastBlobDeserializationRecord(schema, byteData);
        FastBlobTypeDeserializationState<T> typeDeserializationState = stateEngine.getTypeDeserializationState(schema.getName());
        FastBlobSerializationRecord serializationRecord = null;
        ByteDataBuffer deserializedRecordBuffer = null;
        int numObjects = VarInt.readVInt(byteData);
        int numObjectsReused = 0;
        int numFlawedSerializationIntegrity = 0;
        if(numObjects != 0 && eventHandler != null) {
            eventHandler.addedObjects(schema.getName(), numObjects);
        }
        if(typeDeserializationState != null) {
            serializationRecord = new FastBlobSerializationRecord(typeDeserializationState.getSchema());
            /// Integrity checking is disabled while the previous state's serialized forms are
            /// rebuilt into the ordinal map, then re-enabled for the per-object round-trips below.
            frameworkSerializer.setCheckSerializationIntegrity(false);
            deserializedRecordBuffer = new ByteDataBuffer();
            typeDeserializationState.populateByteArrayOrdinalMap(map);
            frameworkSerializer.setCheckSerializationIntegrity(true);
        }
        int currentOrdinal = 0;
        for(int j=0;j<numObjects;j++) {
            /// Ordinals are gap-encoded: each VarInt is the delta from the previous ordinal.
            int currentOrdinalDelta = VarInt.readVInt(byteData);
            currentOrdinal += currentOrdinalDelta;
            int recordSize = rec.position(byteData.currentStreamPosition());
            if(typeDeserializationState != null) {
                NFTypeSerializer<T> serializer = typeDeserializationState.getSerializer();
                /// Round-trip: deserialize the incoming record, re-serialize it, and look the
                /// bytes up among the previous state's objects.
                T deserializedObject = serializer.deserialize(rec);
                serializer.serialize(deserializedObject, serializationRecord);
                serializationRecord.writeDataTo(deserializedRecordBuffer);
                int previousOrdinal = map.get(deserializedRecordBuffer);
                serializationRecord.reset();
                deserializedRecordBuffer.reset();
                if(previousOrdinal != -1 && !frameworkSerializer.isSerializationIntegrityFlawed()) {
                    /// Identical bytes and a clean round-trip: reuse the previous object instance.
                    typeDeserializationState.copyPrevious(currentOrdinal, previousOrdinal);
                    numObjectsReused++;
                } else {
                    if(frameworkSerializer.isSerializationIntegrityFlawed()) {
                        numFlawedSerializationIntegrity++;
                    }
                    typeDeserializationState.add(currentOrdinal, rec);
                }
                frameworkSerializer.clearSerializationIntegrityFlawedFlag();
            }
            byteData.incrementStreamPosition(recordSize);
        }
        if(typeDeserializationState != null) {
            typeDeserializationState.clearPreviousObjects();
            typeDeserializationState.createIdentityOrdinalMap();
        }
        if(eventHandler != null) {
            if(numObjects != 0) {
                eventHandler.reusedObjects(schema.getName(), numObjectsReused);
            }
            if(numFlawedSerializationIntegrity != 0) {
                eventHandler.objectsFailedReserialization(schema.getName(), numFlawedSerializationIntegrity);
            }
        }
    }
}
| 8,350 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/FastBlobWriter.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.state.FastBlobTypeDeserializationState;
import com.netflix.zeno.fastblob.state.FastBlobTypeSerializationState;
import com.netflix.zeno.fastblob.state.ThreadSafeBitSet;
/**
* Writes FastBlob images to streams.
*/
public class FastBlobWriter {

    private final FastBlobStateEngine stateEngine;
    /// Index into stateEngine.getImageConfigurations() selecting which image's membership to write.
    private final int imageIndex;
    /// Writes the blob header; pluggable via setFastBlobHeaderWriter, defaults to the Zeno format.
    private FastBlobHeaderWriter headerWriter;

    /** Convenience constructor: writes image 0 with the standard Zeno header writer. */
    public FastBlobWriter(FastBlobStateEngine stateEngine) {
        this(stateEngine, 0);
    }

    public FastBlobWriter(FastBlobStateEngine stateEngine, int imageIndex) {
        this(stateEngine, imageIndex, new ZenoFastBlobHeaderWriter());
    }

    public void setFastBlobHeaderWriter(FastBlobHeaderWriter headerWriter) {
        this.headerWriter = headerWriter;
    }

    /**
     * This FastBlobWriter will write a single image to a stream, as either a snapshot or delta.<p/>
     *
     * The configuration for the image which this will write is contained in the list returned by <code>stateEngine.getImageConfigurations()</code>
     * at the index specified by <code>imageIndex</code>
     *
     * @param stateEngine
     * @param imageIndex
     */
    public FastBlobWriter(FastBlobStateEngine stateEngine, int imageIndex, FastBlobHeaderWriter headerWriter) {
        this.stateEngine = stateEngine;
        this.imageIndex = imageIndex;
        this.headerWriter = headerWriter;
    }

    /**
     * Write a snapshot to the specified stream.
     */
    // NOTE(review): declares `throws Exception` while every other write method declares
    // IOException -- candidate for narrowing in a future cleanup.
    public void writeSnapshot(OutputStream os) throws Exception {
        writeSnapshot(new DataOutputStream(os));
    }

    /**
     * Writes the header followed by, for each type: one reserved flags byte, the
     * serialized schema, and all objects belonging to this writer's image.
     */
    public void writeSnapshot(DataOutputStream os) throws IOException {
        writeHeader(os);
        for(FastBlobTypeSerializationState<?> typeState : stateEngine.getOrderedSerializationStates()) {
            if(!typeState.isReadyForWriting())
                throw new RuntimeException("This state engine is not ready for writing! Have you remembered to call stateEngine.prepareForWrite()?");
            /// type flags byte -- reserved for later use
            os.write(0);
            /// write the schema
            typeState.getSchema().writeTo(os);
            ThreadSafeBitSet imageMembershipBitSet = typeState.getImageMembershipBitSet(imageIndex);
            serializeTypeStateObjects(os, typeState, imageMembershipBitSet);
        }
    }

    /**
     * Like writeSnapshot, but selects objects from the deserialization states
     * rather than from a single image's membership bit set.
     */
    public void writeNonImageSpecificSnapshot(DataOutputStream os) throws IOException {
        writeHeader(os);
        for(FastBlobTypeSerializationState<?> typeState : stateEngine.getOrderedSerializationStates()) {
            if(!typeState.isReadyForWriting())
                throw new RuntimeException("This state engine is not ready for writing! Have you remembered to call stateEngine.prepareForWrite()?");
            FastBlobTypeDeserializationState<?> typeDeserializationState = stateEngine.getTypeDeserializationState(typeState.getSchema().getName());
            /// type flags byte -- reserved for later use
            os.write(0);
            /// write the schema
            typeState.getSchema().writeTo(os);
            serializeTypeStateObjects(os, typeState, typeDeserializationState);
        }
    }

    /**
     * Write a delta to the specified stream.
     */
    public void writeDelta(OutputStream os) throws IOException {
        writeDelta(new DataOutputStream(os));
    }

    /**
     * Writes, for each type, the objects removed since the previous cycle followed
     * by the objects added since the previous cycle (see serializeDelta).
     */
    public void writeDelta(DataOutputStream os) throws IOException {
        writeHeader(os);
        for(FastBlobTypeSerializationState<?> typeState : stateEngine.getOrderedSerializationStates()) {
            if(!typeState.isReadyForWriting())
                throw new RuntimeException("This state engine is not ready for writing! Have you remembered to call stateEngine.prepareForWrite()?");
            /// type flags byte -- reserved for later use
            os.write(0);
            /// write the schema
            typeState.getSchema().writeTo(os);
            ThreadSafeBitSet currentImageMembershipBitSet = typeState.getImageMembershipBitSet(imageIndex);
            ThreadSafeBitSet previousImageMembershipBitSet = typeState.getPreviousCycleImageMembershipBitSet(imageIndex);
            serializeDelta(os, typeState, currentImageMembershipBitSet, previousImageMembershipBitSet);
        }
    }

    /**
     * Write a reverse delta to the specified stream.
     *
     * A reverse delta is the opposite of a delta. A delta removes all unused objects from the previous state and adds all
     * new objects in the current state. A reverse delta removes all new objects in the current state and adds all unused
     * objects from the previous state.
     */
    public void writeReverseDelta(OutputStream os, String previousVersion) throws IOException {
        writeReverseDelta(new DataOutputStream(os), previousVersion);
    }

    /// Identical to writeDelta except the previous-cycle schema is written and the
    /// current/previous bit sets are swapped, producing the inverse transition.
    public void writeReverseDelta(DataOutputStream os, String previousVersion) throws IOException {
        writeHeader(os, previousVersion);
        for(FastBlobTypeSerializationState<?> typeState : stateEngine.getOrderedSerializationStates()) {
            if(!typeState.isReadyForWriting())
                throw new RuntimeException("This state engine is not ready for writing! Have you remembered to call stateEngine.prepareForWrite()?");
            /// Types with no previous-cycle schema did not exist last cycle and are skipped.
            if(typeState.getPreviousStateSchema() != null) {
                /// type flags byte -- reserved for later use
                os.write(0);
                /// write the schema
                typeState.getPreviousStateSchema().writeTo(os);
                ThreadSafeBitSet currentImageMembershipBitSet = typeState.getImageMembershipBitSet(imageIndex);
                ThreadSafeBitSet previousImageMembershipBitSet = typeState.getPreviousCycleImageMembershipBitSet(imageIndex);
                serializeDelta(os, typeState, previousImageMembershipBitSet, currentImageMembershipBitSet);
            }
        }
    }

    /// Writes one type's delta: removals (in previous but not current) then additions
    /// (in current but not previous), each as a gap-encoded ordinal list.
    private void serializeDelta(DataOutputStream os, FastBlobTypeSerializationState<?> typeState, ThreadSafeBitSet currentStateOrdinals, ThreadSafeBitSet prevStateOrdinals) throws IOException {
        /// get all of the ordinals contained in the previous cycle, which are no longer contained in this cycle.  These all need to be removed.
        ThreadSafeBitSet removedTypeStateObjectsBitSet = prevStateOrdinals.andNot(currentStateOrdinals);
        serializeTypeStateRemovals(os, removedTypeStateObjectsBitSet);
        /// get all of the ordinals contained in this cycle, which were not contained in the previous cycle.  These all need to be added.
        ThreadSafeBitSet addedTypeStateObjectsBitSet = currentStateOrdinals.andNot(prevStateOrdinals);
        serializeTypeStateObjects(os, typeState, addedTypeStateObjectsBitSet);
    }

    private void writeHeader(DataOutputStream os) throws IOException {
        String version = stateEngine.getLatestVersion() != null ? stateEngine.getLatestVersion() : "";
        writeHeader(os, version);
    }

    private void writeHeader(DataOutputStream os, String version) throws IOException {
        FastBlobHeader header = new FastBlobHeader();
        header.setVersion(version);
        header.setHeaderTags(stateEngine.getHeaderTags());
        /// The deserialization StreamingByteData buffer size needs to accommodate the largest single object.
        /// write the ceil(log2(maxSize)) as a single byte at the beginning of the stream.
        /// upon deserialization, this byte will be read and the StreamingByteData buffer can be sized appropriately.
        int deserializationBufferSizeHint = 32 - Integer.numberOfLeadingZeros(stateEngine.getMaxSingleObjectLength() - 1);
        header.setDeserializationBufferSizeHint(deserializationBufferSizeHint);
        header.setNumberOfTypes(stateEngine.getOrderedSerializationStates().size());
        headerWriter.writeHeader(header,stateEngine,os);
    }

    /**
     * Writes the object count followed by each set ordinal (gap-encoded) and its
     * serialized record bytes.
     */
    private void serializeTypeStateObjects(DataOutputStream os, FastBlobTypeSerializationState<?> typeState, ThreadSafeBitSet includeOrdinals) throws IOException {
        int currentBitSetCapacity = includeOrdinals.currentCapacity();
        int currentOrdinal = 0;
        /// write the number of objects
        VarInt.writeVInt(os, includeOrdinals.cardinality());
        for(int i=0;i<currentBitSetCapacity;i++) {
            if(includeOrdinals.get(i)) {
                /// gap-encoded ordinals
                VarInt.writeVInt(os, i - currentOrdinal);
                currentOrdinal = i;
                /// typeState will use the ByteArrayOrdinalMap to write the length and
                /// serialized representation of the object.
                typeState.writeObjectTo(os, i);
            }
        }
    }

    /**
     * Variant which iterates the deserialization state's populated ordinals
     * instead of an image-membership bit set.
     */
    // NOTE(review): writes via typeState.writeObjectTo(os, i) using the deserialization
    // state's ordinal -- assumes serialization and deserialization states share ordinal
    // assignments; confirm against the state engine's ordinal management.
    private void serializeTypeStateObjects(DataOutputStream os, FastBlobTypeSerializationState<?> typeState,
            FastBlobTypeDeserializationState<?> typeDeserializationState) throws IOException {
        /// write the number of objects
        VarInt.writeVInt(os, typeDeserializationState.countObjects());
        int currentOrdinal = 0;
        for(int i=0;i<=typeDeserializationState.maxOrdinal();i++) {
            Object obj = typeDeserializationState.get(i);
            if(obj != null) {
                /// gap-encoded ordinals
                VarInt.writeVInt(os, i - currentOrdinal);
                currentOrdinal = i;
                /// typeState will use the ByteArrayOrdinalMap to write the length and
                /// serialized representation of the object.
                typeState.writeObjectTo(os, i);
            }
        }
    }

    /**
     * Writes the removal count followed by each removed ordinal, gap-encoded.
     */
    private void serializeTypeStateRemovals(DataOutputStream os, ThreadSafeBitSet removals) throws IOException {
        int bitSetCapacity = removals.currentCapacity();
        int currentRemoval = 0;
        /// write the number of removals
        VarInt.writeVInt(os, removals.cardinality());
        for(int i=0;i<bitSetCapacity;i++) {
            if(removals.get(i)) {
                /// gap-encoded ordinals
                VarInt.writeVInt(os, i - currentRemoval);
                currentRemoval = i;
            }
        }
    }
}
| 8,351 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/FastBlobReaderEventHandler.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
/**
 * Receives notifications about per-type events observed while a
 * {@code FastBlobReader} applies a snapshot or delta.  Install an
 * implementation via {@code FastBlobReader.setEventHandler(...)}.
 */
public interface FastBlobReaderEventHandler {

    /**
     * Called when objects of the given type are added to the state engine.
     *
     * @param typeName     the name of the type's schema
     * @param numAdditions the number of objects added
     */
    public void addedObjects(String typeName, int numAdditions);

    /**
     * Called when objects of the given type are removed while processing a delta.
     *
     * @param typeName    the name of the type's schema
     * @param numRemovals the number of objects removed
     */
    public void removedObjects(String typeName, int numRemovals);

    /**
     * Called when previously-deserialized objects are reused (instead of being
     * re-instantiated) during a double snapshot refresh.
     *
     * @param typeName  the name of the type's schema
     * @param numReused the number of objects reused
     */
    public void reusedObjects(String typeName, int numReused);

    /**
     * Called when objects fail the re-serialization integrity check during a
     * double snapshot refresh and therefore cannot be reused.
     *
     * @param typeName    the name of the type's schema
     * @param numFailures the number of objects which failed re-serialization
     */
    public void objectsFailedReserialization(String typeName, int numFailures);
}
| 8,352 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/FastBlobHeader.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
import java.util.HashMap;
import java.util.Map;
/**
* Represents the fast blob header
*
* @author plakhina
*
*/
/**
 * Represents the fast blob header: the state version string, arbitrary key/value
 * header tags, a deserialization buffer size hint, and the number of serialized types.
 *
 * @author plakhina
 */
public class FastBlobHeader {

    /// format identifier; changed when the serialized format becomes backwards incompatible
    public static final int FAST_BLOB_VERSION_HEADER = 1029;

    private String version = "";
    private Map<String, String> headerTags = new HashMap<String, String>();
    private int deserializationBufferSizeHint;
    private int numberOfTypes;

    public String getVersion() {
        return version;
    }

    public void setVersion(String version) {
        this.version = version;
    }

    public Map<String, String> getHeaderTags() {
        return headerTags;
    }

    public void setHeaderTags(Map<String, String> headerTags) {
        this.headerTags = headerTags;
    }

    public int getDeserializationBufferSizeHint() {
        return deserializationBufferSizeHint;
    }

    public void setDeserializationBufferSizeHint(int deserializationBufferSizeHint) {
        this.deserializationBufferSizeHint = deserializationBufferSizeHint;
    }

    public int getNumberOfTypes() {
        return numberOfTypes;
    }

    public void setNumberOfTypes(int numberOfTypes) {
        this.numberOfTypes = numberOfTypes;
    }

    /**
     * Two headers are equal when all four fields match.
     * NOTE: assumes version and headerTags are non-null, as guaranteed by the
     * field initializers (setters do not enforce this).
     */
    @Override
    public boolean equals(Object other) {
        if(other instanceof FastBlobHeader) {
            FastBlobHeader oh = (FastBlobHeader)other;
            return version.equals(oh.getVersion()) &&
                   headerTags.equals(oh.getHeaderTags()) &&
                   deserializationBufferSizeHint == oh.getDeserializationBufferSizeHint() &&
                   numberOfTypes == oh.getNumberOfTypes();
        }
        return false;
    }

    /**
     * FIX: equals(Object) was overridden without hashCode(), violating the
     * java.lang.Object contract and breaking use in hash-based collections.
     * Computed over exactly the fields equals() compares.
     */
    @Override
    public int hashCode() {
        int result = version.hashCode();
        result = 31 * result + headerTags.hashCode();
        result = 31 * result + deserializationBufferSizeHint;
        result = 31 * result + numberOfTypes;
        return result;
    }
}
| 8,353 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/FastBlobHeaderReader.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
import java.io.IOException;
import java.io.InputStream;
/**
 * Strategy interface for reading a serialized {@link FastBlobHeader} from the
 * beginning of a fast blob stream.
 */
public interface FastBlobHeaderReader {

    /**
     * Reads and returns the header from the supplied stream, consuming the header bytes.
     *
     * @param is the stream positioned at the start of the blob
     * @return the parsed header
     * @throws IOException if the stream cannot be read
     * @throws FastBlobMalformedDataException if the data is not a compatible fast blob header
     */
    public FastBlobHeader readHeader(InputStream is) throws IOException,FastBlobMalformedDataException;
}
| 8,354 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/FastBlobHeaderWriter.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import java.io.IOException;
import java.io.DataOutputStream;
/**
 * Strategy interface for serializing a {@link FastBlobHeader} at the
 * beginning of a fast blob stream.
 */
public interface FastBlobHeaderWriter {

    /**
     * Write the header to the data output stream.
     *
     * @param header the header to serialize
     * @param stateEngine the state engine being serialized; implementations may consult it for additional data
     * @param dos the stream to write to
     * @throws IOException if the stream cannot be written
     */
    public void writeHeader(FastBlobHeader header, FastBlobStateEngine stateEngine, DataOutputStream dos) throws IOException;
}
| 8,355 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/ZenoFastBlobHeaderWriter.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.fastblob.record.VarInt;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Map;
public class ZenoFastBlobHeaderWriter implements FastBlobHeaderWriter {
public void writeHeader(FastBlobHeader header, FastBlobStateEngine stateEngine, DataOutputStream dos) throws IOException {
/// save 4 bytes to indicate FastBlob version header. This will be changed to indicate backwards incompatibility.
dos.writeInt(FastBlobHeader.FAST_BLOB_VERSION_HEADER);
/// write the version from the state engine
dos.writeUTF(header.getVersion());
/// write the header tags -- intended to include input source data versions
dos.writeShort(header.getHeaderTags().size());
for (Map.Entry<String, String> headerTag : header.getHeaderTags().entrySet()) {
dos.writeUTF(headerTag.getKey());
dos.writeUTF(headerTag.getValue());
}
dos.write(header.getDeserializationBufferSizeHint());
/// flags byte -- reserved for later use
dos.write(0);
VarInt.writeVInt(dos, header.getNumberOfTypes());
}
}
| 8,356 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/FastBlobMalformedDataException.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
/**
 * Thrown when a fast blob stream cannot be parsed -- for example when the
 * stream's format version does not match {@link FastBlobHeader#FAST_BLOB_VERSION_HEADER}.
 * Unchecked so it can propagate through deserialization call stacks.
 */
@SuppressWarnings("serial")
public class FastBlobMalformedDataException extends RuntimeException {

    /** Creates an exception with no detail message or cause. */
    public FastBlobMalformedDataException() {
    }

    /** @param message description of the malformed data condition */
    public FastBlobMalformedDataException(final String message) {
        super(message);
    }

    /** @param cause the underlying failure */
    public FastBlobMalformedDataException(final Throwable cause) {
        super(cause);
    }

    /**
     * @param message description of the malformed data condition
     * @param cause the underlying failure
     */
    public FastBlobMalformedDataException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| 8,357 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/io/ZenoFastBlobHeaderReader.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.io;
import com.netflix.zeno.fastblob.record.VarInt;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
/**
 * Default {@link FastBlobHeaderReader}; the inverse of ZenoFastBlobHeaderWriter.
 */
public class ZenoFastBlobHeaderReader implements FastBlobHeaderReader {

    /**
     * Reads and validates a serialized {@link FastBlobHeader} from the stream.
     *
     * @param is the stream positioned at the start of the blob
     * @return the parsed header
     * @throws IOException if the stream cannot be read
     * @throws FastBlobMalformedDataException if the format version does not match,
     *         or the stream ends before the header is complete
     */
    @Override
    public FastBlobHeader readHeader(InputStream is) throws IOException, FastBlobMalformedDataException {
        FastBlobHeader header = new FastBlobHeader();
        DataInputStream dis = new DataInputStream(is);

        int headerVersion = dis.readInt();
        if(headerVersion != FastBlobHeader.FAST_BLOB_VERSION_HEADER) {
            throw new FastBlobMalformedDataException("The FastBlob you are trying to read is incompatible. The expected FastBlob version was " + FastBlobHeader.FAST_BLOB_VERSION_HEADER + " but the actual version was " + headerVersion);
        }

        String latestVersion = dis.readUTF();
        header.setVersion(latestVersion);

        Map<String, String> headerTags = readHeaderTags(dis);
        header.setHeaderTags(headerTags);

        /// single byte; mirrors the writer, which emits it with write(int).
        /// FIX: the -1 EOF sentinel was previously stored as the hint unchecked.
        int deserializationBufferSizeHint = is.read();
        if(deserializationBufferSizeHint == -1)
            throw new FastBlobMalformedDataException("Unexpected end of stream while reading the deserialization buffer size hint");
        header.setDeserializationBufferSizeHint(deserializationBufferSizeHint);

        /// flags byte -- reserved for later use. FIX: EOF was previously ignored.
        if(dis.read() == -1)
            throw new FastBlobMalformedDataException("Unexpected end of stream while reading the flags byte");

        int numTypes = VarInt.readVInt(is);
        header.setNumberOfTypes(numTypes);

        return header;
    }

    /**
     * Reads the map of string header tags: a short count followed by UTF key/value pairs.
     *
     * @param dis the stream positioned at the tag count
     * @throws IOException if the stream cannot be read
     */
    private Map<String, String> readHeaderTags(DataInputStream dis) throws IOException {
        int numHeaderTags = dis.readShort();
        Map<String, String> headerTags = new HashMap<String, String>();
        for (int i = 0; i < numHeaderTags; i++) {
            headerTags.put(dis.readUTF(), dis.readUTF());
        }
        return headerTags;
    }
}
| 8,358 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/FreeOrdinalTracker.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
import com.netflix.zeno.fastblob.record.VarInt;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
/**
* A stack of unused ordinals.<p/>
*
* This helps fill the "holes" generated by removing unused ordinals during server processing.
*
* @author dkoszewnik
*
*/
/**
 * A stack of unused ordinals.<p/>
 *
 * Fills the "holes" created when ordinals are deallocated during server processing,
 * so ordinal values are recycled rather than growing without bound.
 *
 * @author dkoszewnik
 */
public class FreeOrdinalTracker {

    private int freeOrdinals[];
    private int size;
    private int nextEmptyOrdinal;

    public FreeOrdinalTracker() {
        this(0);
    }

    private FreeOrdinalTracker(int nextEmptyOrdinal) {
        this.freeOrdinals = new int[64];
        this.nextEmptyOrdinal = nextEmptyOrdinal;
        this.size = 0;
    }

    /**
     * @return a previously returned ordinal if any are pooled, otherwise the next
     *         never-used ordinal in the sequence 0-n
     */
    public int getFreeOrdinal() {
        if (size > 0)
            return freeOrdinals[--size];

        return nextEmptyOrdinal++;
    }

    /**
     * Makes an ordinal available for reuse after the object it was assigned to is discarded.
     *
     * @param ordinal the ordinal to recycle
     */
    public void returnOrdinalToPool(int ordinal) {
        growIfFull();
        freeOrdinals[size++] = ordinal;
    }

    // grow the backing array by 50% when it is exhausted
    private void growIfFull() {
        if (size == freeOrdinals.length)
            freeOrdinals = Arrays.copyOf(freeOrdinals, freeOrdinals.length * 3 / 2);
    }

    /**
     * Writes this tracker as variable-length ints: the next empty ordinal, the pool
     * size, then each pooled ordinal in stack order.
     */
    public void serializeTo(OutputStream os) throws IOException {
        VarInt.writeVInt(os, nextEmptyOrdinal);
        VarInt.writeVInt(os, size);

        for (int i = 0; i < size; i++)
            VarInt.writeVInt(os, freeOrdinals[i]);
    }

    /**
     * Reads a tracker previously written by {@link #serializeTo(OutputStream)}.
     */
    public static FreeOrdinalTracker deserializeFrom(InputStream is) throws IOException {
        FreeOrdinalTracker tracker = new FreeOrdinalTracker(VarInt.readVInt(is));

        int pooledCount = VarInt.readVInt(is);
        for (int i = 0; i < pooledCount; i++)
            tracker.returnOrdinalToPool(VarInt.readVInt(is));

        return tracker;
    }
}
| 8,359 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/ObjectIdentityOrdinalMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
import java.util.Arrays;
import java.util.List;
/**
* A mapping of objects to their ordinals. For the purposes of this mapping, objects are only found if they are == to each other.<p/>
*
* This is used during "heap-friendly" double snapshot refreshes with the FastBlob.<p/>
*
* The vast majority of the extra memory required to maintain this mapping is the hashedOrdinals[] array, which is just an int array. The values of this
* array are the ordinals of the objects located at the position of each object's identity hash. Collisions are resolved via linear probing.
*
* @author dkoszewnik
*
*/
public class ObjectIdentityOrdinalMap {
private final List<Object> objects;
private final int hashedOrdinals[];
private final int hashModMask;
/**
* The List of Objects passed in here should be the same list as held by the FastBlobTypeDeserializationState.<p/>
*
* These Objects should be arranged in ordinal order, that is, the Object with ordinal x is contained at index x in the List.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public ObjectIdentityOrdinalMap(List objects) {
int size = 0;
for(int i=0;i<objects.size();i++) {
if(objects.get(i) != null)
size++;
}
int arraySize = (size * 10) / 8; // 80% load factor
arraySize = 1 << (32 - Integer.numberOfLeadingZeros(arraySize));
hashedOrdinals = new int[arraySize];
hashModMask = arraySize - 1;
Arrays.fill(hashedOrdinals, -1);
for(int i=0;i<objects.size();i++) {
if(objects.get(i) != null)
put(objects.get(i), i);
}
this.objects = objects;
}
private void put(Object obj, int ordinal) {
int hash = rehash(System.identityHashCode(obj));
int bucket = hash & hashModMask;
while(hashedOrdinals[bucket] != -1)
bucket = (bucket + 1) & hashModMask;
hashedOrdinals[bucket] = ordinal;
}
public int get(Object obj) {
int hash = rehash(System.identityHashCode(obj));
int bucket = hash & hashModMask;
while(hashedOrdinals[bucket] != -1) {
if(objects.get(hashedOrdinals[bucket]) == obj)
return hashedOrdinals[bucket];
bucket = (bucket + 1) & hashModMask;
}
return -1;
}
private int rehash(int hash) {
hash = ~hash + (hash << 15);
hash = hash ^ (hash >>> 12);
hash = hash + (hash << 2);
hash = hash ^ (hash >>> 4);
hash = hash * 2057;
hash = hash ^ (hash >>> 16);
return hash & Integer.MAX_VALUE;
}
}
| 8,360 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/TypeDeserializationStateListener.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
/**
* A TypeDeserializationStateListener will listen to modifications which are made to the type state
* during blob consumption. These modifications will be communicated as a set of instances which are
* removed and a set of instances which are added.
*
* @author dkoszewnik
*/
/**
 * A TypeDeserializationStateListener will listen to modifications which are made to the type state
 * during blob consumption. These modifications will be communicated as a set of instances which are
 * removed and a set of instances which are added.
 *
 * @param <T> the type of the deserialized instances being observed
 *
 * @author dkoszewnik
 */
public abstract class TypeDeserializationStateListener<T> {

    /**
     * Called once each time an instance is removed from the TypeDeserializationState<p/>
     *
     * @deprecated use {@link #removedObject(Object, int)} instead.
     */
    @Deprecated
    public void removedObject(T obj) { }

    /**
     * Called once each time an instance is removed from the TypeDeserializationState.<p/>
     *
     * Please note that in the case of a double snapshot load, all object ordinals are shuffled.
     * In this case, the "obj" parameter may not currently be assigned to the provided ordinal, and
     * addedObject may have been called with a different object at the same ordinal.
     *
     * @param obj the removed instance
     * @param ordinal the ordinal the instance had been assigned
     */
    public abstract void removedObject(T obj, int ordinal);

    /**
     * Called once each time an instance's ordinal is reassigned. This will happen in the case of a double snapshot reload.<p/>
     *
     * This is called once for every object which is copied from the previous state, whether or not the ordinal has changed. In some cases,
     * oldOrdinal and newOrdinal may be the same value.
     *
     * @param obj the surviving instance
     * @param oldOrdinal the ordinal in the previous state
     * @param newOrdinal the ordinal in the new state
     */
    public abstract void reassignedObject(T obj, int oldOrdinal, int newOrdinal);

    /**
     * Called once each time an instance is added to the TypeSerializationState
     *
     * @deprecated use {@link #addedObject(Object, int)} instead.
     */
    @Deprecated
    public void addedObject(T obj) { }

    /**
     * Called once each time an object instance is added to the TypeSerializationState
     *
     * @param obj the added instance
     * @param ordinal the ordinal assigned to the instance
     */
    public abstract void addedObject(T obj, int ordinal);

    /// shared stateless instance backing noopCallback(); safe to share since it holds no state
    private static final TypeDeserializationStateListener<Object> NOOP_CALLBACK =
            new TypeDeserializationStateListener<Object>() {
                @Override
                public void removedObject(Object obj, int ordinal) { }
                @Override
                public void addedObject(Object obj, int ordinal) { }
                @Override
                public void reassignedObject(Object obj, int oldOrdinal, int newOrdinal) { }
            };

    /**
     * @return a callback which does nothing with modification events
     */
    @SuppressWarnings("unchecked")
    public static <T> TypeDeserializationStateListener<T> noopCallback() {
        return (TypeDeserializationStateListener<T>) NOOP_CALLBACK;
    }
}
| 8,361 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/WeakObjectOrdinalMap.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.Arrays;
/**
* Weak hash lookup map associate object references to already seen ordinals.
* The fundamental assumption made here is that objects are immutable, so that
* once the ordinal is assigned to an object, the ordinal stays the same
* throughout the life of the object.
*
* @author timurua
*
*/
public class WeakObjectOrdinalMap {
/**
* Hashmap entry
*/
public static final class Entry extends WeakReference<Object> {
// identity hashcode
private int hash;
// ordinal
private int ordinal;
// membership flags
private long imageMembershipsFlags;
// linked list pointer
private Entry next;
/**
* Creates new entry.
*/
Entry(Object key, ReferenceQueue<Object> queue, int hash, int ordinal, long imageMembershipsFlags, Entry next) {
super(key, queue);
this.hash = hash;
this.ordinal = ordinal;
this.imageMembershipsFlags = imageMembershipsFlags;
this.next = next;
}
public int getOrdinal() {
return ordinal;
}
public long getImageMembershipsFlags() {
return imageMembershipsFlags;
}
public boolean hasImageMembershipsFlags(long newImageMembershipsFlags) {
return (imageMembershipsFlags | newImageMembershipsFlags) == imageMembershipsFlags;
}
@Override
public String toString() {
Object v = get();
return v == null ? "null" : v.toString();
}
}
/**
* The map is divided into segments to increase concurrency
*/
private class Segment {
// The same concept as in HashMap. If the entry array is becoming too
// dense, it should be increased
private static final int LOAD_FACTOR_PERCENT = 75;
private static final int MINIMUM_CAPACITY = 256;
private static final int MAXIMUM_CAPACITY = (1<<30);
private int count = 0;
private int maxThreshold = 0;
private int minThreshold = 0;
private Entry[] entries;
private final ReferenceQueue<Object> queue = new ReferenceQueue<Object>();
public Segment(){
resize(MINIMUM_CAPACITY);
}
public synchronized void put(Object object, int hashCode, int ordinal, long imageMembershipsFlags) {
removeGarbageCollectedEntities();
int index = index(hashCode, entries.length);
Entry current = entries[index];
Entry prev = null;
while (current != null) {
if (current.hash == hashCode) {
Object currentObject = current.get();
if( currentObject == null){
deleteEntry(index, current, prev);
current = current.next;
continue;
} else if (currentObject == object) {
current.imageMembershipsFlags = (current.imageMembershipsFlags | imageMembershipsFlags);
return;
}
}
prev = current;
current = current.next;
}
count++;
Entry first = entries[index];
Entry entry = new Entry(object, queue, hashCode, ordinal, imageMembershipsFlags, first);
entries[index] = entry;
entry.next = first;
checkSize();
return;
}
public synchronized Entry get(Object object, int hashCode) {
removeGarbageCollectedEntities();
int index = index(hashCode, entries.length);
Entry current = entries[index];
Entry prev = null;
while (current != null) {
if (current.hash == hashCode) {
Object currentObject = current.get();
if( currentObject == null){
deleteEntry(index, current, prev);
current = current.next;
continue;
} else if (currentObject == object) {
return current;
}
}
prev = current;
current = current.next;
}
return null;
}
private void checkSize() {
if( count >= minThreshold && count <= maxThreshold ){
return;
}
int newCapacity;
if( count < minThreshold ) {
newCapacity = Math.max(MINIMUM_CAPACITY, entries.length >> 1);
} else {
newCapacity = Math.min(MAXIMUM_CAPACITY, entries.length << 1);
}
// nothing should be done, since capacity is not changed
if (newCapacity == entries.length) {
return;
}
resize(newCapacity);
}
private void resize(int newCapacity) {
Entry[] newEntries = new Entry[newCapacity];
if( entries != null){
for(Entry entry : entries){
Entry current = entry;
while(current != null){
Entry newEntry = current;
current = current.next;
int index = index(newEntry.hash, newEntries.length);
newEntry.next = newEntries[index];
newEntries[index] = newEntry;
}
}
}
minThreshold = (newEntries.length == MINIMUM_CAPACITY) ? 0 : (newEntries.length * LOAD_FACTOR_PERCENT / 200);
maxThreshold = (newEntries.length == MAXIMUM_CAPACITY) ? Integer.MAX_VALUE : newEntries.length * LOAD_FACTOR_PERCENT / 100;
entries = newEntries;
}
private void removeGarbageCollectedEntities() {
for (Object x; (x = queue.poll()) != null; ) {
Entry entry = (Entry) x;
int index = index(entry.hash, entries.length);
Entry current = entries[index];
Entry prev = null;
while (current != null) {
if (current == entry) {
deleteEntry(index, current, prev);
break;
}
prev = current;
current = current.next;
}
}
checkSize();
}
private void deleteEntry(int index, Entry current, Entry prev) {
count--;
if (prev != null) {
prev.next = current.next;
} else {
entries[index] = current.next;
}
}
private final int index(int hashCode, int capacity) {
return (hashCode >>> WeakObjectOrdinalMap.this.logOfSegmentNumber) % capacity;
}
public synchronized void clear() {
while (queue.poll() != null)
;
Arrays.fill(entries, null);
count = 0;
resize(MINIMUM_CAPACITY);
while (queue.poll() != null)
;
}
public synchronized int size() {
removeGarbageCollectedEntities();
return count;
}
}
private final Segment[] segments;
private final int mask;
private final int logOfSegmentNumber;
public WeakObjectOrdinalMap(int logOfSegmentNumber) {
if (logOfSegmentNumber < 1 && logOfSegmentNumber > 32) {
throw new RuntimeException("Invalid power level");
}
segments = new Segment[2 << logOfSegmentNumber];
for(int i=0; i<segments.length; i++){
segments[i] = new Segment();
}
this.mask = (2 << logOfSegmentNumber) - 1;
this.logOfSegmentNumber = logOfSegmentNumber;
}
/**
* Associating the obj with an ordinal
*
* @param obj
* @param ordinal
*/
public void put(Object obj, int ordinal, long imageMembershipsFlags) {
int hashCode = System.identityHashCode(obj);
int segment = segment(hashCode);
segments[segment].put(obj, hashCode, ordinal, imageMembershipsFlags);
}
public Entry getEntry(Object obj) {
int hashCode = System.identityHashCode(obj);
int segment = segment(hashCode);
return segments[segment].get(obj, hashCode);
}
private final int segment(int hashCode) {
return hashCode & mask;
}
public void clear(){
for (Segment segment : segments) {
segment.clear();
}
}
public int size() {
int size = 0;
for (Segment segment : segments) {
size += segment.size();
}
return size;
}
}
| 8,362 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/ThreadSafeBitSet.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLongArray;
import java.util.concurrent.atomic.AtomicReference;
/**
* This is a lock-free, thread-safe version of a {@link java.util.BitSet}.<p/>
*
* Instead of a long array to hold the bits, this implementation uses an AtomicLongArray, then
* does the appropriate compare-and-swap operations when setting the bits.
*
* @author dkoszewnik
*
*/
public class ThreadSafeBitSet {
private final int numLongsPerSegment;
private final int log2SegmentSize;
private final int segmentMask;
private final AtomicReference<ThreadSafeBitSetSegments> segments;
    /**
     * Creates a bit set with the default segment size of 2^14 bits.
     */
    public ThreadSafeBitSet() {
        this(14); /// 16384 bits, 2048 bytes, 256 longs per segment
    }
    /**
     * Creates a bit set whose storage grows in segments of 2^log2SegmentSizeInBits bits.
     *
     * @param log2SegmentSizeInBits log base 2 of the number of bits per segment; at least 6 (one long)
     * @throws IllegalArgumentException if fewer than 64 bits per segment are requested
     */
    public ThreadSafeBitSet(int log2SegmentSizeInBits) {
        if(log2SegmentSizeInBits < 6)
            throw new IllegalArgumentException("Cannot specify fewer than 64 bits in each segment!");

        this.log2SegmentSize = log2SegmentSizeInBits;
        this.numLongsPerSegment = (1 << (log2SegmentSizeInBits - 6));
        this.segmentMask = numLongsPerSegment - 1;

        segments = new AtomicReference<ThreadSafeBitSetSegments>();
        segments.set(new ThreadSafeBitSetSegments(1, numLongsPerSegment));
    }
    /**
     * Wraps an existing set of segments; lets internal operations construct a result
     * set around pre-computed segments without allocating a throwaway initial segment.
     */
    private ThreadSafeBitSet(ThreadSafeBitSetSegments segments, int log2SegmentSizeInBits) {
        this.log2SegmentSize = log2SegmentSizeInBits;
        this.numLongsPerSegment = (1 << (log2SegmentSizeInBits - 6));
        this.segmentMask = numLongsPerSegment - 1;

        this.segments = new AtomicReference<ThreadSafeBitSetSegments>();
        this.segments.set(segments);
    }
    /**
     * Atomically sets the bit at the given position to 1, growing the backing
     * segments if the position lies beyond the current capacity. Lock-free:
     * retries a compare-and-swap until it wins.
     *
     * @param position the zero-based index of the bit to set
     */
    public void set(int position) {
        int segmentPosition = position >>> log2SegmentSize; /// which segment -- div by num bits per segment
        int longPosition = (position >>> 6) & segmentMask; /// which long in the segment -- remainder of div by num bits per segment
        int bitPosition = position & 0x3F; /// which bit in the long -- remainder of div by num bits in long (64)

        AtomicLongArray segment = getSegment(segmentPosition);

        long mask = 1L << bitPosition;

        // Thread safety: we need to loop until we win the race to set the long value.
        while(true) {
            // determine what the new long value will be after we set the appropriate bit.
            long currentLongValue = segment.get(longPosition);
            long newLongValue = currentLongValue | mask;

            // if no other thread has modified the value since we read it, we won the race and we are done.
            if(segment.compareAndSet(longPosition, currentLongValue, newLongValue))
                break;
        }
    }
public boolean get(int position) {
int segmentPosition = position >>> log2SegmentSize; /// which segment -- div by num bits per segment
int longPosition = (position >>> 6) & segmentMask; /// which long in the segment -- remainder of div by num bits per segment
int bitPosition = position & 0x3F; /// which bit in the long -- remainder of div by num bits in long (64)
AtomicLongArray segment = getSegment(segmentPosition);
long mask = 1L << bitPosition;
return ((segment.get(longPosition) & mask) != 0);
}
/**
* @return the number of bits which are set in this bit set.
*/
public int cardinality() {
ThreadSafeBitSetSegments segments = this.segments.get();
int numSetBits = 0;
for(int i=0;i<segments.numSegments();i++) {
AtomicLongArray segment = segments.getSegment(i);
for(int j=0;j<segment.length();j++) {
numSetBits += Long.bitCount(segment.get(j));
}
}
return numSetBits;
}
/**
* @return the number of bits which are current specified by this bit set. This is the maximum value
* to which you might need to iterate, if you were to iterate over all bits in this set.
*/
public int currentCapacity() {
return segments.get().numSegments() * (1 << log2SegmentSize);
}
/**
* Clear all bits to 0.
*/
public void clearAll() {
ThreadSafeBitSetSegments segments = this.segments.get();
for(int i=0;i<segments.numSegments();i++) {
AtomicLongArray segment = segments.getSegment(i);
for(int j=0;j<segment.length();j++) {
segment.set(j, 0L);
}
}
}
/**
* Return a new bit set which contains all bits which are contained in this bit set, and which are NOT contained in the <code>other</code> bit set.<p/>
*
* In other words, return a new bit set, which is a bitwise and with the bitwise not of the other bit set.
*
* @param other
* @return
*/
public ThreadSafeBitSet andNot(ThreadSafeBitSet other) {
if(other.log2SegmentSize != log2SegmentSize)
throw new IllegalArgumentException("Segment sizes must be the same");
ThreadSafeBitSetSegments thisSegments = this.segments.get();
ThreadSafeBitSetSegments otherSegments = other.segments.get();
ThreadSafeBitSetSegments newSegments = new ThreadSafeBitSetSegments(thisSegments.numSegments(), numLongsPerSegment);
for(int i=0;i<thisSegments.numSegments();i++) {
AtomicLongArray thisArray = thisSegments.getSegment(i);
AtomicLongArray otherArray = (i < otherSegments.numSegments()) ? otherSegments.getSegment(i) : null;
AtomicLongArray newArray = newSegments.getSegment(i);
for(int j=0;j<thisArray.length();j++) {
long thisLong = thisArray.get(j);
long otherLong = (otherArray == null) ? 0 : otherArray.get(j);
newArray.set(j, thisLong & ~otherLong);
}
}
ThreadSafeBitSet andNot = new ThreadSafeBitSet(log2SegmentSize);
andNot.segments.set(newSegments);
return andNot;
}
/**
* Return a new bit set which contains all bits which are contained in *any* of the specified bit sets.
*
* @param bitSets
* @return
*/
public static ThreadSafeBitSet orAll(ThreadSafeBitSet... bitSets) {
if(bitSets.length == 0)
return new ThreadSafeBitSet();
int log2SegmentSize = bitSets[0].log2SegmentSize;
int numLongsPerSegment = bitSets[0].numLongsPerSegment;
ThreadSafeBitSetSegments segments[] = new ThreadSafeBitSetSegments[bitSets.length];
int maxNumSegments = 0;
for(int i=0;i<bitSets.length;i++) {
if(bitSets[i].log2SegmentSize != log2SegmentSize)
throw new IllegalArgumentException("Segment sizes must be the same");
segments[i] = bitSets[i].segments.get();
if(segments[i].numSegments() > maxNumSegments)
maxNumSegments = segments[i].numSegments();
}
ThreadSafeBitSetSegments newSegments = new ThreadSafeBitSetSegments(maxNumSegments, numLongsPerSegment);
AtomicLongArray segment[] = new AtomicLongArray[segments.length];
for(int i=0;i<maxNumSegments;i++) {
for(int j=0;j<segments.length;j++) {
segment[j] = i < segments[j].numSegments() ? segments[j].getSegment(i) : null;
}
AtomicLongArray newSegment = newSegments.getSegment(i);
for(int j=0;j<numLongsPerSegment;j++) {
long value = 0;
for(int k=0;k<segments.length;k++) {
if(segment[k] != null)
value |= segment[k].get(j);
}
newSegment.set(j, value);
}
}
ThreadSafeBitSet or = new ThreadSafeBitSet(log2SegmentSize);
or.segments.set(newSegments);
return or;
}
    /**
     * Get the segment at <code>segmentIndex</code>. If this segment does not yet exist, create it
     * (growing the segment array lock-free via compare-and-swap).
     *
     * @param segmentIndex the index of the segment to fetch
     * @return the canonical AtomicLongArray backing that segment
     */
    private AtomicLongArray getSegment(int segmentIndex) {
        ThreadSafeBitSetSegments visibleSegments = segments.get();

        // loop because a lost CAS race means another thread published a (possibly still too small) array
        while(visibleSegments.numSegments() <= segmentIndex) {
            /// Thread safety: newVisibleSegments contains all of the segments from the currently visible segments, plus extra.
            /// all of the segments in the currently visible segments are canonical and will not change.
            ThreadSafeBitSetSegments newVisibleSegments = new ThreadSafeBitSetSegments(visibleSegments, segmentIndex + 1, numLongsPerSegment);

            /// because we are using a compareAndSet, if this thread "wins the race" and successfully sets this variable, then the segments
            /// which are newly defined in newVisibleSegments become canonical.
            if(segments.compareAndSet(visibleSegments, newVisibleSegments)) {
                visibleSegments = newVisibleSegments;
            } else {
                /// If we "lose the race" and are growing the ThreadSafeBitSet segments larger,
                /// then we will gather the new canonical sets from the update which we missed on the next iteration of this loop.
                /// Newly defined segments in newVisibleSegments will be discarded, they do not get to become canonical.
                visibleSegments = segments.get();
            }
        }

        return visibleSegments.getSegment(segmentIndex);
    }
private static class ThreadSafeBitSetSegments {
private final AtomicLongArray segments[];
private ThreadSafeBitSetSegments(int numSegments, int segmentLength) {
AtomicLongArray segments[] = new AtomicLongArray[numSegments];
for(int i=0;i<numSegments;i++) {
segments[i] = new AtomicLongArray(segmentLength);
}
/// Thread safety: Because this.segments is final, the preceding operations in this constructor are guaranteed to be visible to any
/// other thread which accesses this.segments.
this.segments = segments;
}
private ThreadSafeBitSetSegments(ThreadSafeBitSetSegments copyFrom, int numSegments, int segmentLength) {
AtomicLongArray segments[] = new AtomicLongArray[numSegments];
for(int i=0;i<numSegments;i++) {
segments[i] = i < copyFrom.numSegments() ? copyFrom.getSegment(i) : new AtomicLongArray(segmentLength);
}
/// see above re: thread-safety of this assignment
this.segments = segments;
}
public int numSegments() {
return segments.length;
}
public AtomicLongArray getSegment(int index) {
return segments[index];
}
}
/**
* Serialize this ThreadSafeBitSet to an OutputStream
*/
public void serializeTo(DataOutputStream os) throws IOException {
os.write(log2SegmentSize);
ThreadSafeBitSetSegments segments = this.segments.get();
os.writeInt(segments.numSegments());
for(int i=0;i<segments.numSegments();i++) {
AtomicLongArray arr = segments.getSegment(i);
for(int j=0;j<arr.length();j++) {
os.writeLong(arr.get(j));
}
}
}
/**
* Deserialize a ThreadSafeBitSet from an InputStream
*/
public static ThreadSafeBitSet deserializeFrom(DataInputStream dis) throws IOException {
int log2SegmentSize = dis.read();
int numLongsPerSegment = (1 << (log2SegmentSize - 6));
int numSegments = dis.readInt();
ThreadSafeBitSetSegments segments = new ThreadSafeBitSetSegments(numSegments, numLongsPerSegment);
for(int i=0;i<segments.numSegments();i++) {
AtomicLongArray arr = segments.getSegment(i);
for(int j=0;j<numLongsPerSegment;j++) {
arr.set(j, dis.readLong());
}
}
return new ThreadSafeBitSet(segments, log2SegmentSize);
}
}
| 8,363 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/FastBlobTypeSerializationState.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
import com.netflix.zeno.fastblob.FastBlobImageUtils;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.fastblob.OrdinalMapping;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.FastBlobSerializationRecord;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.fastblob.state.WeakObjectOrdinalMap.Entry;
import com.netflix.zeno.serializer.NFTypeSerializer;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
/**
* This class represents the "serialization state" for a single type at some level of the object
* hierarchy in the serialized data.<p/>
*
* This class is responsible for maintaining the mappings between serialized representations of
* its type and ordinals. It performs this responsibility by using a {@link ByteArrayOrdinalMap}.<p/>
*
* This class is also responsible for maintaining data about the set of objects<p/>
*
* This class has a lifecycle during which it alternates between two states:<p/>
*
* <ol>
* <li>Safe to add objects, but not safe to write contained objects to a stream.</li>
* <li>Not safe to add objects, but safe to write contained objects to a stream.</li>
* </ol><p/>
*
* Initially the object will be in state (1).<br/>
* From state (1), if prepareForWrite() is called, it will be transitioned to state (2).<br/>
* From state (2), calling prepareForNextCycle() will transition back to state (1).<p/>
*
* It is safe for multiple threads to add to this state engine or write from this state engine. It<br/>
* is not safe for multiple threads to make lifecycle transitions (all threads must agree on a single state).
*
* @author dkoszewnik
*
*/
public class FastBlobTypeSerializationState<T> {

    /// serializer used to flatten objects of type T into FastBlobSerializationRecords.
    public final NFTypeSerializer<T> serializer;

    /// schema in effect for the current cycle, and the schema from the previous cycle.
    private FastBlobSchema typeSchema;
    private FastBlobSchema previousStateTypeSchema;

    /// per-thread scratch objects, reused across add() calls to avoid repeated allocation.
    private final ThreadLocal<FastBlobSerializationRecord> serializationRecord;
    private final ThreadLocal<ByteDataBuffer> serializedScratchSpace;

    /// maps serialized representations to ordinals.
    private ByteArrayOrdinalMap ordinalMap;

    /// imageMemberships[i]: ordinals referenced by image i during the current cycle;
    /// previousCycleImageMemberships[i]: the same, for the previous cycle.
    private ThreadSafeBitSet imageMemberships[];
    private ThreadSafeBitSet previousCycleImageMemberships[];

    /// identity-based cache from live objects to their assigned ordinals; null when caching is disabled.
    private WeakObjectOrdinalMap objectOrdinalMap;

    /**
     * Creates a state with object-identity ordinal caching enabled.
     *
     * @param serializer
     *            The NFTypeSerializer for this state's type.
     * @param numImages
     *            The number of blob images which will be produced by the
     *            {@link FastBlobStateEngine}.
     */
    public FastBlobTypeSerializationState(NFTypeSerializer<T> serializer, int numImages) {
        this(serializer, numImages, true);
    }

    /**
     *
     * @param serializer The NFTypeSerializer for this state's type.
     * @param numImages The number of blob images which will be produced by the {@link FastBlobStateEngine}.
     * @param shouldUseObjectIdentityOrdinalCaching whether to keep a weak identity map from added objects to their ordinals.
     */
    public FastBlobTypeSerializationState(NFTypeSerializer<T> serializer, int numImages, boolean shouldUseObjectIdentityOrdinalCaching) {
        this.serializer = serializer;
        this.typeSchema = serializer.getFastBlobSchema();
        this.serializationRecord = new ThreadLocal<FastBlobSerializationRecord>();
        this.serializedScratchSpace = new ThreadLocal<ByteDataBuffer>();
        this.ordinalMap = new ByteArrayOrdinalMap();
        this.imageMemberships = initializeImageMembershipBitSets(numImages);
        this.previousCycleImageMemberships = initializeImageMembershipBitSets(numImages);
        if (shouldUseObjectIdentityOrdinalCaching) {
            objectOrdinalMap = new WeakObjectOrdinalMap(8);
        }
    }

    /** @return the name of this type (as reported by its serializer). */
    public String getName() {
        return serializer.getName();
    }

    /** @return the schema in effect for the current cycle. */
    public FastBlobSchema getSchema() {
        return typeSchema;
    }

    /**
     * This is only useful when we start a new server with a different schema than the previous server.
     * The previous state schema gets loaded from the previously serialized server state.
     *
     * @return the schema which was in effect during the previous cycle.
     */
    public FastBlobSchema getPreviousStateSchema() {
        return previousStateTypeSchema;
    }

    /**
     * Add an object to this state. We will create a serialized representation
     * of this object, then assign or retrieve the ordinal for this serialized
     * representation in our {@link ByteArrayOrdinalMap}
     * <p/>
     *
     * Because the FastBlobStateEngine can represent multiple images, it must be
     * specified in *which* images this object should be included. This is
     * accomplished with a boolean array. If the object is included in a
     * specific image, then the imageMembershipsFlag array will contain the
     * boolean value "true", at the index in which that image appears in the
     * list returned by {@link FastBlobStateEngine}.getImageConfigurations()
     *
     * @param data
     * @param imageMembershipsFlags
     * @deprecated use the long-flags variant of add() instead.
     */
    @Deprecated
    public int add(T data, boolean[] imageMembershipsFlags) {
        return add(data, FastBlobImageUtils.toLong(imageMembershipsFlags));
    }

    /**
     * Add an object to this state. We will create a serialized representation of this object, then
     * assign or retrieve the ordinal for this serialized representation in our {@link ByteArrayOrdinalMap}<p/>
     *
     * Because the FastBlobStateEngine can represent multiple images, it must be specified in *which* images
     * this object should be included. This is accomplished with a bit mask: bit i is set when the object is
     * included in the image at index i of the list returned by {@link FastBlobStateEngine}.getImageConfigurations()
     *
     * @param data
     * @param imageMembershipsFlags
     * @return the ordinal assigned to this object's serialized representation
     */
    public int add(T data, long imageMembershipsFlags) {
        if(!ordinalMap.isReadyForAddingObjects())
            throw new RuntimeException("The FastBlobStateEngine is not ready to add more Objects.  Did you remember to call stateEngine.prepareForNextCycle()?");

        /// fast path: if this exact object instance was already added with (at least) these
        /// image flags, reuse its ordinal and skip serialization entirely.
        if (objectOrdinalMap != null) {
            Entry existingEntry = objectOrdinalMap.getEntry(data);
            if (existingEntry != null) {
                if (existingEntry.hasImageMembershipsFlags(imageMembershipsFlags)) {
                    return existingEntry.getOrdinal();
                }
            }
        }

        /// serialize into a per-thread record, copy the flattened bytes into per-thread
        /// scratch space, then assign (or look up) the ordinal for that byte sequence.
        FastBlobSerializationRecord rec = record();
        rec.setImageMembershipsFlags(imageMembershipsFlags);

        serializer.serialize(data, rec);

        ByteDataBuffer scratch = scratch();

        rec.writeDataTo(scratch);

        int ordinal = addData(scratch, imageMembershipsFlags);

        scratch.reset();
        rec.reset();

        if (objectOrdinalMap != null) {
            objectOrdinalMap.put(data, ordinal, imageMembershipsFlags);
        }
        return ordinal;
    }

    /**
     * Hook to add raw data. This is used during FastBlobStateEngine
     * combination.
     *
     * @param data
     * @param imageMembershipsFlags
     * @return the ordinal assigned to this byte sequence
     * @deprecated use the long-flags variant of addData() instead.
     */
    @Deprecated
    public int addData(ByteDataBuffer data, boolean[] imageMembershipsFlags) {
        return addData(data, FastBlobImageUtils.toLong(imageMembershipsFlags));
    }

    /**
     * Hook to add raw data. This is used during FastBlobStateEngine combination.
     *
     * @param data
     * @param imageMembershipsFlags
     * @return the ordinal assigned to this byte sequence
     */
    public int addData(ByteDataBuffer data, long imageMembershipsFlags) {
        int ordinal = ordinalMap.getOrAssignOrdinal(data);
        addOrdinalToImages(imageMembershipsFlags, ordinal);
        return ordinal;
    }

    /**
     * Copy the state data into the provided FastBlobTypeSerializationState.<p/>
     *
     * This is used during FastBlobStateEngine combination.<p/>
     *
     * Thread safety:  This cannot be safely called concurrently with add() operations to *this* state engine.<p/>
     *
     * @param otherState
     * @param ordinalMapping receives the mapping from this state's ordinals to the destination state's ordinals
     */
    public void copyTo(FastBlobTypeSerializationState<?> otherState, OrdinalMapping ordinalMapping) {
        ordinalMap.copySerializedObjectData(otherState, imageMemberships, ordinalMapping);
    }

    /**
     * Fill the data from this serialization state into the provided FastBlobTypeDeserializationState<p/>
     *
     * The provided deserialization state should be of the exact same type as this FastBlobTypeSerializationState (it should contain
     * exactly the same schema).<p/>
     *
     * @param otherState
     */
    public void fillDeserializationState(FastBlobTypeDeserializationState<?> otherState) {
        otherState.populateFromByteOrdinalMap(ordinalMap);
    }

    /**
     * Called to perform a state transition.<p/>
     *
     * Precondition: We are adding objects to this state engine.<br/>
     * Postcondition: We are writing the previously added objects to a FastBlob.
     *
     * @return the length of the maximum serialized object representation for this type.
     */
    public int prepareForWrite() {
        int maxLengthOfAnyRecord = ordinalMap.prepareForWrite();
        return maxLengthOfAnyRecord;
    }

    /**
     * Called to perform a state transition.<p/>
     *
     * Precondition: We are writing the previously added objects to a FastBlob.<br/>
     * Postcondition: We are ready to add objects to this state engine for the next server cycle.
     */
    public void prepareForNextCycle() {
        /// reclaim byte-array space for any ordinal not referenced by at least one image.
        ThreadSafeBitSet usedOrdinals = ThreadSafeBitSet.orAll(imageMemberships);
        ordinalMap.compact(usedOrdinals);

        /// swap the current-cycle and previous-cycle membership bit sets.
        ThreadSafeBitSet temp[] = previousCycleImageMemberships;
        previousCycleImageMemberships = imageMemberships;
        imageMemberships = temp;

        /// roll the schema forward (the serializer may report an updated schema).
        previousStateTypeSchema = typeSchema;
        typeSchema = serializer.getFastBlobSchema();

        for(ThreadSafeBitSet bitSet : imageMemberships) {
            bitSet.clearAll();
        }
        if (objectOrdinalMap != null) {
            objectOrdinalMap.clear();
        }
    }

    /**
     * Write the serialized representation of the object assigned to the specified ordinal to the stream.
     */
    public void writeObjectTo(OutputStream os, int ordinal) throws IOException {
        ordinalMap.writeSerializedObject(os, ordinal);
    }

    /**
     * Is this type state engine in the cycle stage which allows for writing of blob data?
     */
    public boolean isReadyForWriting() {
        return ordinalMap.isReadyForWriting();
    }

    /**
     * @param imageIndex the index of an image in the list returned by FastBlobStateEngine.getImageConfigurations()
     *
     * @return the bit set specifying which ordinals were referenced in the image at the given index during the current cycle.
     */
    public ThreadSafeBitSet getImageMembershipBitSet(int imageIndex) {
        return imageMemberships[imageIndex];
    }

    /**
     * @param imageIndex the index of an image in the list returned by FastBlobStateEngine.getImageConfigurations()
     *
     * @return the bit set specifying which ordinals were referenced in the image at the given index during the previous cycle.
     */
    public ThreadSafeBitSet getPreviousCycleImageMembershipBitSet(int imageIndex) {
        return previousCycleImageMemberships[imageIndex];
    }

    /**
     * Update the bit sets for image membership to indicate that the specified
     * ordinal was referenced.  Bit i of the flags corresponds to the image at
     * index i.
     *
     * @see com.netflix.zeno.fastblob.FastBlobImageUtils.toInteger
     *
     * @param imageMembershipsFlags
     * @param ordinal
     */
    private void addOrdinalToImages(long imageMembershipsFlags, int ordinal) {
        // This code is tightly related to FastBlobImageUtils packing order
        int count = 0;
        while (imageMembershipsFlags != 0) {
            if ((imageMembershipsFlags & 1) != 0) {
                imageMemberships[count].set(ordinal);
            }
            imageMembershipsFlags = imageMembershipsFlags >>> 1;
            count++;
        }
    }

    /// create one empty membership bit set per image.
    private ThreadSafeBitSet[] initializeImageMembershipBitSets(int numImages) {
        ThreadSafeBitSet sets[] = new ThreadSafeBitSet[numImages];
        for(int i=0;i<numImages;i++) {
            sets[i] = new ThreadSafeBitSet();
        }
        return sets;
    }

    /**
     * Get or create a scratch byte array.  Each thread will need its own array, so these
     * are referenced via a ThreadLocal variable.
     */
    private ByteDataBuffer scratch() {
        ByteDataBuffer scratch = serializedScratchSpace.get();
        if(scratch == null) {
            scratch = new ByteDataBuffer(32);
            serializedScratchSpace.set(scratch);
        }
        return scratch;
    }

    /**
     * Get or create a FastBlobSerializationRecord.  Each thread will create and reuse its own record,
     * so these are referenced via a ThreadLocal variable.
     */
    private FastBlobSerializationRecord record() {
        FastBlobSerializationRecord rec = serializationRecord.get();
        if(rec == null) {
            rec = new FastBlobSerializationRecord(typeSchema);
            serializationRecord.set(rec);
        }
        return rec;
    }

    /**
     * Serialize this FastBlobTypeSerializationState to an OutputStream
     */
    public void serializeTo(DataOutputStream os) throws IOException {
        typeSchema.writeTo(os);
        ordinalMap.serializeTo(os);
        for(ThreadSafeBitSet bitSet : imageMemberships) {
            bitSet.serializeTo(os);
        }
    }

    /**
     * Deserialize this FastBlobTypeSerializationState from an InputStream
     *
     * @param is the stream to read from
     * @param numConfigs the number of image membership bit sets which were serialized
     */
    public void deserializeFrom(DataInputStream is, int numConfigs) throws IOException {
        typeSchema = FastBlobSchema.readFrom(is);
        ordinalMap = ByteArrayOrdinalMap.deserializeFrom(is);
        for(int i=0;i<numConfigs;i++) {
            ThreadSafeBitSet bitSet = ThreadSafeBitSet.deserializeFrom(is);
            imageMemberships[i] = bitSet;
        }
    }

    /**
     * Discard a serialized state -- this happens if an object type is completely removed.
     * Reads (and ignores) exactly the bytes which serializeTo() would have written.
     */
    public static void discardSerializedTypeSerializationState(DataInputStream is, int numConfigs) throws IOException {
        FastBlobSchema.readFrom(is);
        ByteArrayOrdinalMap.deserializeFrom(is);
        for(int i=0;i<numConfigs;i++)
            ThreadSafeBitSet.deserializeFrom(is);
    }

}
| 8,364 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/TypeDeserializationStateIterator.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
/**
 *
 * Iterator over the List containing a FastBlobTypeDeserializationState's Objects.<p/>
 *
 * This implementation skips null elements and does not support remove().
 *
 * @author dkoszewnik
 *
 */
public class TypeDeserializationStateIterator<T> implements Iterator<T> {

    private final List<T> list;
    private int currentOrdinal = 0;

    public TypeDeserializationStateIterator(List<T> stateList) {
        this.list = stateList;
        this.currentOrdinal = -1;
        moveToNext();
    }

    @Override
    public boolean hasNext() {
        return currentOrdinal < list.size();
    }

    @Override
    public T next() {
        /// honor the Iterator contract: signal exhaustion with NoSuchElementException
        /// instead of leaking an IndexOutOfBoundsException from the backing list.
        if(!hasNext())
            throw new NoSuchElementException();
        T current = list.get(currentOrdinal);
        moveToNext();
        return current;
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException();
    }

    /// advance currentOrdinal to the next non-null element, or to list.size() if none remain.
    private void moveToNext() {
        currentOrdinal++;
        while(currentOrdinal < list.size()) {
            if(list.get(currentOrdinal) != null)
                return;
            currentOrdinal++;
        }
    }
}
| 8,365 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/ByteArrayOrdinalMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
import com.netflix.zeno.fastblob.FastBlobImageUtils;
import com.netflix.zeno.fastblob.OrdinalMapping;
import com.netflix.zeno.fastblob.StateOrdinalMapping;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.FastBlobDeserializationRecord;
import com.netflix.zeno.fastblob.record.SegmentedByteArray;
import com.netflix.zeno.fastblob.record.SegmentedByteArrayHasher;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.util.SimultaneousExecutor;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLongArray;
/**
*
* This data structure maps byte sequences to ordinals. This is a hash table. The <code>pointersAndOrdinals</code> AtomicLongArray contains
* keys, and the <code>ByteDataBuffer</code> contains values. Each key has two components. The high 28 bits in the key represents the ordinal.
* The low 36 bits represents the pointer to the start position of the byte sequence in the ByteDataBuffer. Each byte sequence is preceded by
* a variable-length integer (see {@link VarInt}), indicating the length of the sequence.<p/>
*
* This implementation is extremely fast. Even though it would be technically correct and clearer,
* using a separate int[] array for the pointers, and an AtomicIntegerArray for the ordinals as keys
* was measured as two orders of magnitude slower.
*
* @author dkoszewnik
*
*/
public class ByteArrayOrdinalMap {
private final static long EMPTY_BUCKET_VALUE = -1L;
/// IMPORTANT: Thread safety: We need volatile access semantics to the individual elements in the
/// pointersAndOrdinals array. This only works in JVMs 1.5 or later (JSR 133).
/// Ordinal is the high 28 bits. Pointer to byte data is the low 36 bits.
private AtomicLongArray pointersAndOrdinals;
private final ByteDataBuffer byteData;
private final FreeOrdinalTracker freeOrdinalTracker;
private int size;
private int sizeBeforeGrow;
private long pointersByOrdinal[];
    public ByteArrayOrdinalMap() {
        this(262144); /// default byte buffer size: 256KB
    }

    /// Construct an empty map.  The hash table starts at 256 buckets and grows at a 70% load factor.
    public ByteArrayOrdinalMap(int bufferSize) {
        this.freeOrdinalTracker = new FreeOrdinalTracker();
        this.byteData = new ByteDataBuffer(bufferSize);
        this.pointersAndOrdinals = emptyKeyArray(256);
        this.sizeBeforeGrow = 179; /// 70% load factor
        this.size = 0;
    }

    /// Reconstruct a map from packed keys and existing byte data (each key holds the
    /// ordinal in its high 28 bits and the data pointer in its low 36 bits).
    private ByteArrayOrdinalMap(long keys[], ByteDataBuffer byteData, FreeOrdinalTracker freeOrdinalTracker, int keyArraySize) {
        this.freeOrdinalTracker = freeOrdinalTracker;
        this.byteData = byteData;
        AtomicLongArray pointersAndOrdinals = emptyKeyArray(keyArraySize);
        populateNewHashArray(pointersAndOrdinals, keys);
        this.pointersAndOrdinals = pointersAndOrdinals;
        this.size = keys.length;
        this.sizeBeforeGrow = keyArraySize * 7 / 10; /// 70% load factor
    }
    /**
     * Add a sequence of bytes to this map.  If the sequence of bytes has already been added to this map, return the originally assigned ordinal.
     * If the sequence of bytes has not been added to this map, assign and return a new ordinal.  This operation is thread-safe.
     * The probe below is the lock-free read path; assignOrdinal() is the synchronized write path.
     */
    public int getOrAssignOrdinal(ByteDataBuffer serializedRepresentation) {
        int hash = SegmentedByteArrayHasher.hashCode(serializedRepresentation);

        /// table length is a power of two, so (hash & (length - 1)) is the bucket index.
        int modBitmask = pointersAndOrdinals.length() - 1;
        int bucket = hash & modBitmask;
        long key = pointersAndOrdinals.get(bucket);

        /// linear probing to resolve collisions.
        while(key != EMPTY_BUCKET_VALUE) {
            if(compare(serializedRepresentation, key)) {
                /// found: the ordinal is packed into the high 28 bits of the key.
                return (int)(key >> 36);
            }

            bucket = (bucket + 1) & modBitmask;
            key = pointersAndOrdinals.get(bucket);
        }

        return assignOrdinal(serializedRepresentation, hash);
    }
    /// acquire the lock before writing.  Returns the existing ordinal if another thread
    /// added this representation between our lock-free probe and lock acquisition.
    private synchronized int assignOrdinal(ByteDataBuffer serializedRepresentation, int hash) {
        if(size > sizeBeforeGrow)
            growKeyArray();

        /// check to make sure that after acquiring the lock, the element still does not exist.
        /// this operation is akin to double-checked locking which is 'fixed' with the JSR 133 memory model in JVM >= 1.5.
        int modBitmask = pointersAndOrdinals.length() - 1;
        int bucket = hash & modBitmask;
        long key = pointersAndOrdinals.get(bucket);

        while(key != EMPTY_BUCKET_VALUE) {
            if(compare(serializedRepresentation, key)) {
                return (int)(key >> 36);
            }

            bucket = (bucket + 1) & modBitmask;
            key = pointersAndOrdinals.get(bucket);
        }

        /// the ordinal for this object still does not exist in the list, even after the lock has been acquired.
        /// it is up to this thread to add it at the current bucket position.
        int ordinal = freeOrdinalTracker.getFreeOrdinal();
        long pointer = byteData.length();

        /// append the length header, then the payload bytes.
        VarInt.writeVInt(byteData, (int)serializedRepresentation.length());
        serializedRepresentation.copyTo(byteData);

        /// pack the key: ordinal in the high 28 bits, pointer in the low 36 bits.
        key = ((long)ordinal << 36) | pointer;

        size++;

        /// this set on the AtomicLongArray has volatile semantics (i.e. behaves like a monitor release).
        /// Any other thread reading this element in the AtomicLongArray will have visibility to all memory writes this thread has made up to this point.
        /// This means the entire byte sequence is guaranteed to be visible to any thread which reads the pointer to that data.
        pointersAndOrdinals.set(bucket, key);

        return ordinal;
    }
    /**
     * Assign a predefined ordinal to a serialized representation.<p/>
     *
     * WARNING: THIS OPERATION IS NOT THREAD-SAFE.<p/>
     *
     * This is intended for use in the client-side heap-safe double snapshot load.
     *
     */
    public void put(ByteDataBuffer serializedRepresentation, int ordinal) {
        if(size > sizeBeforeGrow)
            growKeyArray();

        int hash = SegmentedByteArrayHasher.hashCode(serializedRepresentation);

        int modBitmask = pointersAndOrdinals.length() - 1;
        int bucket = hash & modBitmask;
        long key = pointersAndOrdinals.get(bucket);

        /// linear probing; if this representation is already present, leave its existing ordinal in place.
        while(key != EMPTY_BUCKET_VALUE) {
            if(compare(serializedRepresentation, key))
                return;

            bucket = (bucket + 1) & modBitmask;
            key = pointersAndOrdinals.get(bucket);
        }

        /// append the length header, then the payload bytes.
        long pointer = byteData.length();

        VarInt.writeVInt(byteData, (int)serializedRepresentation.length());
        serializedRepresentation.copyTo(byteData);

        /// pack the key: ordinal in the high 28 bits, pointer in the low 36 bits.
        key = ((long)ordinal << 36) | pointer;

        size++;

        pointersAndOrdinals.set(bucket, key);
    }
/**
* Returns the ordinal for a previously added byte sequence. If this byte sequence has not been added to the map, then -1 is returned.<p/>
*
* This is intended for use in the client-side heap-safe double snapshot load.
*
* @param serializedRepresentation
* @return The ordinal for this serialized representation, or -1.
*/
public int get(ByteDataBuffer serializedRepresentation) {
int hash = SegmentedByteArrayHasher.hashCode(serializedRepresentation);
int modBitmask = pointersAndOrdinals.length() - 1;
int bucket = hash & modBitmask;
long key = pointersAndOrdinals.get(bucket);
/// linear probing to resolve collisions.
while(key != EMPTY_BUCKET_VALUE) {
if(compare(serializedRepresentation, key)) {
return (int)(key >> 36);
}
bucket = (bucket + 1) & modBitmask;
key = pointersAndOrdinals.get(bucket);
}
return -1;
}
/**
* Remove all entries from this map, but reuse the existing arrays when populating the map next time.
*
* This is intended for use in the client-side heap-safe double snapshot load.
*/
public void clear() {
for(int i=0;i<pointersAndOrdinals.length();i++) {
pointersAndOrdinals.set(i, EMPTY_BUCKET_VALUE);
}
byteData.reset();
size = 0;
}
/**
* Create an array mapping the ordinals to pointers, so that they can be easily looked up
* when writing to blob streams.
*
* @return the maximum length, in bytes, of any byte sequence in this map.
*/
public int prepareForWrite() {
int maxOrdinal = 0;
int maxLength = 0;
for(int i=0;i<pointersAndOrdinals.length();i++) {
long key = pointersAndOrdinals.get(i);
if(key != EMPTY_BUCKET_VALUE) {
int ordinal = (int)(key >> 36);
if(ordinal > maxOrdinal)
maxOrdinal = ordinal;
}
}
pointersByOrdinal = new long[maxOrdinal + 1];
Arrays.fill(pointersByOrdinal, -1);
for(int i=0;i<pointersAndOrdinals.length();i++) {
long key = pointersAndOrdinals.get(i);
if(key != EMPTY_BUCKET_VALUE) {
int ordinal = (int)(key >> 36);
pointersByOrdinal[ordinal] = key & 0xFFFFFFFFFL;
int dataLength = VarInt.readVInt(byteData.getUnderlyingArray(), pointersByOrdinal[ordinal]);
if(dataLength > maxLength)
maxLength = dataLength;
}
}
return maxLength;
}
    /**
     * Reclaim space in the byte array used in the previous cycle, but not referenced in this cycle.<p/>
     *
     * This is achieved by shifting all used byte sequences down in the byte array, then updating
     * the key array to reflect the new pointers and exclude the removed entries.  This is also where ordinals
     * which are unused are returned to the pool.<p/>
     *
     * @param usedOrdinals a bit set representing the ordinals which are currently referenced by any image.
     */
    public void compact(ThreadSafeBitSet usedOrdinals) {
        long populatedReverseKeys[] = new long[size];

        /// gather every occupied key, with its fields swapped so the pointer occupies the high bits:
        /// the normal key layout is (ordinal << 36 | pointer); the reversed layout is (pointer << 28 | ordinal).
        int counter = 0;

        for(int i=0;i<pointersAndOrdinals.length();i++) {
            long key = pointersAndOrdinals.get(i);
            if(key != EMPTY_BUCKET_VALUE) {
                populatedReverseKeys[counter++] = key << 28 | key >>> 36;
            }
        }

        /// sorting the reversed keys orders them by pointer, so the copy-down below proceeds front to back.
        Arrays.sort(populatedReverseKeys);

        SegmentedByteArray arr = byteData.getUnderlyingArray();
        long currentCopyPointer = 0;

        for(int i=0;i<populatedReverseKeys.length;i++) {
            int ordinal = (int)(populatedReverseKeys[i] & 0xFFFFFFF);

            if(usedOrdinals.get(ordinal)) {
                long pointer = populatedReverseKeys[i] >> 28;
                int length = VarInt.readVInt(arr, pointer);
                length += VarInt.sizeOfVInt(length);

                /// slide this record down over the gap left by any discarded records before it.
                if(currentCopyPointer != pointer)
                    arr.copy(arr, pointer, currentCopyPointer, length);

                /// shifting left by 36 drops the stale pointer bits and leaves (ordinal << 36);
                /// OR in the record's new location to restore the normal key layout.
                populatedReverseKeys[i] = populatedReverseKeys[i] << 36 | currentCopyPointer;

                currentCopyPointer += length;
            } else {
                /// no image references this ordinal any longer: recycle it and drop its entry.
                freeOrdinalTracker.returnOrdinalToPool(ordinal);
                populatedReverseKeys[i] = EMPTY_BUCKET_VALUE;
            }
        }

        byteData.setPosition(currentCopyPointer);

        for(int i=0;i<pointersAndOrdinals.length();i++) {
            pointersAndOrdinals.set(i, EMPTY_BUCKET_VALUE);
        }

        populateNewHashArray(pointersAndOrdinals, populatedReverseKeys);
        size = usedOrdinals.cardinality();

        /// back to the add phase: the ordinal-indexed pointer table is now stale.
        pointersByOrdinal = null;
    }
/**
* Write the byte sequence of an object specified by an ordinal to the OutputStream.
*
* @throws IOException
*/
public void writeSerializedObject(OutputStream out, int ordinal) throws IOException {
long pointer = pointersByOrdinal[ordinal] & 0xFFFFFFFFFL;
int length = VarInt.readVInt(byteData.getUnderlyingArray(), pointer);
pointer += VarInt.sizeOfVInt(length);
byteData.getUnderlyingArray().writeTo(out, pointer, length);
}
    /// true when prepareForWrite() has built the ordinal-indexed pointer table.
    public boolean isReadyForWriting() {
        return pointersByOrdinal != null;
    }

    /// true when the map is in the add phase (the pointer table is absent, e.g. after compact()).
    public boolean isReadyForAddingObjects() {
        return pointersByOrdinal == null;
    }

    /// number of bytes of serialized data currently held in the byte buffer.
    public long getDataSize() {
        return byteData.length();
    }
    /**
     * Fill a deserialization state from the serialized data which exists in this ByteArrayOrdinalMap
     *
     * @param fill the deserialization state to populate
     */
    void fillDeserializationStateFromData(final FastBlobTypeDeserializationState<?> fill) {
        SimultaneousExecutor executor = new SimultaneousExecutor(1);
        final int numThreads = executor.getMaximumPoolSize();

        fill.ensureCapacity(maxOrdinal() + 1);

        for(int i=0;i<numThreads;i++) {
            final int threadNumber = i;
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    /// each thread works with its own record instance.
                    FastBlobDeserializationRecord rec = new FastBlobDeserializationRecord(fill.getSchema(), byteData.getUnderlyingArray());
                    /// strided partitioning: thread t handles buckets t, t+numThreads, t+2*numThreads, ...
                    for(int i=threadNumber;i<pointersAndOrdinals.length();i += numThreads) {
                        long pointerAndOrdinal = pointersAndOrdinals.get(i);
                        if(pointerAndOrdinal != EMPTY_BUCKET_VALUE) {
                            /// unpack the key: low 36 bits are the data pointer, high 28 bits the ordinal.
                            long pointer = pointerAndOrdinal & 0xFFFFFFFFFL;
                            int ordinal = (int)(pointerAndOrdinal >> 36);

                            /// skip the length header to position the record at the payload.
                            int sizeOfData = VarInt.readVInt(byteData.getUnderlyingArray(), pointer);
                            pointer += VarInt.sizeOfVInt(sizeOfData);

                            rec.position(pointer);

                            fill.add(ordinal, rec);
                        }
                    }
                }
            });
        }

        executor.awaitUninterruptibly();
    }
    /**
     * Copy all of the data from this ByteArrayOrdinalMap to the provided FastBlobTypeSerializationState.
     *
     * Image memberships for each ordinal are determined via the provided array of ThreadSafeBitSets.
     *
     * @param destState the serialization state receiving the remapped records
     * @param imageMemberships per-image ordinal membership bit sets for this map's data
     * @param ordinalMapping receives the mapping from this map's ordinals to the destination's newly assigned ordinals
     */
    void copySerializedObjectData(final FastBlobTypeSerializationState<?> destState, final ThreadSafeBitSet imageMemberships[],
            final OrdinalMapping ordinalMapping) {
        final StateOrdinalMapping stateOrdinalMapping = ordinalMapping.createStateOrdinalMapping(destState.getName(), maxOrdinal());

        SimultaneousExecutor executor = new SimultaneousExecutor(8);
        final int numThreads = executor.getMaximumPoolSize();

        for(int i=0;i<numThreads;i++) {
            final int threadNumber = i;
            executor.submit( new Runnable() {
                @Override
                public void run() {
                    /// per-thread scratch objects; none of these are shared across worker threads.
                    final ByteDataBuffer mappedBuffer = new ByteDataBuffer();
                    final FastBlobDeserializationRecord rec = new FastBlobDeserializationRecord(destState.getSchema(), byteData.getUnderlyingArray());
                    final boolean imageMembershipsFlags[] = new boolean[imageMemberships.length];
                    final OrdinalRemapper remapper = new OrdinalRemapper(ordinalMapping);

                    /// strided partitioning: thread t handles buckets t, t+numThreads, t+2*numThreads, ...
                    for(int j = threadNumber;j < pointersAndOrdinals.length();j += numThreads) {
                        long pointerAndOrdinal = pointersAndOrdinals.get(j);
                        if(pointerAndOrdinal != EMPTY_BUCKET_VALUE) {
                            /// unpack the key: low 36 bits are the data pointer, high 28 bits the ordinal.
                            long pointer = pointerAndOrdinal & 0xFFFFFFFFFL;
                            int ordinal = (int)(pointerAndOrdinal >> 36);

                            /// gather this ordinal's membership across all images.
                            for(int imageIndex=0;imageIndex<imageMemberships.length;imageIndex++) {
                                imageMembershipsFlags[imageIndex] = imageMemberships[imageIndex].get(ordinal);
                            }

                            /// skip the length header to position the record at the payload.
                            int sizeOfData = VarInt.readVInt(byteData.getUnderlyingArray(), pointer);
                            pointer += VarInt.sizeOfVInt(sizeOfData);

                            rec.position(pointer);

                            /// rewrite embedded ordinal references, then add the remapped record to the destination
                            /// and record the old-to-new ordinal assignment.
                            remapper.remapOrdinals(rec, mappedBuffer);

                            int newOrdinal = destState.addData(mappedBuffer, FastBlobImageUtils.toLong(imageMembershipsFlags));

                            stateOrdinalMapping.setMappedOrdinal(ordinal, newOrdinal);

                            mappedBuffer.reset();
                        }
                    }
                }
            });
        }

        executor.awaitUninterruptibly();
    }
public int maxOrdinal() {
int maxOrdinal = 0;
for(int i=0;i<pointersAndOrdinals.length();i++) {
int ordinal = (int)(pointersAndOrdinals.get(i) >> 36);
if(ordinal > maxOrdinal)
maxOrdinal = ordinal;
}
return maxOrdinal;
}
/**
 * Returns true iff the byte sequence held in {@code serializedRepresentation} is identical,
 * byte for byte, to the sequence stored in this map at the offset encoded in {@code key}.
 */
private boolean compare(ByteDataBuffer serializedRepresentation, long key) {
    long cursor = key & 0xFFFFFFFFFL; // low 36 bits of the key are the byte offset
    final int storedLength = VarInt.readVInt(byteData.getUnderlyingArray(), cursor);
    // Fast reject on length before touching any payload bytes.
    if (storedLength != serializedRepresentation.length())
        return false;
    cursor += VarInt.sizeOfVInt(storedLength);
    int index = 0;
    while (index < storedLength) {
        if (serializedRepresentation.get(index) != byteData.get(cursor))
            return false;
        index++;
        cursor++;
    }
    return true;
}
/**
 * Grow the key array. All of the values in the current array must be re-hashed and added to the new array.
 *
 * The new array is double the current capacity (keeping it a power of two so bucket
 * selection can use a bitmask), and the resize threshold is reset to 70% of that.
 */
private void growKeyArray() {
    AtomicLongArray newKeys = emptyKeyArray(pointersAndOrdinals.length() * 2);
    long valuesToAdd[] = new long[size];
    int counter = 0;
    /// do not iterate over these values in the same order in which they appear in the hashed array.
    /// if we do so, we cause large clusters of collisions to appear (because we resolve collisions with linear probing).
    for(int i=0;i<pointersAndOrdinals.length();i++) {
        long key = pointersAndOrdinals.get(i);
        if(key != EMPTY_BUCKET_VALUE) {
            valuesToAdd[counter++] = key;
        }
    }
    // Sorting orders the entries by their packed value (ordinal in the high bits),
    // decorrelating insertion order from hash order before re-insertion.
    Arrays.sort(valuesToAdd);
    populateNewHashArray(newKeys, valuesToAdd);
    /// 70% load factor
    sizeBeforeGrow = (newKeys.length() * 7) / 10;
    pointersAndOrdinals = newKeys;
}
/**
 * Re-hash each key in {@code valuesToAdd} into {@code newKeys}, resolving collisions
 * with linear probing.  {@code newKeys.length()} must be a power of two, since bucket
 * selection masks the hash rather than taking a modulus.
 */
private void populateNewHashArray(AtomicLongArray newKeys, long[] valuesToAdd) {
    final int bucketMask = newKeys.length() - 1;
    for (long key : valuesToAdd) {
        if (key == EMPTY_BUCKET_VALUE)
            continue;
        int bucket = rehashPreviouslyAddedData(key) & bucketMask;
        // Linear probe to the first free slot.
        while (newKeys.get(bucket) != EMPTY_BUCKET_VALUE) {
            bucket = (bucket + 1) & bucketMask;
        }
        newKeys.set(bucket, key);
    }
}
/**
 * Recomputes the hash code of the byte sequence referenced by {@code key}
 * (used when re-inserting existing entries during a resize).
 */
private int rehashPreviouslyAddedData(long key) {
    long dataStart = key & 0xFFFFFFFFFL; // byte offset lives in the low 36 bits
    final int dataLength = VarInt.readVInt(byteData.getUnderlyingArray(), dataStart);
    dataStart += VarInt.sizeOfVInt(dataLength); // hash only the payload, not its length prefix
    return SegmentedByteArrayHasher.hashCode(byteData.getUnderlyingArray(), dataStart, dataLength);
}
/**
 * Creates an AtomicLongArray of the given size with every slot preset to EMPTY_BUCKET_VALUE.
 */
private AtomicLongArray emptyKeyArray(int size) {
    AtomicLongArray buckets = new AtomicLongArray(size);
    for (int slot = 0; slot < size; slot++) {
        buckets.set(slot, EMPTY_BUCKET_VALUE);
    }
    return buckets;
}
/**
 * This is used to store the server's SerializationState, so that it may resume the delta chain after a new server is brought back up.
 *
 * Wire format: [1 byte prepared-for-write flag] [vint bucket-array size] [vint key count]
 * [per key: vint ordinal, vlong pointer] [vlong byte-data length] [raw bytes] [free ordinal tracker].
 *
 * @param os stream to write the state to; not closed by this method
 * @throws IOException if the underlying stream fails
 */
public void serializeTo(OutputStream os) throws IOException {
    /// indicate which state this ByteArrayOrdinalMap was in.
    int isPreparedForWrite = pointersByOrdinal != null ? 1 : 0;
    os.write(isPreparedForWrite);
    /// write the hashed key array size
    VarInt.writeVInt(os, pointersAndOrdinals.length());
    /// write the keys in sorted ordinal order to the stream
    long keys[] = new long[size];
    int counter = 0;
    for(int i=0;i<pointersAndOrdinals.length();i++) {
        long key = pointersAndOrdinals.get(i);
        if(key != EMPTY_BUCKET_VALUE) {
            keys[counter++] = key;
        }
    }
    // Sorting the packed keys orders them by ordinal (high 28 bits dominate the comparison).
    Arrays.sort(keys);
    VarInt.writeVInt(os, keys.length);
    for(int i=0;i<keys.length;i++) {
        VarInt.writeVInt(os, (int)(keys[i] >> 36));       // ordinal
        VarInt.writeVLong(os, keys[i] & 0xFFFFFFFFFL);    // byte-data pointer
    }
    /// write the byte data to the stream
    VarInt.writeVLong(os, byteData.length());
    for(long i=0;i<byteData.length();i++) {
        os.write(byteData.get(i) & 0xFF);
    }
    /// write the freeOrdinalTracker to the stream
    freeOrdinalTracker.serializeTo(os);
}
/**
 * This is used to restore the server's SerializationState, so that it may resume the delta chain after a new server is brought back up.
 *
 * Reads the exact format produced by {@link #serializeTo(OutputStream)} and, when the
 * serialized map had been prepared for write, restores that mode as well.
 *
 * @param is stream positioned at the start of a serialized ByteArrayOrdinalMap; not closed here
 * @throws IOException if the underlying stream fails
 */
public static ByteArrayOrdinalMap deserializeFrom(InputStream is) throws IOException {
    boolean wasPreparedForWrite = is.read() == 1;
    int hashedKeyArraySize = VarInt.readVInt(is);
    long keys[] = new long[VarInt.readVInt(is)];
    for(int i=0;i<keys.length;i++) {
        // Re-pack each (ordinal, pointer) pair into the in-memory key layout: ordinal << 36 | pointer.
        keys[i] = (VarInt.readVLong(is) << 36) | VarInt.readVLong(is);
    }
    ByteDataBuffer byteData = new ByteDataBuffer(262144);
    long byteDataSize = VarInt.readVLong(is);
    for(long i=0;i<byteDataSize;i++) {
        byteData.write((byte)is.read());
    }
    FreeOrdinalTracker freeOrdinalTracker = FreeOrdinalTracker.deserializeFrom(is);
    ByteArrayOrdinalMap deserializedMap = new ByteArrayOrdinalMap(keys, byteData, freeOrdinalTracker, hashedKeyArraySize);
    if(wasPreparedForWrite)
        deserializedMap.prepareForWrite();
    return deserializedMap;
}
/** Returns the underlying buffer holding every serialized record (exposed for state-engine internals). */
public ByteDataBuffer getByteData() {
    return byteData;
}
/** Returns the raw hash buckets; each slot packs ordinal (high bits) and byte offset (low 36 bits), or -1 if empty. */
public AtomicLongArray getPointersAndOrdinals() {
    return pointersAndOrdinals;
}
/** True iff the packed pointer/ordinal entry is the empty-bucket sentinel. */
public static boolean isPointerAndOrdinalEmpty(long pointerAndOrdinal) {
    return pointerAndOrdinal == EMPTY_BUCKET_VALUE;
}
/** Extracts the byte-offset component (low 36 bits) from a packed pointer/ordinal entry. */
public static long getPointer(long pointerAndOrdinal) {
    final long pointerMask = 0xFFFFFFFFFL; // low 36 bits
    return pointerAndOrdinal & pointerMask;
}
/**
 * Extracts the ordinal component (bits above the low 36) from a packed entry.
 * Uses an arithmetic (signed) shift deliberately, so the -1 empty sentinel decodes to -1.
 */
public static int getOrdinal(long pointerAndOrdinal) {
    final long ordinalBits = pointerAndOrdinal >> 36; // signed shift on purpose
    return (int) ordinalBits;
}
} | 8,366 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/OrdinalRemapper.java | package com.netflix.zeno.fastblob.state;
import com.netflix.zeno.fastblob.OrdinalMapping;
import com.netflix.zeno.fastblob.record.ByteData;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.FastBlobDeserializationRecord;
import com.netflix.zeno.fastblob.record.SegmentedByteArray;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.fastblob.record.schema.FieldDefinition;
import com.netflix.zeno.fastblob.record.schema.MapFieldDefinition;
import com.netflix.zeno.fastblob.record.schema.TypedFieldDefinition;
import java.util.Arrays;
/**
 * Rewrites the ordinal references embedded in a serialized FastBlob record so that they
 * refer to the ordinals of a destination state, as dictated by the supplied
 * {@link OrdinalMapping}.  Non-reference fields are copied through verbatim.
 *
 * NOT thread-safe: the {@code scratch} buffer is reused across calls, so each thread
 * must use its own OrdinalRemapper instance.
 */
public class OrdinalRemapper {
    // reusable scratch space: collection fields are re-encoded here first, because their
    // serialized form is prefixed with a byte length that is only known after re-encoding
    private final ByteDataBuffer scratch;
    // per-type translation tables from source ordinals to destination ordinals
    private final OrdinalMapping ordinalMapping;
    public OrdinalRemapper(OrdinalMapping ordinalMapping) {
        this.ordinalMapping = ordinalMapping;
        this.scratch = new ByteDataBuffer();
    }
    /**
     * Copies the record {@code rec} into {@code toBuffer} field by field, replacing every
     * embedded ordinal with its mapped counterpart.
     */
    public void remapOrdinals(FastBlobDeserializationRecord rec, ByteDataBuffer toBuffer) {
        FastBlobSchema schema = rec.getSchema();
        ByteData fromSpace = rec.getByteData();
        long currentPointerPosition = rec.position();
        for(int i=0;i<schema.numFields();i++) {
            FieldDefinition fieldDef = schema.getFieldDefinition(i);
            int length = rec.getFieldLength(schema.getFieldName(i));
            TypedFieldDefinition typedFieldDef;
            int ordinal;
            int mappedOrdinal;
            switch(fieldDef.getFieldType()) {
            case OBJECT:
                typedFieldDef = (TypedFieldDefinition)fieldDef;
                if(VarInt.readVNull(fromSpace, currentPointerPosition)) {
                    // null reference: pass the one-byte null marker through unchanged
                    VarInt.writeVNull(toBuffer);
                    currentPointerPosition++;
                } else {
                    ordinal = VarInt.readVInt(fromSpace, currentPointerPosition);
                    currentPointerPosition += VarInt.sizeOfVInt(ordinal);
                    mappedOrdinal = getMappedOrdinal(typedFieldDef.getSubType(), ordinal);
                    VarInt.writeVInt(toBuffer, mappedOrdinal);
                }
                break;
            case SET:
                typedFieldDef = (TypedFieldDefinition)fieldDef;
                currentPointerPosition = copySetWithRemappedOrdinals(fromSpace, currentPointerPosition, toBuffer, typedFieldDef.getSubType());
                break;
            case LIST:
                typedFieldDef = (TypedFieldDefinition)fieldDef;
                currentPointerPosition = copyListWithRemappedOrdinals(toBuffer, fromSpace, currentPointerPosition, typedFieldDef.getSubType());
                break;
            case MAP:
                MapFieldDefinition mapFieldDef = (MapFieldDefinition)fieldDef;
                currentPointerPosition = copyMapWithRemappedOrdinals(toBuffer, fromSpace, currentPointerPosition, mapFieldDef);
                break;
            default:
                // primitive / opaque field: byte-for-byte copy, no ordinals inside
                if(fromSpace instanceof SegmentedByteArray)
                    toBuffer.copyFrom(((SegmentedByteArray)fromSpace), currentPointerPosition, length);
                else
                    toBuffer.copyFrom(fromSpace, currentPointerPosition, length);
                currentPointerPosition += length;
            }
        }
    }
    /**
     * Re-encodes a LIST field.  Lists are serialized as [vint byte length][elements...],
     * where each element is either a null marker or a plain (non-delta) vint ordinal, so
     * elements can be remapped independently and order is preserved.
     *
     * @return the read pointer advanced past the list in {@code fromSpace}
     */
    private long copyListWithRemappedOrdinals(ByteDataBuffer toSpace, ByteData fromSpace, long pointer, String elementType) {
        int sizeOfData = VarInt.readVInt(fromSpace, pointer);
        pointer += VarInt.sizeOfVInt(sizeOfData);
        int readBytesCounter = 0;
        while(readBytesCounter < sizeOfData) {
            if(VarInt.readVNull(fromSpace, pointer)) {
                VarInt.writeVNull(scratch);
                pointer++;
                readBytesCounter++;
            } else {
                int ordinal = VarInt.readVInt(fromSpace, pointer);
                int sizeOfOrdinal = VarInt.sizeOfVInt(ordinal);
                pointer += sizeOfOrdinal;
                readBytesCounter += sizeOfOrdinal;
                int mappedOrdinal = getMappedOrdinal(elementType, ordinal);
                VarInt.writeVInt(scratch, mappedOrdinal);
            }
        }
        // The remapped encoding can differ in byte length, so write the new length prefix
        // only after re-encoding into scratch.
        VarInt.writeVInt(toSpace, (int)scratch.length());
        toSpace.copyFrom(scratch.getUnderlyingArray(), 0L, (int)scratch.length());
        scratch.reset();
        return pointer;
    }
    /**
     * Re-encodes a SET field.  Set elements are serialized as deltas between successive
     * ascending ordinals (nulls as markers).  After remapping, the new ordinals are sorted
     * again so the output can be re-delta-encoded; -1 stands in for null during sorting.
     *
     * @return the read pointer advanced past the set in {@code fromSpace}
     */
    private long copySetWithRemappedOrdinals(ByteData fromSpace, long pointer, ByteDataBuffer toSpace, String elementType) {
        int sizeOfData = VarInt.readVInt(fromSpace, pointer);
        pointer += VarInt.sizeOfVInt(sizeOfData);
        int readBytesCounter = 0;
        int readOrdinalsCounter = 0;
        int currentOrdinal = 0;
        int mappedOrdinals[] = new int[sizeOfData];
        while(readBytesCounter < sizeOfData) {
            if(VarInt.readVNull(fromSpace, pointer)) {
                mappedOrdinals[readOrdinalsCounter++] = -1;
                pointer++;
                readBytesCounter++;
            } else {
                int ordinalDelta = VarInt.readVInt(fromSpace, pointer);
                int sizeOfOrdinalDelta = VarInt.sizeOfVInt(ordinalDelta);
                pointer += sizeOfOrdinalDelta;
                readBytesCounter += sizeOfOrdinalDelta;
                currentOrdinal += ordinalDelta; // undo the delta encoding to recover the ordinal
                int mappedOrdinal = getMappedOrdinal(elementType, currentOrdinal);
                mappedOrdinals[readOrdinalsCounter++] = mappedOrdinal;
            }
        }
        // Sort so that nulls (-1) come first and deltas between remapped ordinals are non-negative.
        Arrays.sort(mappedOrdinals, 0, readOrdinalsCounter);
        currentOrdinal = 0;
        for(int j=0;j<readOrdinalsCounter;j++) {
            if(mappedOrdinals[j] == -1) {
                VarInt.writeVNull(scratch);
            } else {
                VarInt.writeVInt(scratch, mappedOrdinals[j] - currentOrdinal);
                currentOrdinal = mappedOrdinals[j];
            }
        }
        VarInt.writeVInt(toSpace, (int)scratch.length());
        toSpace.copyFrom(scratch.getUnderlyingArray(), 0L, (int)scratch.length());
        scratch.reset();
        return pointer;
    }
    /**
     * Re-encodes a MAP field.  Entries are serialized as [key ordinal][value ordinal delta]
     * pairs, with the deltas taken between successive value ordinals and nulls as markers.
     * Each remapped entry is packed into a long (value ordinal in the high 32 bits, key in
     * the low 32) so sorting by value ordinal restores the invariant needed to
     * re-delta-encode the values; -1 in either half represents null.
     *
     * @return the read pointer advanced past the map in {@code fromSpace}
     */
    private long copyMapWithRemappedOrdinals(ByteDataBuffer toSpace, ByteData fromSpace, long pointer, MapFieldDefinition mapFieldDef) {
        int sizeOfData = VarInt.readVInt(fromSpace, pointer);
        long mapEntries[] = new long[sizeOfData / 2]; // each entry is at least 2 bytes, so this bounds the entry count
        pointer += VarInt.sizeOfVInt(sizeOfData);
        int readBytesCounter = 0;
        int currentValueOrdinal = 0;
        int readMapEntries = 0;
        while(readBytesCounter < sizeOfData) {
            int keyOrdinal = -1;
            int sizeOfKeyOrdinal = 1;
            if(VarInt.readVNull(fromSpace, pointer)) {
                pointer++;
            } else {
                keyOrdinal = VarInt.readVInt(fromSpace, pointer);
                sizeOfKeyOrdinal = VarInt.sizeOfVInt(keyOrdinal);
                pointer += sizeOfKeyOrdinal;
            }
            int valueOrdinalDelta = -1;
            int sizeOfValueOrdinalDelta = 1;
            if(VarInt.readVNull(fromSpace, pointer)) {
                pointer++;
            } else {
                valueOrdinalDelta = VarInt.readVInt(fromSpace, pointer);
                sizeOfValueOrdinalDelta = VarInt.sizeOfVInt(valueOrdinalDelta);
                pointer += sizeOfValueOrdinalDelta;
                currentValueOrdinal += valueOrdinalDelta; // undo delta encoding of the value ordinal
            }
            int mappedKeyOrdinal = keyOrdinal == -1 ? -1 : getMappedOrdinal(mapFieldDef.getKeyType(), keyOrdinal);
            int mappedValueOrdinal = valueOrdinalDelta == -1 ? -1 : getMappedOrdinal(mapFieldDef.getValueType(), currentValueOrdinal);
            // Pack (value, key); a null value sign-extends the high half so the entry sorts last.
            mapEntries[readMapEntries++] = mappedValueOrdinal == -1 ? 0xFFFFFFFF00000000L | mappedKeyOrdinal : ((long)mappedValueOrdinal << 32) | (mappedKeyOrdinal & 0xFFFFFFFFL);
            readBytesCounter += sizeOfKeyOrdinal + sizeOfValueOrdinalDelta;
        }
        Arrays.sort(mapEntries, 0, readMapEntries);
        currentValueOrdinal = 0;
        for(int j=0;j<readMapEntries;j++) {
            int valueOrdinal = (int)(mapEntries[j] >> 32);
            int keyOrdinal = (int)(mapEntries[j] & 0xFFFFFFFFL);
            if(keyOrdinal == -1)
                VarInt.writeVNull(scratch);
            else
                VarInt.writeVInt(scratch, keyOrdinal);
            if(valueOrdinal == -1) {
                VarInt.writeVNull(scratch);
            } else {
                VarInt.writeVInt(scratch, valueOrdinal - currentValueOrdinal);
                currentValueOrdinal = valueOrdinal;
            }
        }
        VarInt.writeVInt(toSpace, (int)scratch.length());
        toSpace.copyFrom(scratch.getUnderlyingArray(), 0L, (int)scratch.length());
        scratch.reset();
        return pointer;
    }
    /** Looks up the destination ordinal for {@code fromOrdinal} of the given type. */
    private int getMappedOrdinal(String type, int fromOrdinal) {
        return ordinalMapping.getStateOrdinalMapping(type).getMappedOrdinal(fromOrdinal);
    }
}
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/FastBlobTypeDeserializationState.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.fastblob.state;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.FastBlobDeserializationRecord;
import com.netflix.zeno.fastblob.record.FastBlobSerializationRecord;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.serializer.NFTypeSerializer;
import com.netflix.zeno.util.CollectionUnwrapper;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicLongArray;
/**
* This class represents the "deserialization state" for a single type at some level of the object
* hierarchy in the FastBlob serialized data.<p/>
*
* This class is responsible for maintaining the mappings between ordinals and deserialized objects.
* It performs this responsibility by maintaining an ArrayList of objects. The location of the object
* in the ArrayList will be the index of its ordinal.
*
* @param <T>
*
* @author dkoszewnik
*
*/
public class FastBlobTypeDeserializationState<T> implements Iterable<T> {
    private final NFTypeSerializer<T> serializer;
    // notified on add/remove/reassign; defaults to a no-op so registration is optional
    private TypeDeserializationStateListener<T> stateListener = TypeDeserializationStateListener.noopCallback();
    // index into this list IS the ordinal; nulls are holes left by removed ordinals
    private List<T> objects;
    ///the following properties are used for heap-friendly double snapshot refresh
    private List<T> previousObjects;
    private BitSet copiedPreviousObjects;
    private ObjectIdentityOrdinalMap identityOrdinalMap;
    public FastBlobTypeDeserializationState(NFTypeSerializer<T> serializer) {
        this.serializer = serializer;
        this.objects = new ArrayList<T>();
    }
    /**
     * Returns the object for the given ordinal, or null when no object is stored there.
     * NOTE(review): a negative ordinal falls through to List.get and throws
     * IndexOutOfBoundsException.
     */
    public T get(int ordinal) {
        if(ordinal >= objects.size())
            return null;
        return objects.get(ordinal);
    }
    /**
     * Deserializes {@code rec} and stores the resulting object at {@code ordinal},
     * notifying the listener via both the deprecated and the ordinal-aware callback.
     */
    @SuppressWarnings("deprecation")
    public void add(int ordinal, FastBlobDeserializationRecord rec) {
        T obj = serializer.deserialize(rec);
        ensureCapacity(ordinal + 1);
        objects.set(ordinal, obj);
        stateListener.addedObject(obj);
        stateListener.addedObject(obj, ordinal);
    }
    /**
     * Clears the object at {@code ordinal}, notifying the listener via both the
     * deprecated and the ordinal-aware callback.
     */
    @SuppressWarnings("deprecation")
    public void remove(int ordinal) {
        T removedObject = objects.get(ordinal);
        objects.set(ordinal, null);
        stateListener.removedObject(removedObject);
        stateListener.removedObject(removedObject, ordinal);
    }
    /** Replaces the listener that is notified of object lifecycle events. */
    public void setListener(TypeDeserializationStateListener<T> listener) {
        this.stateListener = listener;
    }
    /**
     * Not intended for external consumption.<p/>
     *
     * This method is only intended to be used during heap-friendly double snapshot refresh.
     *
     * Re-serializes every current object into {@code ordinalMap} under its current ordinal,
     * then stashes the current objects as "previous" so the next snapshot can reuse them.
     */
    public void populateByteArrayOrdinalMap(ByteArrayOrdinalMap ordinalMap) {
        FastBlobSerializationRecord rec = new FastBlobSerializationRecord(serializer.getFastBlobSchema());
        ByteDataBuffer scratch = new ByteDataBuffer();
        for(int i=0;i<objects.size();i++) {
            T obj = objects.get(i);
            if(obj != null) {
                serializer.serialize(obj, rec);
                rec.writeDataTo(scratch);
                ordinalMap.put(scratch, i);
                scratch.reset();
                rec.reset();
            }
        }
        previousObjects = objects;
        copiedPreviousObjects = new BitSet(previousObjects.size());
        objects = new ArrayList<T>(previousObjects.size());
    }
    /**
     * Fill this state from the serialized data which exists in this ByteArrayOrdinalMap
     *
     * @param ordinalMap
     */
    public void populateFromByteOrdinalMap(final ByteArrayOrdinalMap ordinalMap) {
        ByteDataBuffer byteData = ordinalMap.getByteData();
        AtomicLongArray pointersAndOrdinals = ordinalMap.getPointersAndOrdinals();
        FastBlobDeserializationRecord rec = new FastBlobDeserializationRecord(getSchema(), byteData.getUnderlyingArray());
        for (int i = 0; i < pointersAndOrdinals.length(); i++) {
            long pointerAndOrdinal = pointersAndOrdinals.get(i);
            if(!ByteArrayOrdinalMap.isPointerAndOrdinalEmpty(pointerAndOrdinal)) {
                long pointer = ByteArrayOrdinalMap.getPointer(pointerAndOrdinal);
                int ordinal = ByteArrayOrdinalMap.getOrdinal(pointerAndOrdinal);
                // records are stored as [vint length][payload]; position past the length prefix
                int sizeOfData = VarInt.readVInt(byteData.getUnderlyingArray(), pointer);
                pointer += VarInt.sizeOfVInt(sizeOfData);
                rec.position(pointer);
                add(ordinal, rec);
            }
        }
    }
    /**
     * Not intended for external consumption.<p/>
     *
     * This method is only intended to be used during heap-friendly double snapshot refresh.
     */
    public void createIdentityOrdinalMap() {
        identityOrdinalMap = new ObjectIdentityOrdinalMap(objects);
    }
    /**
     * Not intended for external consumption.<p/>
     *
     * This method is only intended to be used during heap-friendly double snapshot refresh.
     *
     * Returns the ordinal of {@code obj} by identity, trying the unwrapped form of the
     * object if the direct lookup fails; -1 when the identity map is absent or has no entry.
     */
    public int find(T obj) {
        if(identityOrdinalMap == null)
            return -1;
        int ordinal = identityOrdinalMap.get(obj);
        if(ordinal < 0)
            ordinal = identityOrdinalMap.get(CollectionUnwrapper.unwrap(obj));
        return ordinal;
    }
    /**
     * Not intended for external consumption.<p/>
     *
     * This method is only intended to be used during heap-friendly double snapshot refresh.
     *
     * Reuses the object stored under {@code previousOrdinal} in the previous snapshot as
     * the object for {@code newOrdinal}, marking it as copied so it is not reported removed.
     */
    public void copyPrevious(int newOrdinal, int previousOrdinal) {
        T obj = previousObjects.get(previousOrdinal);
        ensureCapacity(newOrdinal + 1);
        objects.set(newOrdinal, obj);
        copiedPreviousObjects.set(previousOrdinal);
        stateListener.reassignedObject(obj, previousOrdinal, newOrdinal);
    }
    /**
     * Not intended for external consumption.<p/>
     *
     * This method is only intended to be used during heap-friendly double snapshot refresh.
     */
    @SuppressWarnings("deprecation")
    public void clearPreviousObjects() {
        /// each previous object which was *not* copied was removed
        for(int i=0;i<previousObjects.size();i++) {
            T t = previousObjects.get(i);
            if(t != null && !copiedPreviousObjects.get(i)) {
                stateListener.removedObject(t);
                stateListener.removedObject(t, i);
            }
        }
        previousObjects = null;
        copiedPreviousObjects = null;
    }
    /**
     * Not intended for external consumption.<p/>
     *
     * This method is only intended to be used during heap-friendly double snapshot refresh.
     */
    public void clearIdentityOrdinalMap() {
        identityOrdinalMap = null;
    }
    /** Returns the FastBlob schema for this type. */
    public FastBlobSchema getSchema() {
        return serializer.getFastBlobSchema();
    }
    /** Returns the serializer used to (de)serialize objects of this type. */
    public NFTypeSerializer<T> getSerializer() {
        return serializer;
    }
    /**
     * Counts the number of populated objects in this state.<p/>
     *
     * @return an integer equal to the number of objects which will be iterated over by the Iterator
     * returned from iterator();
     */
    public int countObjects() {
        int count = 0;
        for(int i=0;i<objects.size();i++) {
            if(objects.get(i) != null)
                count++;
        }
        return count;
    }
    /**
     * Returns the current maximum ordinal for this type. Returns -1 if this type has no objects.
     *
     * @return
     */
    public int maxOrdinal() {
        int ordinal = objects.size();
        while(--ordinal >= 0) {
            if(objects.get(ordinal) != null)
                return ordinal;
        }
        return -1;
    }
    /** Iterates over populated objects only (nulls/holes are skipped by the iterator). */
    @Override
    public Iterator<T> iterator() {
        return new TypeDeserializationStateIterator<T>(objects);
    }
    // Pads the object list with nulls so that index (size - 1) is addressable.
    void ensureCapacity(int size) {
        while(objects.size() < size) {
            objects.add(null);
        }
    }
    /** Feeds every populated object of this type into the given state engine. */
    public void fillSerializationState(FastBlobStateEngine engine) {
        for (T t : this) {
            engine.add(serializer.getName(), t);
        }
    }
}
| 8,368 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/compressed/ByteSequenceRetainer.java | package com.netflix.zeno.fastblob.state.compressed;
import com.netflix.zeno.fastblob.record.ByteData;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.VarInt;
import java.util.Arrays;
/**
 * An open-addressed hash table, keyed by ordinal, which retains variable-length byte
 * sequences.  Sequences are appended to a single growable buffer as
 * [vint length][payload]; each table entry packs the ordinal (bits 36 and up) together
 * with the byte offset of its sequence (low 36 bits) into one long.
 *
 * Collisions are resolved with linear probing.  The bucket array length is always a
 * power of two (initially 256, doubling on growth), which populateNewHashArray relies
 * on when it masks hashes instead of taking a modulus.
 *
 * Not thread-safe.
 */
public class ByteSequenceRetainer {

    private final static long EMPTY_BUCKET_VALUE = -1L;

    // each slot packs (ordinal << 36 | byte offset), or EMPTY_BUCKET_VALUE when free
    private long keys[];
    private final ByteDataBuffer buf;
    private int size;
    private int sizeBeforeGrow;

    public ByteSequenceRetainer() {
        this.buf = new ByteDataBuffer(262144);
        this.keys = new long[256];            // must remain a power of two (see populateNewHashArray)
        Arrays.fill(keys, EMPTY_BUCKET_VALUE);
        this.size = 0;
        this.sizeBeforeGrow = 179;            // ~70% load factor of 256
    }

    /**
     * Retains the {@code seqLength} bytes starting at {@code seqPointer} in
     * {@code readSequenceFrom}, keyed by {@code ordinal}.
     */
    public void addByteSequence(int ordinal, ByteData readSequenceFrom, long seqPointer, int seqLength) {
        if(size == sizeBeforeGrow)
            growKeyArray();

        long key = ((long)ordinal << 36 | buf.length());

        int bucket = hashInt(ordinal) % keys.length;

        // linear probe to the first free slot
        while(keys[bucket] != EMPTY_BUCKET_VALUE) {
            bucket++;
            bucket %= keys.length;
        }

        VarInt.writeVInt(buf, seqLength);
        buf.copyFrom(readSequenceFrom, seqPointer, seqLength);

        keys[bucket] = key;
        size++;
    }

    /**
     * Writes the retained sequence of bytes to <code>writeTo</code>. Returns the number of bytes written,
     * or 0 when no sequence is retained for {@code ordinal}.
     */
    public int retrieveSequence(int ordinal, ByteDataBuffer writeTo) {
        int bucket = hashInt(ordinal) % keys.length;

        while(keys[bucket] != EMPTY_BUCKET_VALUE) {
            int foundOrdinal = (int)(keys[bucket] >>> 36);
            if(foundOrdinal == ordinal) {
                long pointer = keys[bucket] & 0xFFFFFFFFFL;
                int length = VarInt.readVInt(buf.getUnderlyingArray(), pointer);
                pointer += VarInt.sizeOfVInt(length);

                writeTo.copyFrom(buf.getUnderlyingArray(), pointer, length);
                return length;
            }
            /// BUGFIX: advance the probe on a collision.  Previously the loop never moved
            /// past an occupied bucket holding a different ordinal, so any hash collision
            /// caused an infinite loop.  This mirrors the probing in addByteSequence.
            bucket = (bucket + 1) % keys.length;
        }

        return 0;
    }

    /** Returns an iterator over all retained entries, ordered by packed key (i.e. by ordinal). */
    public ByteSequenceRetainerIterator iterator() {
        return new ByteSequenceRetainerIterator(keys, size, buf.getUnderlyingArray());
    }

    /** Empties the table and the backing byte buffer. */
    public void clear() {
        size = 0;
        Arrays.fill(keys, EMPTY_BUCKET_VALUE);
        buf.reset();
    }

    // Doubles the bucket array and re-inserts every entry, keeping the 70% load threshold.
    private void growKeyArray() {
        long newKeys[] = new long[keys.length * 2];
        Arrays.fill(newKeys, EMPTY_BUCKET_VALUE);

        // re-insert in sorted (not hash) order to avoid recreating large collision clusters
        long keysToAdd[] = sortedPopulatedKeysArray(keys, size);

        populateNewHashArray(newKeys, keysToAdd);

        this.keys = newKeys;
        this.sizeBeforeGrow = (newKeys.length * 7) / 10;
    }

    /**
     * Hash all of the existing values specified by the keys in the supplied long array
     * into the supplied array.  Requires newKeys.length to be a power of two.
     */
    private void populateNewHashArray(long[] newKeys, long[] valuesToAdd) {
        int modBitmask = newKeys.length - 1;

        for(int i=0;i<valuesToAdd.length;i++) {
            int ordinal = (int)(valuesToAdd[i] >>> 36);
            int hash = hashInt(ordinal);
            int bucket = hash & modBitmask;
            while(newKeys[bucket] != EMPTY_BUCKET_VALUE)
                bucket = (bucket + 1) & modBitmask;
            newKeys[bucket] = valuesToAdd[i];
        }
    }

    // Integer bit-mix function; the final mask keeps the result non-negative so that
    // callers may take (hash % length) safely.
    private int hashInt(int hash) {
        hash = ~hash + (hash << 15);
        hash = hash ^ (hash >>> 12);
        hash = hash + (hash << 2);
        hash = hash ^ (hash >>> 4);
        hash = hash * 2057;
        hash = hash ^ (hash >>> 16);
        return hash & Integer.MAX_VALUE;
    }

    /**
     * Iterates the populated entries of a retainer in packed-key order, exposing the
     * ordinal, payload offset, and payload size of each entry in turn.
     */
    static class ByteSequenceRetainerIterator {
        private final long keysToAdd[];
        private final ByteData dataArray;
        private int currentKeyIndex;

        private int currentOrdinal;
        private int currentDataSize;
        private long currentPointer;

        private ByteSequenceRetainerIterator(long keyArray[], int size, ByteData dataArray) {
            this.keysToAdd = sortedPopulatedKeysArray(keyArray, size);
            this.dataArray = dataArray;
            this.currentKeyIndex = -1;
        }

        /** Advances to the next entry; returns false when exhausted. */
        public boolean nextKey() {
            currentKeyIndex++;
            if(currentKeyIndex < keysToAdd.length) {
                currentOrdinal = (int) (keysToAdd[currentKeyIndex] >> 36);
                currentPointer = keysToAdd[currentKeyIndex] & 0xFFFFFFFFFL;
                currentDataSize = VarInt.readVInt(dataArray, currentPointer);
                currentPointer += VarInt.sizeOfVInt(currentDataSize);
                return true;
            }
            return false;
        }

        public int getCurrentOrdinal() {
            return currentOrdinal;
        }

        public int getCurrentDataSize() {
            return currentDataSize;
        }

        public long getCurrentPointer() {
            return currentPointer;
        }

        /** Copies the current entry (same ordinal and payload) into {@code other}. */
        public void copyEntryTo(ByteSequenceRetainer other) {
            other.addByteSequence(currentOrdinal, dataArray, currentPointer, currentDataSize);
        }
    }

    // Extracts the populated (non-sentinel) keys from a bucket array and sorts them.
    private static long[] sortedPopulatedKeysArray(long[] keys, int size) {
        long arr[] = new long[size];
        int counter = 0;

        /// do not iterate over these values in the same order in which they appear in the hashed array.
        /// if we do so, we cause large clusters of collisions to appear (because we resolve collisions with linear probing).
        for(int i=0;i<keys.length;i++) {
            if(keys[i] != EMPTY_BUCKET_VALUE) {
                arr[counter++] = keys[i];
            }
        }

        Arrays.sort(arr);
        return arr;
    }
}
| 8,369 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state | Create_ds/zeno/src/main/java/com/netflix/zeno/fastblob/state/compressed/FastBlobTypeByteSequenceState.java | package com.netflix.zeno.fastblob.state.compressed;
import com.netflix.zeno.fastblob.record.ByteData;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.state.compressed.ByteSequenceRetainer.ByteSequenceRetainerIterator;
import java.util.BitSet;
/**
 * Double-buffered byte-sequence storage for a single type across two consecutive
 * cycles: reads are served from the current cycle's retainer, writes accumulate in
 * the next cycle's retainer, and flip() swaps the two.
 */
public class FastBlobTypeByteSequenceState {

    private ByteSequenceRetainer thisCycleRetainer;
    private ByteSequenceRetainer nextCycleRetainer;

    public FastBlobTypeByteSequenceState() {
        this.thisCycleRetainer = new ByteSequenceRetainer();
        this.nextCycleRetainer = new ByteSequenceRetainer();
    }

    /** Records the bytes for {@code ordinal} into the upcoming cycle. */
    public void add(int ordinal, ByteData data, long pointer, int objectLength) {
        nextCycleRetainer.addByteSequence(ordinal, data, pointer, objectLength);
    }

    /** Copies the current cycle's bytes for {@code ordinal} into {@code writeTo}; returns the byte count. */
    public int get(int ordinal, ByteDataBuffer writeTo) {
        return thisCycleRetainer.retrieveSequence(ordinal, writeTo);
    }

    /** Carries forward every current-cycle entry whose ordinal is not flagged as removed. */
    public void prepareForDelta(BitSet removedOrdinals) {
        ByteSequenceRetainerIterator entries = thisCycleRetainer.iterator();
        while (entries.nextKey()) {
            boolean removed = removedOrdinals.get(entries.getCurrentOrdinal());
            if (!removed) {
                entries.copyEntryTo(nextCycleRetainer);
            }
        }
    }

    /** Swaps the retainers so the staged next-cycle data becomes current. */
    public void flip(String nextCycle) {
        ByteSequenceRetainer previouslyCurrent = thisCycleRetainer;
        thisCycleRetainer = nextCycleRetainer;
        nextCycleRetainer = previouslyCurrent;
    }
}
| 8,370 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffByteArray.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import java.util.Arrays;
import org.apache.commons.codec.binary.Base64;
/**
*
* Wrapper around a byte array, necessary to implement equals() and hashCode().
*
* @author dkoszewnik
*
*/
/**
 * Wraps a byte array so it can be used where value semantics are required:
 * equality and hashing are defined over the array contents, and toString()
 * renders the bytes as Base64.
 */
public class DiffByteArray {

    private final byte bytes[];

    public DiffByteArray(byte bytes[]) {
        this.bytes = bytes;
    }

    @Override
    public int hashCode() {
        return Arrays.hashCode(bytes);
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof DiffByteArray))
            return false;
        DiffByteArray that = (DiffByteArray) obj;
        return Arrays.equals(bytes, that.bytes);
    }

    @Override
    public String toString() {
        return Base64.encodeBase64String(bytes);
    }
}
| 8,371 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/TypeDiffInstruction.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
/**
* The TypeDiffInstruction should be overridden to describe how find matching
* pairs of Objects of a given type.<p/>
*
* From each Object of this type, a primary key must be constructed/extracted.
* This key must meaningfully override hashCode() and equals(), and should be
* unique for a given type in the FastBlobStateEngine.<p/>
*
* The TypeDiffInstruction will automatically find pairs of Objects based on
* these primary keys, and each pair of Objects will be traversed to find the
* diff.<p/>
*
* (from {@link DiffRecord})<br/>
* Conceptually, The diff of two Objects is calculated by the following process:
*
* <ol>
* <li>reduce all properties in each Object to sets of key/value pairs.</li>
* <li>pull out matching pairs of key/value pairs from both Objects.</li>
* <li>when there are no more matches left, the diff score between the
* two Objects is sum of the remaining key/value pairs for both Objects.</li>
* </ol>
*
* @author dkoszewnik
*
*/
public abstract class TypeDiffInstruction<T> {

    /** The name of the NFTypeSerializer whose objects this instruction matches. */
    public abstract String getSerializerName();

    /**
     * Extract/construct the primary key for an object of this type.  The returned key
     * must meaningfully override hashCode() and equals(), and should be unique per
     * object unless {@link #isUniqueKey()} is overridden to return false.
     */
    public abstract Object getKey(T object);

    /**
     * Indicates whether or not this key will be unique across all objects for
     * this type.
     *
     * Defaults to true.
     */
    public boolean isUniqueKey() {
        return true;
    }

    /**
     * Indicates the name by which this type will be identified in the diff.
     *
     * Defaults to the serializer name.
     */
    public String getTypeIdentifier() {
        return getSerializerName();
    }

    /** Untyped bridge used by the diff framework: casts and delegates to {@link #getKey(Object)}. */
    @SuppressWarnings("unchecked")
    public Object getKeyFromObject(Object obj) {
        return getKey((T) obj);
    }
}
| 8,372 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffSerializationFramework.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.serializer.SerializationFramework;
import com.netflix.zeno.serializer.SerializerFactory;
/**
* This class is used in the context of the Zeno diff operation. It's unlikely that users will
* want to use this directly. Instead, TypeDiffOperation contains the main interface for performing a diff
* between two arbitrary data states.<p/>
*
* See the class DiffExample under source folder src/examples/java for an example of how to perform a diff on two data sets.
*
* @author dkoszewnik
*
*/
public class DiffSerializationFramework extends SerializationFramework {

    /**
     * @param serializerFactory supplies the NFTypeSerializers describing the object model to be diffed
     */
    public DiffSerializationFramework(SerializerFactory serializerFactory) {
        super(serializerFactory);
        // route all framework serialization callbacks through the diff key/value flattener
        this.frameworkSerializer = new DiffFrameworkSerializer(this);
    }
}
| 8,373 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffPropertyPath.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.serializer.NFTypeSerializer;
import java.util.Arrays;
import java.util.concurrent.ConcurrentHashMap;
/**
* The diff operation must flatten out an object hierarchy into a set of key/value pairs. The keys are
* represented by the "path" of properties, (i.e. sequence of {@link NFTypeSerializer} fields) which must
* be traversed in order to arrive at the value.<p/>
*
* This class describes a path through the object hierarchy to a property, starting with a top level serializer.<p/>
*
* @author dkoszewnik
*
*/
public class DiffPropertyPath implements Cloneable, Comparable<DiffPropertyPath> {
private static final ConcurrentHashMap<DiffPropertyPath, DiffPropertyPath> canonicalDiffBreadcrumbs = new ConcurrentHashMap<DiffPropertyPath, DiffPropertyPath>();
private String topNodeSerializer;
private final String fieldBreadcrumbs[];
private int breadcrumbLength;
private int hashCode;
public DiffPropertyPath() {
this.fieldBreadcrumbs = new String[256];
this.breadcrumbLength = 0;
}
private DiffPropertyPath(DiffPropertyPath copy) {
this.topNodeSerializer = copy.topNodeSerializer;
this.fieldBreadcrumbs = copy.getFieldBreadcrumbsCopy();
this.breadcrumbLength = fieldBreadcrumbs.length;
this.hashCode = copy.hashCode;
}
DiffPropertyPath(String topNodeSerializer, String fieldBreadcrumbs[]) {
this.topNodeSerializer = topNodeSerializer;
this.fieldBreadcrumbs = fieldBreadcrumbs;
this.breadcrumbLength = fieldBreadcrumbs.length;
}
String getTopNodeSerializer() {
return topNodeSerializer;
}
String[] getBreadcrumbArray() {
return fieldBreadcrumbs;
}
int getBreadcrumbLength() {
return breadcrumbLength;
}
public void setTopNodeSerializer(String topNodeSerializer) {
hashCode = 0;
this.topNodeSerializer = topNodeSerializer;
}
public void addBreadcrumb(String field) {
if(hashCode != 0)
hashCode ^= breadcrumbLength * field.hashCode();
fieldBreadcrumbs[breadcrumbLength] = field;
breadcrumbLength++;
}
public void removeBreadcrumb() {
breadcrumbLength--;
if(hashCode != 0)
hashCode ^= (breadcrumbLength) * fieldBreadcrumbs[breadcrumbLength].hashCode();
}
public void reset() {
hashCode = 0;
breadcrumbLength = 0;
}
public DiffPropertyPath copy() {
DiffPropertyPath copy = canonicalDiffBreadcrumbs.get(this);
if(copy == null) {
DiffPropertyPath newCopy = new DiffPropertyPath(this);
copy = canonicalDiffBreadcrumbs.putIfAbsent(newCopy, newCopy);
if(copy == null)
copy = newCopy;
}
return copy;
}
@Override
public boolean equals(Object anotherObject) {
if(anotherObject instanceof DiffPropertyPath) {
DiffPropertyPath other = (DiffPropertyPath)anotherObject;
if(other.breadcrumbLength == this.breadcrumbLength) {
if(other.topNodeSerializer.equals(this.topNodeSerializer)) {
for(int i=breadcrumbLength - 1; i >= 0; i--) {
if(!other.fieldBreadcrumbs[i].equals(this.fieldBreadcrumbs[i]))
return false;
}
return true;
}
}
}
return false;
}
@Override
public int hashCode() {
if(hashCode != 0)
return hashCode;
int result = 1 + 32 * (topNodeSerializer.hashCode());
for(int i=0;i<breadcrumbLength;i++) {
result ^= i * fieldBreadcrumbs[i].hashCode();
}
hashCode = result;
return result;
}
@Override
public int compareTo(DiffPropertyPath o) {
int comp = this.topNodeSerializer.compareTo(o.topNodeSerializer);
if(comp == 0) {
comp = breadcrumbLength - o.breadcrumbLength;
for(int i=breadcrumbLength - 1;i >= 0 && comp == 0;i--) {
comp = this.fieldBreadcrumbs[i].compareTo(o.fieldBreadcrumbs[i]);
}
}
return comp;
}
private String[] getFieldBreadcrumbsCopy() {
return Arrays.copyOf(fieldBreadcrumbs, breadcrumbLength);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder(topNodeSerializer);
for(int i=0;i<breadcrumbLength;i++) {
builder.append(".");
builder.append(fieldBreadcrumbs[i]);
}
return builder.toString();
}
}
| 8,374 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffFrameworkSerializer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.serializer.FrameworkSerializer;
import com.netflix.zeno.serializer.SerializationFramework;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
/**
*
* Defines operations required to populate individual POJO primitive elements into DiffRecords
*
* @author dkoszewnik
*
*/
public class DiffFrameworkSerializer extends FrameworkSerializer<DiffRecord> {
private static final Object NULL_OBJECT = new Integer(Integer.MIN_VALUE + 100);
public DiffFrameworkSerializer(SerializationFramework framework) {
super(framework);
}
@Override
public void serializePrimitive(DiffRecord rec, String fieldName, Object value) {
rec.serializePrimitive(fieldName, value);
}
@Override
public void serializeBytes(DiffRecord rec, String fieldName, byte[] value) {
rec.serializePrimitive(fieldName, new DiffByteArray(value));
}
@Override
@SuppressWarnings("unchecked")
public void serializeObject(DiffRecord rec, String fieldName, String typeName, Object obj) {
if(obj == null) {
serializePrimitive(rec, fieldName, NULL_OBJECT);
return;
}
rec.serializeObject(fieldName);
getSerializer(typeName).serialize(obj, rec);
rec.finishedObject();
}
@Override
public void serializeObject(DiffRecord rec, String fieldName, Object obj) {
serializeObject(rec, fieldName, rec.getSchema().getObjectType(fieldName), obj);
}
@Override
public <T> void serializeList(DiffRecord rec, String fieldName, String typeName, Collection<T> obj) {
serializeCollection(rec, fieldName, typeName, obj);
}
@Override
public <T> void serializeSet(DiffRecord rec, String fieldName, String typeName, Set<T> obj) {
serializeCollection(rec, fieldName, typeName, obj);
}
private <T> void serializeCollection(DiffRecord rec, String fieldName, String typeName, Collection<T> obj) {
if(obj == null) {
serializePrimitive(rec, fieldName, NULL_OBJECT);
return;
}
rec.serializeObject(fieldName);
for(T t : obj) {
serializeObject(rec, "element", typeName, t);
}
rec.finishedObject();
}
@Override
public <K, V> void serializeMap(DiffRecord rec, String fieldName, String keyTypeName, String valueTypeName, Map<K, V> obj) {
if(obj == null) {
serializePrimitive(rec, fieldName, NULL_OBJECT);
return;
}
rec.serializeObject(fieldName);
for(Map.Entry<K, V> entry : obj.entrySet()) {
serializeObject(rec, "key", keyTypeName, entry.getKey());
serializeObject(rec, "value", valueTypeName, entry.getValue());
}
rec.finishedObject();
}
}
| 8,375 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffRecordValueListMap.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* A map of key/value pairs contained in a {@link DiffRecord}.<p/>
*
* The DiffRecord flattens out a top-level object into key/value pairs. This data structure
* is used to hold the list of values corresponding to each "property path". <p/>
*
* This data structure is intended to be reused after clear() is called with minimum object
* creation overhead.
*
* @author dkoszewnik
*
*/
public class DiffRecordValueListMap {
private final Map<DiffPropertyPath, Integer> fieldValuesLists;
private final List<List<Object>> valuesLists;
private int nextValueIndex;
public DiffRecordValueListMap() {
this.fieldValuesLists = new HashMap<DiffPropertyPath, Integer>();
this.valuesLists = new ArrayList<List<Object>>();
}
/**
* Add a value to be associated with the supplied DiffPropertyPath
*/
public void addValue(DiffPropertyPath path, Object obj) {
getOrCreateList(path).add(obj);
}
/**
* Get the list of values associated with the supplied DiffPropertyPath
*/
public List<Object> getList(DiffPropertyPath path) {
Integer listIndex = fieldValuesLists.get(path);
if(listIndex == null)
return null;
return getList(listIndex.intValue());
}
private List<Object> getOrCreateList(DiffPropertyPath path) {
Integer listIndex = fieldValuesLists.get(path);
if(listIndex == null) {
listIndex = Integer.valueOf(nextValueIndex++);
/// create a copy of this DiffPropertyPath, as the propertyPath which is passed in
/// is modified throughout the traversal.
fieldValuesLists.put(path.copy(), listIndex);
}
return getList(listIndex.intValue());
}
private List<Object> getList(int listIndex) {
while(valuesLists.size() <= listIndex) {
valuesLists.add(new ArrayList<Object>());
}
return valuesLists.get(listIndex);
}
public void clear() {
for(List<Object> list : valuesLists) {
list.clear();
}
fieldValuesLists.clear();
nextValueIndex = 0;
}
public Iterable<DiffPropertyPath> keySet() {
return fieldValuesLists.keySet();
}
}
| 8,376 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/TypeDiff.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.serializer.NFTypeSerializer;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* This Object represents the result of a diff for a specific type in the {@link NFTypeSerializer} object hierarchy.
*
* It is organized into the following:
* - The list of Objects which were extra in the "from" FastBlobStateEngine (had no corresponding Object in the "to" engine)
* - The list of Objects which were extra in the "to" FastBlobStateEngine (had no corresponding Object in the "from" engine)
* - a list of {@link ObjectDiffScore}s, each of which contains a matching pair of Objects and the diff score between them.
*
* (from {@link DiffRecord})
* Conceptually, The diff of two Objects is calculated by the following process:
* 1) reduce all properties in each Object to sets of key/value pairs.
* 2) pull out matching pairs of key/value pairs from both Objects.
* 3) When there are no more matches left, the diff score between the two Objects is sum of the remaining key/value pairs for both Objects.
*
* @author dkoszewnik
*
*/
public class TypeDiff<T> {
private final String topNodeSerializer;
private final Map<DiffPropertyPath, FieldDiffScore<T>> fieldDifferences;
private final List<T> extraInFrom;
private final List<T> extraInTo;
private final List<ObjectDiffScore<T>> objectDiffs;
private int itemCountFrom;
private int itemCountTo;
public TypeDiff(String topNodeSerializer) {
fieldDifferences = new HashMap<DiffPropertyPath, FieldDiffScore<T>>();
extraInFrom = new ArrayList<T>();
extraInTo = new ArrayList<T>();
objectDiffs = new ArrayList<ObjectDiffScore<T>>();
this.topNodeSerializer = topNodeSerializer;
}
TypeDiff(String topNodeSerializer, List<T> missingFrom, List<T> missingTo, List<ObjectDiffScore<T>> objectDiffs, Map<DiffPropertyPath, FieldDiffScore<T>> fieldDifferences, int itemCountFrom, int itemCountTo) {
this.topNodeSerializer = topNodeSerializer;
this.extraInFrom = missingFrom;
this.extraInTo = missingTo;
this.objectDiffs = objectDiffs;
this.fieldDifferences = fieldDifferences;
this.itemCountFrom = itemCountFrom;
this.itemCountTo = itemCountTo;
}
public void addFieldObjectDiffScore(DiffPropertyPath fieldBreadcrumbs, T to, T from, int diffScore) {
if(diffScore > 0) {
ObjectDiffScore<T> fieldDiffScore = new ObjectDiffScore<T>(from, to, diffScore);
FieldDiffScore<T> fieldDiff = getFieldDiffScore(fieldBreadcrumbs);
fieldDiff.addObjectDiffScore(fieldDiffScore);
}
}
public void incrementFieldScores(DiffPropertyPath fieldBreadcrumbs, int diffIncrement, int totalIncrement) {
if(diffIncrement != 0 || totalIncrement != 0) {
FieldDiffScore<T> fieldDiff = getFieldDiffScore(fieldBreadcrumbs);
fieldDiff.incrementDiffCountBy(diffIncrement);
fieldDiff.incrementTotalCountBy(totalIncrement);
}
}
public void incrementFieldDiff(DiffPropertyPath fieldBreadcrumbs, int increment) {
if(increment != 0) {
FieldDiffScore<T> fieldDiff = getFieldDiffScore(fieldBreadcrumbs);
fieldDiff.incrementDiffCountBy(increment);
}
}
public void incrementFieldTotal(DiffPropertyPath fieldBreadcrumbs, int increment) {
if(increment != 0) {
FieldDiffScore<T> fieldDiff = getFieldDiffScore(fieldBreadcrumbs);
fieldDiff.incrementTotalCountBy(increment);
}
}
private FieldDiffScore<T> getFieldDiffScore(DiffPropertyPath fieldBreadcrumbs) {
FieldDiffScore<T> counter = fieldDifferences.get(fieldBreadcrumbs);
if(counter == null) {
counter = new FieldDiffScore<T>();
fieldDifferences.put(fieldBreadcrumbs, counter);
}
return counter;
}
public void addExtraInFrom(T missing) {
extraInFrom.add(missing);
}
public void addExtraInTo(T missing) {
extraInTo.add(missing);
}
public void addDiffObject(T from, T to, int score) {
objectDiffs.add(new ObjectDiffScore<T>(from, to, score));
}
public void incrementFrom() {
itemCountFrom++;
}
public void incrementFrom(int byCount) {
itemCountFrom += byCount;
}
public void incrementTo() {
itemCountTo++;
}
public void incrementTo(int byCount) {
itemCountTo += byCount;
}
public String getTopNodeSerializer() {
return topNodeSerializer;
}
public List<T> getExtraInFrom() {
return extraInFrom;
}
public List<T> getExtraInTo() {
return extraInTo;
}
public int getItemCountFrom() {
return itemCountFrom;
}
public int getItemCountTo() {
return itemCountTo;
}
public int getTotalDiffs() {
int totalDiffs = 0;
for(ObjectDiffScore<?> objectDiffScore : objectDiffs) {
totalDiffs += objectDiffScore.getScore();
}
return totalDiffs;
}
public List<FieldDiff<T>> getSortedFieldDifferencesDescending() {
List<FieldDiff<T>> fieldDiffs = new ArrayList<FieldDiff<T>>(fieldDifferences.size());
for(DiffPropertyPath key : fieldDifferences.keySet()) {
fieldDiffs.add(new FieldDiff<T>(key, fieldDifferences.get(key)));
}
Collections.sort(fieldDiffs);
return fieldDiffs;
}
public Map<DiffPropertyPath, FieldDiffScore<T>> getFieldDifferences() {
return fieldDifferences;
}
public List<ObjectDiffScore<T>> getDiffObjects() {
return objectDiffs;
}
public List<ObjectDiffScore<T>> getSortedDiffObjects() {
List<ObjectDiffScore<T>> sortedList = new ArrayList<ObjectDiffScore<T>>(objectDiffs.size());
sortedList.addAll(objectDiffs);
Collections.sort(sortedList);
return sortedList;
}
public List<ObjectDiffScore<T>> getSortedDiffObjectsByFields(List<DiffPropertyPath> includeFields) {
Map<ObjectDiffScore<T>, ObjectDiffScore<T>> aggregatedScores = new HashMap<ObjectDiffScore<T>, ObjectDiffScore<T>>();
for(DiffPropertyPath field : includeFields) {
FieldDiffScore<T> fieldDiffScore = fieldDifferences.get(field);
if(fieldDiffScore != null) {
for(ObjectDiffScore<T> fieldObjectDiff : fieldDiffScore.getDiffScores()) {
ObjectDiffScore<T> objectDiffCopy = aggregatedScores.get(fieldObjectDiff);
if(objectDiffCopy == null) {
objectDiffCopy = new ObjectDiffScore<T>(fieldObjectDiff.getFrom(), fieldObjectDiff.getTo(), 0);
aggregatedScores.put(objectDiffCopy, objectDiffCopy);
}
objectDiffCopy.incrementScoreBy(fieldObjectDiff.getScore());
}
}
}
List<ObjectDiffScore<T>> scores = new ArrayList<ObjectDiffScore<T>>(aggregatedScores.keySet());
Collections.sort(scores);
return scores;
}
public static class FieldDiff<T> implements Comparable<FieldDiff<T>> {
private final DiffPropertyPath propertyPath;
private final FieldDiffScore<T> diffScore;
public FieldDiff(DiffPropertyPath propertyPath, FieldDiffScore<T> diffScore) {
this.propertyPath = propertyPath;
this.diffScore = diffScore;
}
public DiffPropertyPath getPropertyPath() {
return propertyPath;
}
/**
* @deprecated use getPropertyPath() instead
*/
@Deprecated
public DiffPropertyPath getBreadcrumbs() {
return propertyPath;
}
public FieldDiffScore<T> getDiffScore() {
return diffScore;
}
@Override
public int compareTo(FieldDiff<T> o) {
return diffScore.compareTo(o.diffScore);
}
@Override
public String toString() {
return propertyPath.toString() + ": " + diffScore.toString();
}
}
public static class FieldDiffScore<T> implements Comparable<FieldDiffScore<T>> {
private int diffCount;
private int totalCount;
private final List<ObjectDiffScore<T>> objectScores;
public FieldDiffScore() {
this.objectScores = new ArrayList<ObjectDiffScore<T>>();
}
public void incrementDiffCountBy(int count) {
diffCount += count;
}
public void incrementTotalCountBy(int count) {
totalCount += count;
}
public int getDiffCount() {
return diffCount;
}
public int getTotalCount() {
return totalCount;
}
public List<ObjectDiffScore<T>> getDiffScores() {
return objectScores;
}
public double getDiffPercent() {
return (double)diffCount / (double)totalCount;
}
public void addObjectDiffScore(ObjectDiffScore<T> score) {
objectScores.add(score);
}
@Override
public int compareTo(FieldDiffScore<T> o) {
double thisDiffPercent = getDiffPercent();
double otherDiffPercent = o.getDiffPercent();
if(thisDiffPercent == otherDiffPercent)
return 0;
return thisDiffPercent > otherDiffPercent ? -1 : 1;
}
@Override
public String toString() {
NumberFormat nf = NumberFormat.getInstance();
nf.setMaximumFractionDigits(3);
return nf.format(getDiffPercent() * 100) + "% (" + diffCount + "/" + totalCount + ")";
}
}
public static class ObjectDiffScore<T> implements Comparable<ObjectDiffScore<T>>{
private final T fromObject;
private final T toObject;
private int score;
public ObjectDiffScore(T fromObject, T toObject, int score) {
this.fromObject = fromObject;
this.toObject = toObject;
this.score = score;
}
public T getFrom() {
return fromObject;
}
public T getTo() {
return toObject;
}
public int getScore() {
return score;
}
private void incrementScoreBy(int increment) {
score += increment;
}
@Override
public int compareTo(ObjectDiffScore<T> o) {
return o.score - score;
}
@Override
public int hashCode() {
return fromObject.hashCode() + (31 * toObject.hashCode());
}
@Override
@SuppressWarnings("unchecked")
public boolean equals(Object other) {
if(other instanceof ObjectDiffScore) {
ObjectDiffScore<T> otherScore = (ObjectDiffScore<T>) other;
return otherScore.getFrom().equals(fromObject) && otherScore.getTo().equals(toObject);
}
return false;
}
}
}
| 8,377 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffReport.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.diff.TypeDiff.ObjectDiffScore;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
/**
* A diff report is the result of a diff operation.
*
* This data structure allows for investigation of the differences found between two deserialized blobs.
*
* The diff is organized into "extra" objects, which are objects which had no match, and "different" object pairs,
* which are pairs of objects which were matched, but had differences.
*
* The DiffReport contains the sum of all diffs, the sum of all "extra" objects, and a series of TypeDiff objects, one per
* {@link TypeDiffInstruction} supplied to the {@link DiffInstruction} which generated this report.
*
* See the class DiffExample under source folder src/examples/java for an example of how to use this.
*
* @author dkoszewnik
*
*/
public class DiffReport {
private final DiffHeader header;
private final List<TypeDiff<?>> typeDiffs;
private int totalDiffs;
private int totalExtra;
public DiffReport(final DiffHeader header, final List<TypeDiff<?>> typeDiffs) {
this.header = header;
this.typeDiffs = typeDiffs;
for(final TypeDiff<?> td : typeDiffs) {
for(final ObjectDiffScore<?> objectDiffScore : td.getDiffObjects()) {
totalDiffs += objectDiffScore.getScore();
}
totalExtra += td.getExtraInFrom().size() + td.getExtraInTo().size();
}
}
@SuppressWarnings("unchecked")
public <T> TypeDiff<T> getTypeDiff(final String topLevelSerializer) {
for(final TypeDiff<?> typeDiff : typeDiffs) {
if(typeDiff.getTopNodeSerializer().equals(topLevelSerializer)) {
return (TypeDiff<T>)typeDiff;
}
}
return null;
}
public List<TypeDiff<?>> getTypeDiffsSortedByDiffScore() {
final List<TypeDiff<?>> typeDiffs = new ArrayList<TypeDiff<?>>(this.typeDiffs.size());
typeDiffs.addAll(this.typeDiffs);
Collections.sort(typeDiffs, new Comparator<TypeDiff<?>>() {
@Override
public int compare(final TypeDiff<?> o1, final TypeDiff<?> o2) {
return o2.getTotalDiffs() - o1.getTotalDiffs();
}
});
return typeDiffs;
}
public List<TypeDiff<?>> getTypeDiffsSortedByExtraObjects() {
final List<TypeDiff<?>> typeDiffs = new ArrayList<TypeDiff<?>>(this.typeDiffs.size());
typeDiffs.addAll(this.typeDiffs);
Collections.sort(typeDiffs, new Comparator<TypeDiff<?>>() {
@Override
public int compare(final TypeDiff<?> o1, final TypeDiff<?> o2) {
final int extra2 = o2.getExtraInFrom().size() + o2.getExtraInTo().size();
final int extra1 = o1.getExtraInFrom().size() + o1.getExtraInTo().size();
return extra2 - extra1;
}
});
return typeDiffs;
}
public List<TypeDiff<?>> getTypeDiffsSortedByMissingFromObjects() {
final List<TypeDiff<?>> typeDiffs = new ArrayList<TypeDiff<?>>(this.typeDiffs.size());
typeDiffs.addAll(this.typeDiffs);
Collections.sort(typeDiffs, new Comparator<TypeDiff<?>>() {
@Override
public int compare(final TypeDiff<?> o1, final TypeDiff<?> o2) {
final int extra2 = o2.getExtraInFrom().size();
final int extra1 = o1.getExtraInFrom().size();
return extra2 - extra1;
}
});
return typeDiffs;
}
public List<TypeDiff<?>> getTypeDiffsSortedByMissingToObjects() {
final List<TypeDiff<?>> typeDiffs = new ArrayList<TypeDiff<?>>(this.typeDiffs.size());
typeDiffs.addAll(this.typeDiffs);
Collections.sort(typeDiffs, new Comparator<TypeDiff<?>>() {
@Override
public int compare(final TypeDiff<?> o1, final TypeDiff<?> o2) {
final int extra2 = o2.getExtraInTo().size();
final int extra1 = o1.getExtraInTo().size();
return extra2 - extra1;
}
});
return typeDiffs;
}
public int getTotalDiffs() {
return totalDiffs;
}
public int getTotalExtra() {
return totalExtra;
}
public DiffHeader getHeader() {
return header;
}
}
| 8,378 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffRecord.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.serializer.NFSerializationRecord;
import com.netflix.zeno.serializer.NFTypeSerializer;
/**
* A diff record represents the combined values for all fields at all levels in an {@link NFTypeSerializer} hierarchy.
*
* Conceptually, The diff of two Objects is calculated by the following process:
* 1) reduce all properties in each Object to sets of key/value pairs.
* 2) pull out matching pairs of key/value pairs from both Objects.
* 3) When there are no more matches left, the diff score between the two Objects is sum of the remaining key/value pairs for both Objects.
*
* The DiffPropertyPath contained here is updated to reflect the current path during serialization. This is an optimization which
* allows us to not create new {@link DiffPropertyPath} objects at each step during serialization. The {@link DiffRecordValueListMap} contains key/value
* pairs.
*
* @author dkoszewnik
*
*/
public class DiffRecord extends NFSerializationRecord {
private final DiffPropertyPath propertyPath;
private final DiffRecordValueListMap fieldValues;
private FastBlobSchema schema;
public DiffRecord() {
this.propertyPath = new DiffPropertyPath();
this.fieldValues = new DiffRecordValueListMap();
}
public void setSchema(FastBlobSchema schema) {
this.schema = schema;
}
public FastBlobSchema getSchema() {
return schema;
}
public void setTopLevelSerializerName(String topNodeSerializer) {
propertyPath.setTopNodeSerializer(topNodeSerializer);
}
public void serializeObject(String fieldName) {
propertyPath.addBreadcrumb(fieldName);
}
public void finishedObject() {
propertyPath.removeBreadcrumb();
}
public void serializePrimitive(String fieldName, Object value) {
propertyPath.addBreadcrumb(fieldName);
fieldValues.addValue(propertyPath, value);
propertyPath.removeBreadcrumb();
}
public void clear() {
propertyPath.reset();
fieldValues.clear();
}
public DiffRecordValueListMap getValueListMap() {
return fieldValues;
}
DiffRecordValueListMap getFieldValues() {
return fieldValues;
}
}
| 8,379 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffOperation.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.serializer.SerializerFactory;
import java.util.ArrayList;
import java.util.List;
public class DiffOperation {
private final SerializerFactory serializerFactory;
private final DiffInstruction instruction;
/**
* Instantiate a DiffOperation, capable of performing a diff between two data states.
*
* @param dataModel - The SerializerFactory describing the data model to use.
* @param instruction - the details about how to find top level objects in a data state
*/
public DiffOperation(SerializerFactory dataModel, DiffInstruction instruction) {
this.serializerFactory = dataModel;
this.instruction = instruction;
}
/**
* Perform a diff between two data states.
*
* Note: For now, this operation will ignore type instructions for non-unique keys.
*
* @param fromState - The "from" state engine, populated with one of the deserialized data states to compare
* @param toState - the "to" state engine, populated with the other deserialized data state to compare.
* @param factory - The SerializerFactory describing the data model to use.
* @return the DiffReport for investigation of the differences between the two data states.
* @throws DiffReportGenerationException
*/
public DiffReport performDiff(FastBlobStateEngine fromState, FastBlobStateEngine toState) throws DiffReportGenerationException {
return performDiff(null, fromState, toState);
}
public DiffReport performDiff(DiffHeader diffHeader, final FastBlobStateEngine fromState, final FastBlobStateEngine toState) throws DiffReportGenerationException {
try {
final List<TypeDiff<?>> diffs = new ArrayList<TypeDiff<?>>();
final DiffSerializationFramework framework = new DiffSerializationFramework(serializerFactory);
for (final TypeDiffInstruction<?> instruction : this.instruction.getTypeInstructions()) {
/// for now, the DiffOperation ignores non-unique keys.
if(instruction.isUniqueKey()) {
Iterable<?> fromDeserializationState = fromState.getTypeDeserializationState(instruction.getSerializerName());
Iterable<?> toDeserializationState = toState.getTypeDeserializationState(instruction.getSerializerName());
TypeDiff<Object> typeDiff = performDiff(framework, instruction, fromDeserializationState, toDeserializationState);
diffs.add(typeDiff);
}
}
return new DiffReport(diffHeader, diffs);
} catch (Exception e) {
throw new DiffReportGenerationException(e);
}
}
@SuppressWarnings("unchecked")
private <T> TypeDiff<T> performDiff(DiffSerializationFramework framework, TypeDiffInstruction<?> diff, Iterable<?> from, Iterable<?> to) {
TypeDiffInstruction<T> castDiff = (TypeDiffInstruction<T>) diff;
Iterable<T> castFrom = (Iterable<T>) from;
Iterable<T> castTo = (Iterable<T>) to;
return new TypeDiffOperation<T>(castDiff).performDiff(framework, castFrom, castTo, Runtime.getRuntime().availableProcessors());
}
}
| 8,380 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffHeader.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
/**
* Header fields for a {@link DiffReport}
*
* @author dkoszewnik
*
*/
public class DiffHeader {

    private final String vip1;
    private final String vip2;
    private final String blob1;
    private final String blob2;
    private final String version1;
    private final String version2;

    /**
     * @param vip1     name of the "from" vip
     * @param blob1    name of the "from" blob
     * @param version1 version of the "from" data state
     * @param vip2     name of the "to" vip
     * @param blob2    name of the "to" blob
     * @param version2 version of the "to" data state
     */
    public DiffHeader(String vip1, String blob1, String version1, String vip2, String blob2, String version2) {
        this.vip1 = vip1;
        this.vip2 = vip2;
        // BUG FIX: this.blob1 was previously assigned blob2, so the "from" blob name
        // was silently reported as the "to" blob name.
        this.blob1 = blob1;
        this.blob2 = blob2;
        this.version1 = version1;
        this.version2 = version2;
    }

    public String getVip1() {
        return vip1;
    }

    public String getVip2() {
        return vip2;
    }

    public String getBlob1() {
        return blob1;
    }

    public String getBlob2() {
        return blob2;
    }

    public String getVersion1() {
        return version1;
    }

    public String getVersion2() {
        return version2;
    }

    /**
     * Returns "vip1-blob1-version1". (Method name keeps its historical lowercase 'f'
     * for backwards compatibility with existing callers.)
     */
    public String getFormattedfromString() {
        return String.format("%s-%s-%s", vip1, blob1, version1);
    }

    /** Returns "vip2-blob2-version2". */
    public String getFormattedToString() {
        return String.format("%s-%s-%s", vip2, blob2, version2);
    }
}
| 8,381 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/TypeDiffOperation.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.diff.TypeDiff.FieldDiffScore;
import com.netflix.zeno.serializer.NFTypeSerializer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import org.apache.commons.lang.mutable.MutableInt;
/**
* This is the main interface for performing a diff between two arbitrary data states.<p/>
*
* @author dkoszewnik
*
*/
public class TypeDiffOperation<T> {

    /** Describes how to extract the matching key and the type identifier for objects of type T. */
    private final TypeDiffInstruction<T> instruction;

    public TypeDiffOperation(TypeDiffInstruction<T> instruction) {
        this.instruction = instruction;
    }

    /**
     * Diff the two states using one worker thread per available processor.
     */
    @SuppressWarnings("unchecked")
    public TypeDiff<T> performDiff(DiffSerializationFramework framework, Iterable<T> fromState, Iterable<T> toState) {
        return performDiff(framework, fromState, toState, Runtime.getRuntime().availableProcessors());
    }

    /**
     * Diff the two states with the requested degree of parallelism.<p/>
     *
     * Algorithm: index the "from" objects by key, round-robin the "to" objects into
     * {@code numThreads} work lists, diff each work list on its own daemon thread,
     * then merge the per-thread results and finally account for "from" objects whose
     * key never appeared in the "to" state.
     *
     * @param framework  serialization framework used to flatten objects into field values
     * @param fromState  objects in the earlier data state
     * @param toState    objects in the later data state
     * @param numThreads number of worker threads to diff with
     * @return the merged diff for this type
     */
    @SuppressWarnings("unchecked")
    public TypeDiff<T> performDiff(DiffSerializationFramework framework, Iterable<T> fromState, Iterable<T> toState, int numThreads) {

        /// index the "from" state by the key defined in the TypeDiffInstruction
        Map<Object, T> fromStateObjects = new HashMap<Object, T>();
        for(T obj : fromState) {
            fromStateObjects.put(instruction.getKey(obj), obj);
        }

        ArrayList<List<T>> perProcessorWorkList = new ArrayList<List<T>>(numThreads); // each entry is a job
        for (int i =0; i < numThreads; ++i) {
            perProcessorWorkList.add(new ArrayList<T>());
        }

        /// concurrent: populated by all worker threads, read afterwards on this thread
        Map<Object, Object> toStateKeys = new ConcurrentHashMap<Object, Object>();

        /// round-robin the "to" objects across the per-thread work lists
        int toIncrCount = 0;
        for(T toObject : toState) {
            perProcessorWorkList.get(toIncrCount % numThreads).add(toObject);
            toIncrCount++;
        }

        ExecutorService executor = Executors.newFixedThreadPool(numThreads, new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                /// daemon threads so a stuck diff cannot keep the JVM alive
                final Thread thread = new Thread(r, "TypeDiff_" + instruction.getTypeIdentifier());
                thread.setDaemon(true);
                return thread;
            }
        });

        try {
            ArrayList<Future<TypeDiff<T>>> workResultList = new ArrayList<Future<TypeDiff<T>>>(perProcessorWorkList.size());

            for (final List<T> workList : perProcessorWorkList) {
                if (workList != null && !workList.isEmpty()) {
                    workResultList.add(executor.submit(new TypeDiffCallable<T>(framework, instruction, fromStateObjects, toStateKeys, workList)));
                }
            }

            /// merge the per-thread partial diffs into a single result
            TypeDiff<T> mergedDiff = new TypeDiff<T>(instruction.getTypeIdentifier());
            for (final Future<TypeDiff<T>> future : workResultList) {
                try {
                    TypeDiff<T> typeDiff = future.get();
                    mergeTypeDiff(mergedDiff, typeDiff);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }

            /// any "from" key never seen by a worker (recorded in toStateKeys) exists only in the "from" state
            for(Map.Entry<Object, T> entry : fromStateObjects.entrySet()) {
                mergedDiff.incrementFrom();
                if(!toStateKeys.containsKey(entry.getKey()))
                    mergedDiff.addExtraInFrom(entry.getValue());
            }
            return mergedDiff;

        } finally {
            executor.shutdownNow();
        }
    }

    /**
     * Accumulates one worker's partial diff into the merged result: extra/diff object
     * lists are concatenated, counters are summed, and per-field scores are summed
     * per DiffPropertyPath.
     */
    private void mergeTypeDiff(TypeDiff<T> mergedDiff, TypeDiff<T> typeDiff) {
        mergedDiff.getExtraInFrom().addAll(typeDiff.getExtraInFrom());
        mergedDiff.getExtraInTo().addAll(typeDiff.getExtraInTo());
        mergedDiff.getDiffObjects().addAll(typeDiff.getDiffObjects());

        mergedDiff.incrementFrom(typeDiff.getItemCountFrom());
        mergedDiff.incrementTo(typeDiff.getItemCountTo());

        Map<DiffPropertyPath, FieldDiffScore<T>> mergedFieldDifferences = mergedDiff.getFieldDifferences();
        Map<DiffPropertyPath, FieldDiffScore<T>> fieldDifferences = typeDiff.getFieldDifferences();

        for (final DiffPropertyPath path : fieldDifferences.keySet()) {
            FieldDiffScore<T> fieldDiffScore = fieldDifferences.get(path);
            FieldDiffScore<T> mergedFieldDiffScore = mergedFieldDifferences.get(path);
            if (mergedFieldDiffScore != null) {
                mergedFieldDiffScore.incrementDiffCountBy(fieldDiffScore.getDiffCount());
                mergedFieldDiffScore.incrementTotalCountBy(fieldDiffScore.getTotalCount());
                mergedFieldDiffScore.getDiffScores().addAll(fieldDiffScore.getDiffScores());
            } else {
                /// first worker to report this path: adopt its score object wholesale
                mergedFieldDifferences.put(path, fieldDiffScore);
            }
        }
    }

    /**
     * Worker task: diffs one round-robin slice of the "to" state against the shared
     * (read-only) index of "from" objects, producing a partial TypeDiff.
     */
    class TypeDiffCallable<Z> implements Callable<TypeDiff<Z>> {
        private final TypeDiffInstruction<Z> instruction;
        private final List<Z> workList;
        private final Map<Object, Object> toStateKeys;      // shared, concurrent: records every "to" key seen
        private final Map<Object, Z> fromStateObjects;      // shared, read-only after construction
        private final DiffSerializationFramework framework;

        public TypeDiffCallable(DiffSerializationFramework framework, TypeDiffInstruction<Z> instruction, Map<Object, Z> fromStateObjects, Map<Object, Object> toStateKeys, List<Z> workList) {
            this.framework = framework;
            this.instruction = instruction;
            this.fromStateObjects = fromStateObjects;
            this.toStateKeys = toStateKeys;
            this.workList = workList;
        }

        @Override
        public TypeDiff<Z> call() throws Exception {
            TypeDiff<Z> diff = new TypeDiff<Z>(instruction.getTypeIdentifier());
            NFTypeSerializer<Z> typeSerializer = (NFTypeSerializer<Z>) framework.getSerializer(instruction.getSerializerName());

            /// one reusable record per side; cleared after each object pair
            DiffRecord fromRec = new DiffRecord();
            fromRec.setSchema(typeSerializer.getFastBlobSchema());
            DiffRecord toRec = new DiffRecord();
            toRec.setSchema(typeSerializer.getFastBlobSchema());

            fromRec.setTopLevelSerializerName(instruction.getSerializerName());
            toRec.setTopLevelSerializerName(instruction.getSerializerName());

            for(Z toObject : workList) {
                diff.incrementTo();
                Object toStateKey = instruction.getKey(toObject);
                toStateKeys.put(toStateKey, Boolean.TRUE);
                Z fromObject = fromStateObjects.get(toStateKey);

                if(fromObject == null) {
                    /// key exists only in the "to" state
                    diff.addExtraInTo(toObject);
                } else {
                    int diffScore = diffFields(diff, fromRec, toRec, typeSerializer, toObject, fromObject);
                    if(diffScore > 0)
                        diff.addDiffObject(fromObject, toObject, diffScore);
                }
            }
            return diff;
        }

        /**
         * Serializes both objects into flat field-value records and scores the
         * differences. Records are cleared afterwards so they can be reused.
         */
        private int diffFields(TypeDiff<Z> diff, DiffRecord fromRec, DiffRecord toRec, NFTypeSerializer<Z> typeSerializer, Z toObject, Z fromObject) {
            typeSerializer.serialize(toObject, toRec);
            typeSerializer.serialize(fromObject, fromRec);

            int diffScore = incrementDiffFields(diff, toRec, fromRec, toObject, fromObject);

            toRec.clear();
            fromRec.clear();

            return diffScore;
        }

        /**
         * Scores every field path present in either record. A path present on only
         * one side contributes its full list size to the diff score.
         */
        private int incrementDiffFields(TypeDiff<Z> diff, DiffRecord toRecord, DiffRecord fromRecord, Z toObject, Z fromObject) {
            int objectDiffScore = 0;

            for(DiffPropertyPath key : toRecord.getFieldValues().keySet()) {
                List<Object> toObjects = toRecord.getFieldValues().getList(key);
                List<Object> fromObjects = fromRecord.getFieldValues().getList(key);

                int objectFieldDiffScore;

                if(fromObjects == null) {
                    /// path missing entirely on the "from" side: everything counts as different
                    diff.incrementFieldScores(key, toObjects.size(), toObjects.size());
                    objectFieldDiffScore = toObjects.size();
                } else {
                    objectFieldDiffScore = incrementDiffFields(diff, key, toObjects, fromObjects);
                }

                objectDiffScore += objectFieldDiffScore;
                diff.addFieldObjectDiffScore(key, toObject, fromObject, objectFieldDiffScore);
            }

            /// account for paths present only on the "from" side
            for(DiffPropertyPath key : fromRecord.getFieldValues().keySet()) {
                if(toRecord.getFieldValues().getList(key) == null) {
                    int diffSize = fromRecord.getFieldValues().getList(key).size();
                    diff.incrementFieldScores(key, diffSize, diffSize);
                    objectDiffScore += diffSize;
                    diff.addFieldObjectDiffScore(key, toObject, fromObject, diffSize);
                }
            }

            return objectDiffScore;
        }

        /**
         * Multiset-style comparison of two value lists: the score is the number of
         * elements not matched one-for-one between the lists (order-insensitive).
         */
        private int incrementDiffFields(TypeDiff<?> diff, DiffPropertyPath breadcrumbs, List<Object> toObjects, List<Object> fromObjects) {
            int objectFieldDiffScore = 0;
            Map<Object, MutableInt> objectSet = getObjectMap();

            for(Object obj : toObjects) {
                increment(objectSet, obj);
            }

            for(Object obj : fromObjects) {
                if(!decrement(objectSet, obj)) {
                    /// "from" element with no remaining "to" counterpart
                    objectFieldDiffScore++;
                }
            }

            /// anything left in the multiset is a "to" element with no "from" counterpart
            if(!objectSet.isEmpty()) {
                for(Map.Entry<Object, MutableInt>entry : objectSet.entrySet()) {
                    objectFieldDiffScore += entry.getValue().intValue();
                }
            }

            objectSet.clear();

            diff.incrementFieldScores(breadcrumbs, objectFieldDiffScore, toObjects.size() + fromObjects.size());

            return objectFieldDiffScore;
        }

        private void increment(Map<Object, MutableInt> map, Object obj) {
            MutableInt i = map.get(obj);
            if(i == null) {
                i = new MutableInt(0);
                map.put(obj, i);
            }
            i.increment();
        }

        /** Returns false when obj had no remaining count in the multiset. */
        private boolean decrement(Map<Object, MutableInt> map, Object obj) {
            MutableInt i = map.get(obj);
            if(i == null) {
                return false;
            }

            i.decrement();

            if(i.intValue() == 0) {
                map.remove(obj);
            }

            return true;
        }
    }

    /// per-thread scratch multiset, reused across list comparisons to avoid allocation churn
    private static final ThreadLocal<Map<Object, MutableInt>> objectSet = new ThreadLocal<Map<Object, MutableInt>>();

    private Map<Object, MutableInt> getObjectMap() {
        Map<Object, MutableInt> objectSet = TypeDiffOperation.objectSet.get();
        if(objectSet == null) {
            objectSet = new HashMap<Object, MutableInt>();
            TypeDiffOperation.objectSet.set(objectSet);
        }
        return objectSet;
    }
}
| 8,382 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffReportGenerationException.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
/**
 * Unchecked exception signalling that a diff report could not be generated.
 * Mirrors the four standard {@link RuntimeException} constructors.
 */
public class DiffReportGenerationException extends RuntimeException {

    private static final long serialVersionUID = -3048455667276378172L;

    /** Creates an exception with neither a detail message nor a cause. */
    public DiffReportGenerationException() {
    }

    /** Creates an exception carrying both a detail message and the underlying cause. */
    public DiffReportGenerationException(final String message, final Throwable cause) {
        super(message, cause);
    }

    /** Creates an exception carrying only a detail message. */
    public DiffReportGenerationException(final String message) {
        super(message);
    }

    /** Creates an exception wrapping the underlying cause. */
    public DiffReportGenerationException(final Throwable cause) {
        super(cause);
    }
}
| 8,383 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/DiffInstruction.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.serializer.NFTypeSerializer;
import com.netflix.zeno.serializer.SerializerFactory;
/**
* The DiffInstruction describes how to derive a {@link DiffReport} on the deserialized Objects
* contained in two {@link FastBlobStateEngines}.<p/>
*
* In order to perform a diff, we must be able to match up equivalent Objects at the roots
* of the FastBlobStateEngine. For each type we want included in the diff report, we must
* specify a {@link TypeDiffInstruction}. Each TypeDiffInstruction informs how to match
* up individual elements of that type. Each pair of Objects will be examined for differences
* throughout the hierarchy defined by the {@link NFTypeSerializer}s.
*
* @author dkoszewnik
*
*/
public class DiffInstruction {

    /** One instruction per top-level type to include in the diff report. */
    private final TypeDiffInstruction<?>[] instructionList;

    public DiffInstruction(TypeDiffInstruction<?>... instructions) {
        instructionList = instructions;
    }

    /**
     * Finds the instruction registered for the given top-level serializer name.
     *
     * @param topNodeSerializer the serializer name to look up
     * @return the matching instruction, or null when none was registered for that serializer
     */
    public TypeDiffInstruction<?> getTypeInstruction(String topNodeSerializer) {
        for (TypeDiffInstruction<?> instruction : instructionList) {
            if (instruction.getSerializerName().equals(topNodeSerializer)) {
                return instruction;
            }
        }
        return null;
    }

    /** Returns all registered type instructions, in registration order. */
    public TypeDiffInstruction<?>[] getTypeInstructions() {
        return instructionList;
    }

    /**
     * @deprecated instead use the interface provided by {@link DiffOperation}
     */
    @Deprecated
    public DiffReport performDiff(FastBlobStateEngine fromState, FastBlobStateEngine toState, SerializerFactory factory) throws DiffReportGenerationException {
        return performDiff(null, fromState, toState, factory);
    }

    /**
     * @deprecated instead use the interface provided by {@link DiffOperation}
     */
    @Deprecated // annotation was missing even though the javadoc declared this deprecated
    public DiffReport performDiff(DiffHeader diffHeader, final FastBlobStateEngine fromState, final FastBlobStateEngine toState, SerializerFactory factory) throws DiffReportGenerationException {
        return new DiffOperation(factory, this).performDiff(diffHeader, fromState, toState);
    }
}
| 8,384 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/diff | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/history/DiffHistoricalTypeState.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff.history;
import java.util.Map;
import java.util.Set;
/**
* Contains the set of changes which occurred in a single type during a single data state update. Changes
* are broken down into:<p/>
*
* <ul>
* <li>The set of newly created instances</li>
* <li>The set of modified instances</li>
* <li>The set of deleted instances</li>
* </ul>
*
* @author dkoszewnik
*
*/
public class DiffHistoricalTypeState<K, V> {

    /// keys of instances created in this state transition
    private final Set<K> created;
    /// key -> previous value, for instances modified in this transition
    private final Map<K, V> modified;
    /// key -> removed value, for instances deleted in this transition
    private final Map<K, V> removed;

    public DiffHistoricalTypeState(Set<K> newObjects, Map<K, V>diffObjects, Map<K, V> deletedObjects) {
        created = newObjects;
        modified = diffObjects;
        removed = deletedObjects;
    }

    /** Keys of instances which did not exist before this state. */
    public Set<K> getNewObjects() {
        return created;
    }

    /** Previous values of instances which changed in this state, keyed by object key. */
    public Map<K, V> getDiffObjects() {
        return modified;
    }

    /** Values of instances removed in this state, keyed by object key. */
    public Map<K, V> getDeletedObjects() {
        return removed;
    }

    /** Total number of instances that were created, modified, or deleted. */
    public int numChanges() {
        int changes = created.size();
        changes += modified.size();
        changes += removed.size();
        return changes;
    }
}
| 8,385 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/diff | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/history/DiffHistoryTracker.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff.history;
import com.netflix.zeno.diff.DiffInstruction;
import com.netflix.zeno.diff.TypeDiffInstruction;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.util.SimultaneousExecutor;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
* A data structure to track the history of changes in a single FastBlobStateEngine.<p/>
*
* Each time the state engine consumes a blob file, call "addState()". Then, at any time, pass in a key for an object identified in the
* TypeDiffInstruction to retrieve a historical record of that data.<p/>
*
* This data structure retains a history of all changes in a data set across some specified number of rolling updates. Because this retains a
* large amount of data, it can consume a significant memory footprint, and resource availability should be planned accordingly.<p/>
*
* This class takes advantage of the guarantee that two identical objects across adjacent data states will be the same instance. Comparisons
* can therefore be done with ==, rather than checking for identical serialized representations.
*
* @author dkoszewnik
*
*/
public class DiffHistoryTracker {

    /// maximum number of historical (delta) states retained in the rolling window
    private final int historySizeToKeep;
    private final FastBlobStateEngine stateEngine;
    /// newest state first; trimmed from the tail once historySizeToKeep is exceeded
    private final LinkedList<DiffHistoricalState> historicalStates;
    /// version -> header tags snapshot taken at the time that version was added
    private final Map<String, Map<String, String>> historicalStateHeaderTags;
    private final TypeDiffInstruction<?> typeDiffInstructions[];

    /// full snapshot of the most recently added data state (null until first addState())
    private DiffHistoryDataState currentDataState;

    /**
     *
     * @param numStatesToKeep - The number of historical states to keep
     * @param stateEngine - The state engine to track the history of
     * @param diffInstruction - The set of key extractions for types in the object model.
     */
    public DiffHistoryTracker(int numStatesToKeep, FastBlobStateEngine stateEngine, DiffInstruction diffInstruction) {
        this.historySizeToKeep = numStatesToKeep;
        this.stateEngine = stateEngine;
        this.historicalStates = new LinkedList<DiffHistoricalState>();
        this.historicalStateHeaderTags = new ConcurrentHashMap<String, Map<String,String>>();
        this.typeDiffInstructions = diffInstruction.getTypeInstructions();
    }

    /**
     * Call this method after new data has been loaded by the FastBlobStateEngine. This will add a historical record
     * of the differences between the previous state and this new state.
     *
     * NOTE(review): addState() mutates historicalStates (a plain LinkedList) and
     * currentDataState without synchronization — presumably it is only invoked from a
     * single refresh thread; confirm before calling concurrently.
     */
    public void addState() {
        // snapshot the engine's current contents, keyed per the type instructions
        DiffHistoryDataState nextState = new DiffHistoryDataState(stateEngine, typeDiffInstructions);
        if(currentDataState != null)
            newHistoricalState(currentDataState, nextState);
        currentDataState = nextState;
    }

    /**
     * Computes the delta between two full snapshots, one type per parallel task,
     * prepends the result to the history, and trims the oldest entry if the
     * rolling window is full.
     */
    private void newHistoricalState(final DiffHistoryDataState from, final DiffHistoryDataState to) {
        final DiffHistoricalState historicalState = new DiffHistoricalState(to.getVersion());

        SimultaneousExecutor executor = new SimultaneousExecutor();

        for(final TypeDiffInstruction<?> typeInstruction : from.getTypeDiffInstructions()) {
            executor.execute(new Runnable() {
                public void run() {
                    Map<Object, Object> fromTypeState = from.getTypeState(typeInstruction.getTypeIdentifier());
                    Map<Object, Object> toTypeState = to.getTypeState(typeInstruction.getTypeIdentifier());

                    historicalState.addTypeState(typeInstruction, fromTypeState, toTypeState);
                }
            });
        }

        executor.awaitUninterruptibly();

        historicalStates.addFirst(historicalState);
        historicalStateHeaderTags.put(to.getVersion(), new HashMap<String, String>(stateEngine.getHeaderTags()));

        /// trim historical entries beyond desired size.
        if(historicalStates.size() > historySizeToKeep) {
            DiffHistoricalState removedState = historicalStates.removeLast();
            historicalStateHeaderTags.remove(removedState.getVersion());
        }
    }

    /**
     * Return the history of the object identified by the supplied type / key combination.<p/>
     *
     * The returned list will contain one entry for each state in the rolling history retained by this DiffHistoryTracker.
     * The latest entry will be at index 0, and the earliest entry will be the last in the list. Not all entries in the returned
     * list must reflect a transition; an entry is included in the list whether or not the instance changed for a given state.
     *
     */
    public <T> List<DiffObjectHistoricalTransition<T>> getObjectHistory(String type, Object key) {
        List<DiffObjectHistoricalTransition<T>> states = new ArrayList<DiffObjectHistoricalTransition<T>>(historicalStates.size());

        Map<Object, T> typeState = currentDataState.getTypeState(type);

        /// start with the currently available item (if available)
        T currentItem = typeState.get(key);

        /// and work backwards through history.
        for(DiffHistoricalState state : historicalStates) {
            DiffHistoricalTypeState<Object, T> historicalState = state.getTypeState(type);

            Map<Object, T> diffObjects = historicalState.getDiffObjects();
            Map<Object, T> deletedObjects = historicalState.getDeletedObjects();
            Set<Object> newObjects = historicalState.getNewObjects();

            /// reconstruct what the item looked like *before* this state was applied:
            /// modified -> prior value; deleted -> the value that was removed;
            /// newly created -> did not exist (null); otherwise unchanged.
            T previous;

            if(diffObjects.containsKey(key)) {
                previous = diffObjects.get(key);
            } else if(deletedObjects.containsKey(key)) {
                previous = deletedObjects.get(key);
            } else if(newObjects.contains(key)) {
                previous = null;
            } else {
                previous = currentItem;
            }

            /// adding the from -> to objects (whether identical or not) to a list.
            states.add(new DiffObjectHistoricalTransition<T>(state.getVersion(), previous, currentItem));
            currentItem = previous;
        }

        return states;
    }

    /**
     * Returns a list of the historical states, starting with the most recent and ending with the oldest.
     */
    public List<DiffHistoricalState> getHistoricalStates() {
        return Collections.unmodifiableList(historicalStates);
    }

    /**
     * Returns the header tags which were attached to the given version.
     */
    public Map<String, String> getHistoricalStateHeaderTags(String stateVersion) {
        return historicalStateHeaderTags.get(stateVersion);
    }
}
| 8,386 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/diff | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/history/DiffObjectHistoricalTransition.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff.history;
/**
* Describes a historical transition an object went through when a data state was loaded.<p/>
*
* It's possible that no transition occurred for this object.
*
* @author dkoszewnik
*
* @param <T>
*/
public class DiffObjectHistoricalTransition<T> {

    /// version of the data state this transition belongs to
    private final String dataVersion;
    /// instance before the transition (null if it did not exist yet)
    private final T before;
    /// instance after the transition (null if it was deleted)
    private final T after;

    public DiffObjectHistoricalTransition(String version, T before, T after) {
        dataVersion = version;
        this.before = before;
        this.after = after;
    }

    /** The data state version at which this transition occurred. */
    public String getDataVersion() {
        return dataVersion;
    }

    /** The instance prior to this state, or null if it was newly created. */
    public T getBefore() {
        return before;
    }

    /** The instance after this state, or null if it was deleted. */
    public T getAfter() {
        return after;
    }

    /**
     * True when the instance changed in this transition. Identity comparison is
     * intentional: identical objects across adjacent states are guaranteed to be
     * the same instance.
     */
    public boolean itemChanged() {
        return after != before;
    }
}
| 8,387 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/diff | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/history/DiffHistoryDataState.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff.history;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.mutable.MutableInt;
import com.netflix.zeno.diff.TypeDiffInstruction;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.fastblob.state.FastBlobTypeDeserializationState;
import com.netflix.zeno.util.collections.impl.OpenAddressingHashMap;
/**
* A complete historical representation of the objects available in a
* FastBlobStateEngine at some point in time.<p/>
*
* This representation contains all of the objects of each type specified in a
* set of TypeDiffInstructions, keyed by the keys specified in thos
* TypeDiffInstructions.
*
* @author dkoszewnik
*
*/
public class DiffHistoryDataState {

    /// the FastBlobStateEngine version at the time this snapshot was taken
    private final String version;
    private final TypeDiffInstruction<?>[] typeInstructions;
    /// type identifier -> (key -> object), or (key -> List of objects) when keys are not unique
    private final Map<String, Map<?, ?>> typeStates;

    /**
     * Create a new DiffHistoryDataState. Pulls data from the supplied
     * FastBlobStateEngine in the manner specified by the supplied set of
     * TypeDiffInstructions.
     *
     * @param stateEngine      engine whose deserialization states are snapshotted
     * @param typeInstructions one instruction per type, supplying the key extraction
     */
    @SuppressWarnings("unchecked")
    public DiffHistoryDataState(FastBlobStateEngine stateEngine, TypeDiffInstruction<?>... typeInstructions) {
        this.version = stateEngine.getLatestVersion();
        this.typeInstructions = typeInstructions;
        this.typeStates = new HashMap<String, Map<?, ?>>();

        for (TypeDiffInstruction<?> instruction : typeInstructions) {
            FastBlobTypeDeserializationState<Object> typeState = stateEngine.getTypeDeserializationState(instruction.getSerializerName());
            addTypeState(typeState, (TypeDiffInstruction<Object>) instruction);
        }
    }

    /** Dispatches on key uniqueness: unique keys map directly to objects, others to groups. */
    private <T> void addTypeState(FastBlobTypeDeserializationState<T> deserializationState, TypeDiffInstruction<T> instruction) {
        if (instruction.isUniqueKey())
            buildUniqueKeyTypeState(deserializationState, instruction);
        else
            buildGroupedTypeState(deserializationState, instruction);
    }

    /** Builds a (key -> object) map; the open-addressing map is presized, then populated. */
    private <T> void buildUniqueKeyTypeState(FastBlobTypeDeserializationState<T> deserializationState, TypeDiffInstruction<T> instruction) {
        OpenAddressingHashMap<Object, T> typeState = new OpenAddressingHashMap<Object, T>();
        typeState.builderInit(deserializationState.countObjects());

        int counter = 0;
        for (T obj : deserializationState) {
            Object key = instruction.getKeyFromObject(obj);
            typeState.builderPut(counter++, key, obj);
        }

        typeState.builderFinish();

        typeStates.put(instruction.getTypeIdentifier(), typeState);
    }

    /** Builds a (key -> List of objects) map for types whose key is not unique. */
    private <T> void buildGroupedTypeState(FastBlobTypeDeserializationState<T> deserializationState, TypeDiffInstruction<T> instruction) {
        Map<Object, MutableInt> countsByKey = countObjectsByKey(deserializationState, instruction);
        Map<Object, List<T>> groupsByKey = groupObjectsByKey(deserializationState, instruction, countsByKey);
        OpenAddressingHashMap<Object, List<T>> typeState = buildNewTypeState(groupsByKey);
        typeStates.put(instruction.getTypeIdentifier(), typeState);
    }

    /** Copies the grouped entries into a presized open-addressing map. */
    private <T> OpenAddressingHashMap<Object, List<T>> buildNewTypeState(Map<Object, List<T>> groupsByKey) {
        OpenAddressingHashMap<Object, List<T>> typeState = new OpenAddressingHashMap<Object, List<T>>();
        typeState.builderInit(groupsByKey.size());

        int counter = 0;
        for (Map.Entry<Object, List<T>> entry : groupsByKey.entrySet()) {
            typeState.builderPut(counter++, entry.getKey(), entry.getValue());
        }

        typeState.builderFinish();
        return typeState;
    }

    /**
     * Groups objects by key. Each group's ArrayList is created with the exact
     * capacity determined by the counting pass, avoiding resizes.
     */
    private <T> Map<Object, List<T>> groupObjectsByKey(FastBlobTypeDeserializationState<T> deserializationState, TypeDiffInstruction<T> instruction, Map<Object, MutableInt> countsByKey) {
        Map<Object, List<T>> groupsByKey = new HashMap<Object, List<T>>(countsByKey.size());

        for (T obj : deserializationState) {
            Object key = instruction.getKeyFromObject(obj);
            List<T> groupList = groupsByKey.get(key);
            if (groupList == null) {
                int count = countsByKey.get(key).intValue();
                groupList = new ArrayList<T>(count);
                groupsByKey.put(key, groupList);
            }
            groupList.add(obj);
        }
        return groupsByKey;
    }

    /** First pass over the data: counts how many objects share each key. */
    private <T> Map<Object, MutableInt> countObjectsByKey(FastBlobTypeDeserializationState<T> deserializationState, TypeDiffInstruction<T> instruction) {
        Map<Object, MutableInt> countsByKey = new HashMap<Object, MutableInt>(deserializationState.countObjects());

        for (T obj : deserializationState) {
            Object key = instruction.getKeyFromObject(obj);
            // note: redundant (MutableInt) cast removed -- the map is already typed
            MutableInt count = countsByKey.get(key);
            if (count == null) {
                count = new MutableInt(0);
                countsByKey.put(key, count);
            }
            count.increment();
        }
        return countsByKey;
    }

    /** The instructions this snapshot was built with, in their original order. */
    public TypeDiffInstruction<?>[] getTypeDiffInstructions() {
        return typeInstructions;
    }

    /** The engine version this snapshot represents. */
    public String getVersion() {
        return version;
    }

    /**
     * Returns the snapshot for the given type identifier, or null if untracked.
     * The unchecked cast is safe only when the caller's type parameters match how
     * the type state was built (V is a List type for non-unique keys).
     */
    @SuppressWarnings("unchecked")
    public <K, V> Map<K, V> getTypeState(String name) {
        return (Map<K, V>) typeStates.get(name);
    }
}
| 8,388 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno/diff | Create_ds/zeno/src/main/java/com/netflix/zeno/diff/history/DiffHistoricalState.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.diff.history;
import com.netflix.zeno.diff.TypeDiffInstruction;
import com.netflix.zeno.util.collections.impl.OpenAddressingArraySet;
import com.netflix.zeno.util.collections.impl.OpenAddressingHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
* Represents a historical set of changes in a version of a FastBlobStateEngine
*
* @author dkoszewnik
*
*/
public class DiffHistoricalState {
private final String version;
private final Map<String, DiffHistoricalTypeState<?, ?>> typeStates;
/**
 * @param version the data state version whose changes this historical state captures
 */
public DiffHistoricalState(String version) {
    this.version = version;
    // ConcurrentHashMap: addTypeState() is invoked from multiple worker threads in parallel
    this.typeStates = new ConcurrentHashMap<String, DiffHistoricalTypeState<?, ?>>();
}
/** Returns the data state version this historical state describes. */
public String getVersion() {
    return version;
}
/** Returns the identifiers of all types for which changes were recorded. */
public Set<String> getTypes() {
    return typeStates.keySet();
}
/**
 * Returns the recorded changes for the given type, or null if that type was not tracked.
 * The unchecked cast is only safe when the caller's K/V type parameters match those
 * used when the type state was added.
 */
@SuppressWarnings("unchecked")
public <K, V> DiffHistoricalTypeState<K, V>getTypeState(String objectType) {
    return (DiffHistoricalTypeState<K, V>)typeStates.get(objectType);
}
/**
 * Totals the changed instances (new + modified + deleted) across every
 * type recorded in this historical state.
 */
public int numTotalChanges() {
    int total = 0;
    for (DiffHistoricalTypeState<?, ?> typeState : typeStates.values()) {
        total += typeState.numChanges();
    }
    return total;
}
/**
 * Computes and records the historical delta for one type between the "from" and
 * "to" data states. Safe to call concurrently for different types: typeStates is
 * a ConcurrentHashMap, and DiffHistoryTracker invokes this from parallel workers.
 */
public <K, V> void addTypeState(TypeDiffInstruction<?> typeInstruction, Map<K, V> from, Map<K, V> to) {
    String typeIdentifier = typeInstruction.getTypeIdentifier();
    boolean isGroupOfObjects = !typeInstruction.isUniqueKey();
    typeStates.put(typeIdentifier, createTypeState(from, to, isGroupOfObjects));
}
/**
 * Create a historical state by determining the differences between the "from" and "to" states for this type.<p/>
 *
 * The key which was chosen for this type may not be unique, in which case both Maps will contain a List of items for each key.<p/>
 *
 * Two passes are made over the data: the first only counts the new / modified /
 * deleted entries so the open-addressing structures can be presized exactly; the
 * second pass populates them.
 *
 * @param from             previous state: key -> object (or List of objects)
 * @param to               next state: key -> object (or List of objects)
 * @param isGroupOfObjects true when the key is not unique and values are Lists
 */
private <K, V> DiffHistoricalTypeState<K, V> createTypeState(Map<K, V> from, Map<K, V> to, boolean isGroupOfObjects) {
    int newCounter = 0;
    int diffCounter = 0;
    int deleteCounter = 0;

    /// pass 1a: count deleted (in "from", absent in "to") and modified entries
    for(K key : from.keySet()) {
        V toValue = to.get(key);
        if(toValue == null) {
            deleteCounter++;
        } else {
            V fromValue = from.get(key);
            if(!checkEquality(toValue, fromValue, isGroupOfObjects)) {
                diffCounter++;
            }
        }
    }

    /// pass 1b: count new entries (in "to", absent in "from")
    for(K key : to.keySet()) {
        if(!from.containsKey(key)) {
            newCounter++;
        }
    }

    /// presize the destination structures with the exact counts
    OpenAddressingArraySet<K> newSet = new OpenAddressingArraySet<K>();
    OpenAddressingHashMap<K, V> diffMap = new OpenAddressingHashMap<K, V>();
    OpenAddressingHashMap<K, V> deleteMap = new OpenAddressingHashMap<K, V>();

    newSet.builderInit(newCounter);
    diffMap.builderInit(diffCounter);
    deleteMap.builderInit(deleteCounter);

    newCounter = diffCounter = deleteCounter = 0;

    /// pass 2a: record the *previous* ("from") value of deleted and modified entries
    for(K key : from.keySet()) {
        V fromValue = from.get(key);
        V toValue = to.get(key);
        if(toValue == null) {
            deleteMap.builderPut(deleteCounter++, key, fromValue);
        } else {
            if(!checkEquality(toValue, fromValue, isGroupOfObjects)) {
                diffMap.builderPut(diffCounter++, key, fromValue);
            }
        }
    }

    /// pass 2b: record keys of newly-created entries
    for(K key : to.keySet()) {
        if(!from.containsKey(key)) {
            newSet.builderSet(newCounter++, key);
        }
    }

    newSet.builderFinish();
    diffMap.builderFinish();
    deleteMap.builderFinish();

    return new DiffHistoricalTypeState<K, V>(newSet, diffMap, deleteMap);
}
/**
* Equality is different depending on whether or not we are keying by a unique key.<p/>
*
* <ul>
* <li>If the key is unique, then we simply compare equality with ==.</li>
* <li>If the key is not unique, then we have grouped these elements by the key (in Lists).
* In this case, we check equality of each element with ==.</li>
* </ul>
*
*/
@SuppressWarnings("unchecked")
private boolean checkEquality(Object o1, Object o2, boolean isGroupOfObjects) {
if(isGroupOfObjects) {
/// equality for a List, in this case, means that for each list, at each element the items are == to one another.
/// we know that the element ordering is the same because we iterated over the objects in ordinal order from the type
/// state when we built the list in the DiffHistoryDataState
List<Object> l1 = (List<Object>)o1;
List<Object> l2 = (List<Object>)o2;
if(l1.size() != l2.size())
return false;
for(int i=0;i<l1.size();i++) {
if(l1.get(i) != l2.get(i))
return false;
}
return true;
} else {
return o1 == o2;
}
}
}
| 8,389 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/flatblob/FlatBlobDeserializationRecord.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.flatblob;
import com.netflix.zeno.fastblob.record.ByteData;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
import com.netflix.zeno.serializer.NFDeserializationRecord;
/**
 * Deserialization record for the flat blob format.  After {@link #position(long)} is
 * called, holds the absolute byte offset of each schema field so individual fields can
 * be read on demand.
 */
public class FlatBlobDeserializationRecord extends NFDeserializationRecord {

    /// absolute offset into byteData of each field, indexed by schema field position
    private final long fieldPointers[];
    private ByteData byteData;
    /// whether deserialized sub-elements should be placed in the type caches
    private boolean cacheElements;

    public FlatBlobDeserializationRecord(FastBlobSchema schema) {
        super(schema);
        this.fieldPointers = new long[schema.numFields()];
    }

    /** Set the underlying byte data this record will read from. */
    public void setByteData(ByteData byteData) {
        this.byteData = byteData;
    }

    /** Set whether deserialized sub-elements should be cached. */
    public void setCacheElements(boolean cacheElements) {
        this.cacheElements = cacheElements;
    }

    /** @return true if deserialized sub-elements should be cached */
    public boolean shouldCacheElements() {
        return cacheElements;
    }

    /**
     * Position this record to the byte at index <code>objectBeginOffset</code>.
     *
     * @param objectBeginOffset
     * @return The length of the object's data, in bytes.
     */
    public int position(long objectBeginOffset) {
        long currentPosition = objectBeginOffset;

        /// walk the fields in schema order, recording where each one begins
        for(int i=0;i<fieldPointers.length;i++) {
            fieldPointers[i] = currentPosition;
            FieldType type = getSchema().getFieldType(i);
            currentPosition += fieldLength(currentPosition, type);
        }

        return (int)(currentPosition - objectBeginOffset);
    }

    /**
     * Get the underlying byte data where this record is contained.
     */
    public ByteData getByteData() {
        return byteData;
    }

    /**
     * get the offset into the byte data for the field represented by the String.
     *
     * @return the absolute offset of the field, or -1 if the schema has no such field
     */
    public long getPosition(String fieldName) {
        int fieldPosition = getSchema().getPosition(fieldName);
        if(fieldPosition == -1)
            return -1;
        return fieldPointers[fieldPosition];
    }

    /**
     * get the object type name the schema declares for the given field.
     */
    @Override
    public String getObjectType(String fieldName) {
        return getSchema().getObjectType(fieldName);
    }

    /**
     * Determine the number of bytes occupied by the field starting at
     * <code>currentPosition</code>, based on the flat blob encoding of its type.
     */
    private int fieldLength(long currentPosition, FieldType type) {
        if(type.startsWithVarIntEncodedLength()) {
            /// either a single null byte, or a var-int length header followed by that many bytes
            if(VarInt.readVNull(byteData, currentPosition)) {
                return 1;
            } else {
                int fieldLength = VarInt.readVInt(byteData, currentPosition);
                return VarInt.sizeOfVInt(fieldLength) + fieldLength;
            }
        } else if(type.equals(FieldType.OBJECT)) {
            /// either a single null byte, or: var-int ordinal, var-int flat data size, flat data
            if(VarInt.readVNull(byteData, currentPosition)) {
                return 1;
            } else {
                int ordinal = VarInt.readVInt(byteData, currentPosition);
                int sizeOfOrdinal = VarInt.sizeOfVInt(ordinal);
                int flatDataSize = VarInt.readVInt(byteData, currentPosition + sizeOfOrdinal);
                return VarInt.sizeOfVInt(flatDataSize) + sizeOfOrdinal + flatDataSize;
            }
        } else if(type.getFixedLength() != -1) {
            return type.getFixedLength();
        } else {
            /// var-int encoded numeric value, or a single null byte
            if(VarInt.readVNull(byteData, currentPosition)) {
                return 1;
            } else {
                long value = VarInt.readVLong(byteData, currentPosition);
                return VarInt.sizeOfVLong(value);
            }
        }
    }
}
| 8,390 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/flatblob/FlatBlobSerializationFramework.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.flatblob;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.fastblob.record.ByteData;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.serializer.SerializationFramework;
import com.netflix.zeno.serializer.SerializerFactory;
/**
* The "flat blob" is currently an experiment. We are challenging the assumption that all Netflix applications require
* all video metadata in memory at any given time.<p/>
*
* For at least some of our applications, we observe a power-law distribution of accesses by key. We believe that we may
* be able to maximize the value of the cache by only storing in memory the most frequently accessed items. The rest of the items
* can be stored off-heap somewhere.<p/>
*
* Whether "somewhere" is on disk or on a separate server, the cost of retrieving data will be dominated by the back-and-forth time
* to the off-heap repository, not the amount of data returned. Consequently, we need to be able to retrieve an entire hierarchy of
* each object in a single request (rather than making piecewise calls off-heap for each sub-element).<p/>
*
* This is where the "flat blob" comes in. The "flat blob" representation includes (in roughly FastBlob format), each of the data elements
* which are referenced by a given object.<p/>
*
* Experiments have shown that even in a partial cache, deduplication still has enormous value (even caching just the most frequently
* accessed 10,000 items, FastBlob-style deduplication results in a 71% reduction in memory footprint).<p/>
*
* Let's take and example object OBJ, which references three sub-objects O1, O2, and O3, with the ordinals 0, 1, and 2, respectively.<p/>
*
* The FastBlob serialization is [OBJ] = "012"<br/>
* The FlatBlob serialization is [OBJ] = "0[O1]1[O2]2[O3]"<p/>
*
* Where the FastBlob serialization format includes only ordinal references for sub-elements, the "flat blob" serialization format includes <i>both</i>
* the ordinal reference and the complete serialized representation of those sub-elements.<p/>
*
* The deserializer can optionally cache these intermediate objects. When reading this data, the deserializer will check to see whether each intermediate
* object is cached. If so, it will use the cached copy. If not, it will deserialize and then optionally cache the sub-object. This results in
* the reduced allocation, promotion and memory footprint enjoyed by the FastBlobStateEngine. <p/>
*
* The FlatBlob builds on the FastBlobStateEngine foundation by retaining the concepts of data states and ordinals. In this way, we
* can retain the memory footprint and GC overhead benefits of FastBlob-style deduplication, while simultaneously optimizing for higher-latency
* off-heap data access.
*
* @author dkoszewnik
*
*/
public class FlatBlobSerializationFramework extends SerializationFramework {

    // Optional state engine used to resolve ordinals for sub-objects during serialization;
    // may be null when no deduplication against an existing state is desired.
    FastBlobStateEngine stateEngine;

    /** Create a framework with no backing state engine. */
    public FlatBlobSerializationFramework(SerializerFactory serializerFactory) {
        this(serializerFactory, null);
    }

    /**
     * @param serializerFactory supplies the type serializers for this framework
     * @param readDeserializedObjectsFrom state engine whose deserialization states are used
     *        to find ordinals for serialized sub-objects; may be null
     */
    public FlatBlobSerializationFramework(SerializerFactory serializerFactory, FastBlobStateEngine readDeserializedObjectsFrom) {
        super(serializerFactory);
        this.stateEngine = readDeserializedObjectsFrom;
        this.frameworkSerializer = new FlatBlobFrameworkSerializer(this, stateEngine);
        this.frameworkDeserializer = new FlatBlobFrameworkDeserializer(this);

        ///TODO: The data structure created here is used for double snapshot refresh. If this is used in a real implementation,
        ///then we would require a separate instance of the identity ordinal map, AND we would need to update this every cycle,
        ///AND make sure this doesn't get out of sync with the actual objects.
        if(stateEngine != null) {
            for(String serializerName : stateEngine.getSerializerNames()) {
                stateEngine.getTypeDeserializationState(serializerName).createIdentityOrdinalMap();
            }
        }
    }

    /**
     * Serialize {@code obj} of the given type into {@code os} in flat blob format.
     * Reuses a per-thread serialization record for the type.
     */
    public void serialize(String type, Object obj, ByteDataBuffer os) {
        FlatBlobSerializationRecord rec = ((FlatBlobFrameworkSerializer)frameworkSerializer).getSerializationRecord(type);

        getSerializer(type).serialize(obj, rec);

        rec.writeDataTo(os);
    }

    /** Deserialize an object of the given type starting at offset 0 of {@code data}. */
    public <T> T deserialize(String type, ByteData data, boolean cacheElements) {
        return deserialize(type, data, 0, cacheElements);
    }

    /**
     * Deserialize an object of the given type starting at {@code position} in {@code data}.
     *
     * @param cacheElements whether deserialized sub-elements should be cached for reuse
     */
    @SuppressWarnings("unchecked")
    public <T> T deserialize(String type, ByteData data, int position, boolean cacheElements) {
        FlatBlobDeserializationRecord rec = ((FlatBlobFrameworkDeserializer)frameworkDeserializer).getDeserializationRecord(type);
        rec.setCacheElements(cacheElements);
        rec.setByteData(data);
        rec.position(position);

        return (T) getSerializer(type).deserialize(rec);
    }

    /** @return the cached instance of the given type with the given ordinal, if present. */
    public <T> T getCached(String type, int ordinal) {
        FlatBlobTypeCache<T> typeCache = ((FlatBlobFrameworkDeserializer)frameworkDeserializer).getTypeCache(type);
        return typeCache.get(ordinal);
    }

    /** Package-private access to the per-type cache maintained by the deserializer. */
    <T> FlatBlobTypeCache<T> getTypeCache(String type) {
        return ((FlatBlobFrameworkDeserializer)frameworkDeserializer).getTypeCache(type);
    }
}
| 8,391 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/flatblob/FlatBlobSerializationRecord.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.flatblob;
import com.netflix.zeno.fastblob.FastBlobFrameworkSerializer;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema.FieldType;
import com.netflix.zeno.serializer.NFSerializationRecord;
/**
 * Serialization record for the flat blob format.  Buffers the serialized bytes of each
 * schema field separately; {@link #writeDataTo(ByteDataBuffer)} concatenates them, in
 * schema order, into the final wire representation.
 */
public class FlatBlobSerializationRecord extends NFSerializationRecord {

    /// one scratch buffer per schema field, indexed by field position
    private final ByteDataBuffer[] buffers;
    /// tracks which fields have been written since the last reset(); unwritten fields serialize as null
    private final boolean[] populated;

    /**
     * Create a new FlatBlobSerializationRecord which conforms to the given FastBlobSchema.
     */
    public FlatBlobSerializationRecord(FastBlobSchema schema) {
        int numFields = schema.numFields();
        this.buffers = new ByteDataBuffer[numFields];
        this.populated = new boolean[numFields];
        for (int pos = 0; pos < numFields; pos++) {
            buffers[pos] = new ByteDataBuffer(32);
        }
        setSchema(schema);
    }

    /**
     * Returns the buffer which should be used to serialize the data for the named field.
     *
     * @param field the schema field name
     */
    public ByteDataBuffer getFieldBuffer(String field) {
        return getFieldBuffer(getSchema().getPosition(field));
    }

    /**
     * Returns the (reset) buffer for the field at the given schema position, marking the
     * field as populated.  Used by the FlatBlobFrameworkSerializer when writing field data.
     *
     * @param fieldPosition the field's position in the schema
     */
    public ByteDataBuffer getFieldBuffer(int fieldPosition) {
        ByteDataBuffer buffer = buffers[fieldPosition];
        populated[fieldPosition] = true;
        buffer.reset();
        return buffer;
    }

    /**
     * Concatenates all fields, in schema order, to the supplied buffer.  This is the
     * verbatim serialized representation in the FlatBlob.
     *
     * @param buf the destination buffer
     */
    public void writeDataTo(ByteDataBuffer buf) {
        for (int pos = 0; pos < buffers.length; pos++) {
            FieldType type = getSchema().getFieldType(pos);
            if (populated[pos]) {
                /// length-prefixed types get a var-int length header before the raw bytes
                if (type.startsWithVarIntEncodedLength()) {
                    VarInt.writeVInt(buf, (int) buffers[pos].length());
                }
                buffers[pos].copyTo(buf);
            } else {
                writeNullValue(buf, type);
            }
        }
    }

    /// writes the null sentinel appropriate for the given field type
    private static void writeNullValue(ByteDataBuffer buf, FieldType type) {
        if (type == FieldType.FLOAT) {
            FastBlobFrameworkSerializer.writeNullFloat(buf);
        } else if (type == FieldType.DOUBLE) {
            FastBlobFrameworkSerializer.writeNullDouble(buf);
        } else {
            VarInt.writeVNull(buf);
        }
    }

    /**
     * Returns the number of bytes which will be written when writeDataTo(ByteDataBuffer buf) is called.
     */
    public int sizeOfData() {
        int total = 0;
        for (int pos = 0; pos < buffers.length; pos++) {
            FieldType type = getSchema().getFieldType(pos);
            if (populated[pos]) {
                int len = (int) buffers[pos].length();
                if (type.startsWithVarIntEncodedLength()) {
                    total += VarInt.sizeOfVInt(len);
                }
                total += len;
            } else {
                total += nullValueSize(type);
            }
        }
        return total;
    }

    /// size in bytes of the null sentinel for the given field type
    private static int nullValueSize(FieldType type) {
        if (type == FieldType.FLOAT)
            return 4;
        if (type == FieldType.DOUBLE)
            return 8;
        return 1;
    }

    /**
     * Mark every field unpopulated so this record can be reused for another object.
     */
    public void reset() {
        for (int pos = 0; pos < populated.length; pos++) {
            populated[pos] = false;
        }
    }
}
| 8,392 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/flatblob/FlatBlobEvictor.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.flatblob;
import com.netflix.zeno.serializer.SerializationFramework;
import com.netflix.zeno.serializer.SerializerFactory;
/**
 * Walks an object hierarchy through the serializer chain in order to evict its elements
 * from a FlatBlobSerializationFramework's caches.  The eviction side effects themselves
 * are implemented by FlatBlobEvictionFrameworkSerializer (not visible in this file).
 */
public class FlatBlobEvictor extends SerializationFramework {

    /**
     * @param serializerFactory supplies the type serializers to walk with
     * @param flatBlobFramework the framework whose cached elements are targeted
     */
    public FlatBlobEvictor(SerializerFactory serializerFactory, FlatBlobSerializationFramework flatBlobFramework) {
        super(serializerFactory);
        this.frameworkSerializer = new FlatBlobEvictionFrameworkSerializer(this, flatBlobFramework);
    }

    /**
     * Traverse {@code obj} with the serializer registered for {@code type}; the serialized
     * output itself is discarded.
     */
    public void evict(String type, Object obj) {
        FlatBlobSerializationRecord scratchRecord = new FlatBlobSerializationRecord(getSerializer(type).getFastBlobSchema());
        getSerializer(type).serialize(obj, scratchRecord);
    }
}
| 8,393 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/flatblob/FlatBlobFrameworkSerializer.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.flatblob;
import com.netflix.zeno.fastblob.FastBlobStateEngine;
import com.netflix.zeno.fastblob.record.ByteDataBuffer;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.state.FastBlobTypeDeserializationState;
import com.netflix.zeno.serializer.FrameworkSerializer;
import com.netflix.zeno.serializer.NFTypeSerializer;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * Serializes objects into the flat blob format.  For each referenced sub-object it writes
 * both the ordinal (resolved against the backing FastBlobStateEngine) and the sub-object's
 * complete serialized representation.
 */
public class FlatBlobFrameworkSerializer extends FrameworkSerializer<FlatBlobSerializationRecord> {

    // Sentinel bit patterns for null float/double values: one greater than the canonical
    // NaN bits, so they cannot collide with a serialized real NaN.
    static final int NULL_FLOAT_BITS = Float.floatToIntBits(Float.NaN) + 1;
    static final long NULL_DOUBLE_BITS = Double.doubleToLongBits(Double.NaN) + 1;

    // Used to look up ordinals for sub-objects; assumed non-null whenever object/collection
    // fields are serialized -- TODO confirm (the framework allows constructing it with null).
    private final FastBlobStateEngine stateEngine;
    // Per-thread cache of one reusable serialization record per type name.
    private final ThreadLocal<Map<String, FlatBlobSerializationRecord>> cachedSerializationRecords;

    public FlatBlobFrameworkSerializer(FlatBlobSerializationFramework flatBlobFramework, FastBlobStateEngine stateEngine) {
        super(flatBlobFramework);

        this.stateEngine = stateEngine;
        this.cachedSerializationRecords = new ThreadLocal<Map<String, FlatBlobSerializationRecord>>();
    }

    /**
     * Serialize a primitive element.  Dispatches on the runtime type of the boxed value;
     * a null value is skipped entirely (the unpopulated field serializes as null).
     */
    @Override
    public void serializePrimitive(FlatBlobSerializationRecord rec, String fieldName, Object value) {
        if (value == null) {
            return;
        }

        if (value instanceof Integer) {
            serializePrimitive(rec, fieldName, ((Integer) value).intValue());
        } else if (value instanceof Long) {
            serializePrimitive(rec, fieldName, ((Long) value).longValue());
        } else if (value instanceof Float) {
            serializePrimitive(rec, fieldName, ((Float) value).floatValue());
        } else if (value instanceof Double) {
            serializePrimitive(rec, fieldName, ((Double) value).doubleValue());
        } else if (value instanceof Boolean) {
            serializePrimitive(rec, fieldName, ((Boolean) value).booleanValue());
        } else if (value instanceof String) {
            serializeString(rec, fieldName, (String) value);
        } else if (value instanceof byte[]){
            serializeBytes(rec, fieldName, (byte[]) value);
        } else {
            throw new RuntimeException("Primitive type " + value.getClass().getSimpleName() + " not supported!");
        }
    }

    /**
     * Serialize an integer, use zig-zag encoding to (probably) get a small positive value, then encode the result as a variable-byte integer.
     */
    @Override
    public void serializePrimitive(FlatBlobSerializationRecord rec, String fieldName, int value) {
        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldName);

        // zig zag encoding
        VarInt.writeVInt(fieldBuffer, (value << 1) ^ (value >> 31));
    }

    /**
     * Serialize a long, use zig-zag encoding to (probably) get a small positive value, then encode the result as a variable-byte long.
     */
    @Override
    public void serializePrimitive(FlatBlobSerializationRecord rec, String fieldName, long value) {
        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldName);

        // zig zag encoding
        VarInt.writeVLong(fieldBuffer, (value << 1) ^ (value >> 63));
    }

    /**
     * Serialize a float into 4 consecutive bytes
     */
    @Override
    public void serializePrimitive(FlatBlobSerializationRecord rec, String fieldName, float value) {
        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldName);

        int intBits = Float.floatToIntBits(value);
        writeFixedLengthInt(fieldBuffer, intBits);
    }

    /**
     * Write 4 consecutive bytes (big-endian)
     */
    private static void writeFixedLengthInt(ByteDataBuffer fieldBuffer, int intBits) {
        fieldBuffer.write((byte) (intBits >>> 24));
        fieldBuffer.write((byte) (intBits >>> 16));
        fieldBuffer.write((byte) (intBits >>> 8));
        fieldBuffer.write((byte) (intBits));
    }

    /**
     * Serialize a double into 8 consecutive bytes
     */
    @Override
    public void serializePrimitive(FlatBlobSerializationRecord rec, String fieldName, double value) {
        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldName);

        long intBits = Double.doubleToLongBits(value);
        writeFixedLengthLong(fieldBuffer, intBits);
    }

    /**
     * Write 8 consecutive bytes (big-endian)
     */
    private static void writeFixedLengthLong(ByteDataBuffer fieldBuffer, long intBits) {
        fieldBuffer.write((byte) (intBits >>> 56));
        fieldBuffer.write((byte) (intBits >>> 48));
        fieldBuffer.write((byte) (intBits >>> 40));
        fieldBuffer.write((byte) (intBits >>> 32));
        fieldBuffer.write((byte) (intBits >>> 24));
        fieldBuffer.write((byte) (intBits >>> 16));
        fieldBuffer.write((byte) (intBits >>> 8));
        fieldBuffer.write((byte) (intBits));
    }

    /**
     * Serialize a boolean as a single byte
     */
    @Override
    public void serializePrimitive(FlatBlobSerializationRecord rec, String fieldName, boolean value) {
        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldName);

        byte byteValue = value ? (byte) 1 : (byte) 0;

        fieldBuffer.write(byteValue);
    }

    // Serialize a String as one var-int per character; a null String leaves the field unpopulated.
    private void serializeString(FlatBlobSerializationRecord rec, String fieldName, String value) {
        if(value == null)
            return;

        writeString(value, rec.getFieldBuffer(fieldName));
    }

    // Serialize raw bytes verbatim; a null array leaves the field unpopulated.
    @Override
    public void serializeBytes(FlatBlobSerializationRecord rec, String fieldName, byte[] value) {
        if(value == null)
            return;

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldName);

        for (int i = 0; i < value.length; i++) {
            fieldBuffer.write(value[i]);
        }
    }

    /*
     * @Deprecated instead use serializeObject(FlatBlobSerializationRecord rec, String fieldName, Object obj)
     *
     */
    @Deprecated
    @Override
    public void serializeObject(FlatBlobSerializationRecord rec, String fieldName, String typeName, Object obj) {
        int fieldPosition = rec.getSchema().getPosition(fieldName);
        validateField(fieldName, fieldPosition);
        serializeObject(rec, fieldPosition, typeName, obj);
    }

    // Fail fast when the schema does not contain the requested field.
    private void validateField(String fieldName, int fieldPosition) {
        if(fieldPosition == -1) {
            throw new IllegalArgumentException("Attempting to serialize non existent field " + fieldName + ".");
        }
    }

    /**
     * Serialize a sub-object: var-int ordinal (from the state engine), then var-int size of
     * the sub-object's flat data, then the flat data itself.  A null object leaves the field
     * unpopulated.
     */
    private void serializeObject(FlatBlobSerializationRecord rec, int fieldPosition, String typeName, Object obj) {
        if(obj == null)
            return;

        int ordinal = findOrdinalInStateEngine(typeName, obj);

        FlatBlobSerializationRecord subRecord = getSerializationRecord(typeName);

        framework.getSerializer(typeName).serialize(obj, subRecord);

        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldPosition);

        VarInt.writeVInt(fieldBuffer, ordinal);
        VarInt.writeVInt(fieldBuffer, subRecord.sizeOfData());
        subRecord.writeDataTo(fieldBuffer);
    }

    // Preferred entry point: the sub-object's type name is taken from the schema.
    @Override
    public void serializeObject(FlatBlobSerializationRecord rec, String fieldName, Object obj) {
        int fieldPosition = rec.getSchema().getPosition(fieldName);
        validateField(fieldName, fieldPosition);
        serializeObject(rec, fieldPosition, rec.getSchema().getObjectType(fieldName), obj);
    }

    /**
     * Serialize a list in iteration order.  Each element is written as var-int ordinal,
     * var-int flat data size, then flat data; a null element is a single null byte.
     */
    @Override
    public <T> void serializeList(FlatBlobSerializationRecord rec, String fieldName, String typeName, Collection<T> obj) {
        if(obj == null)
            return;

        NFTypeSerializer<Object> elementSerializer = framework.getSerializer(typeName);

        int fieldPosition = rec.getSchema().getPosition(fieldName);
        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldPosition);

        FlatBlobSerializationRecord subRecord = getSerializationRecord(typeName);

        for(T t : obj) {
            if(t == null) {
                VarInt.writeVNull(fieldBuffer);
            } else {
                int ordinal = findOrdinalInStateEngine(typeName, t);
                elementSerializer.serialize(t, subRecord);

                VarInt.writeVInt(fieldBuffer, ordinal);
                VarInt.writeVInt(fieldBuffer, subRecord.sizeOfData());
                subRecord.writeDataTo(fieldBuffer);
                subRecord.reset();
            }
        }
    }

    /**
     * Serialize a set.  Element ordinals are resolved against the state engine, sorted,
     * and written delta-encoded against the previous identified ordinal.  A null element
     * is two null bytes; an element not found in the state engine (marked with
     * Integer.MIN_VALUE, which sorts first) gets a null ordinal byte but its flat data is
     * still written.
     */
    @Override
    public <T> void serializeSet(FlatBlobSerializationRecord rec, String fieldName, String typeName, Set<T> set) {
        if(set == null)
            return;

        FastBlobTypeDeserializationState<Object> typeDeserializationState = stateEngine.getTypeDeserializationState(typeName);

        int fieldPosition = rec.getSchema().getPosition(fieldName);
        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldPosition);

        int setOrdinals[] = new int[set.size()];
        // lazily allocated: only needed when some element is absent from the state engine
        Object unidentifiedSetObjects[] = null;

        int i = 0;
        for (T obj : set) {
            if(obj == null) {
                setOrdinals[i++] = -1;
            } else {
                setOrdinals[i] = typeDeserializationState.find(obj);
                if(setOrdinals[i] == -1) {
                    if(unidentifiedSetObjects == null)
                        unidentifiedSetObjects = new Object[set.size()];
                    unidentifiedSetObjects[i] = obj;
                    setOrdinals[i] = Integer.MIN_VALUE;
                }
                i++;
            }
        }

        Arrays.sort(setOrdinals);

        FlatBlobSerializationRecord subRecord = getSerializationRecord(typeName);

        int currentOrdinal = 0;

        for(i=0;i<setOrdinals.length;i++) {
            if(setOrdinals[i] == -1) {
                // null element: null ordinal byte plus null size byte
                VarInt.writeVNull(fieldBuffer);
                VarInt.writeVNull(fieldBuffer);
            } else {
                if(setOrdinals[i] == Integer.MIN_VALUE) {
                    // element not present in the state engine: null ordinal, but data follows
                    Object element = unidentifiedSetObjects[i];
                    framework.getSerializer(typeName).serialize(element, subRecord);
                    VarInt.writeVNull(fieldBuffer);
                } else {
                    // identified element: delta-encode the ordinal against the previous one
                    Object element = typeDeserializationState.get(setOrdinals[i]);
                    framework.getSerializer(typeName).serialize(element, subRecord);
                    VarInt.writeVInt(fieldBuffer, setOrdinals[i] - currentOrdinal);
                    currentOrdinal = setOrdinals[i];
                }

                VarInt.writeVInt(fieldBuffer, subRecord.sizeOfData());
                subRecord.writeDataTo(fieldBuffer);
                subRecord.reset();
            }
        }
    }

    /**
     * Serialize a map.  Each entry's key and value ordinals are packed into a long
     * (value ordinal in the high 32 bits, key ordinal in the low 32) so sorting orders
     * entries by value ordinal; value ordinals are then written delta-encoded.  A null
     * key or value (or one not found in the state engine, ordinal -1) is written as a
     * single null byte with no data -- note this differs from serializeSet, which still
     * writes data for unidentified elements.
     */
    @Override
    public <K, V> void serializeMap(FlatBlobSerializationRecord rec, String fieldName, String keyTypeName, String valueTypeName, Map<K, V> map) {
        if(map == null)
            return;

        FastBlobTypeDeserializationState<Object> keyDeserializationState = stateEngine.getTypeDeserializationState(keyTypeName);
        FastBlobTypeDeserializationState<Object> valueDeserializationState = stateEngine.getTypeDeserializationState(valueTypeName);

        int fieldPosition = rec.getSchema().getPosition(fieldName);
        ByteDataBuffer fieldBuffer = rec.getFieldBuffer(fieldPosition);

        FlatBlobSerializationRecord keyRecord = getSerializationRecord(keyTypeName);
        FlatBlobSerializationRecord valueRecord = getSerializationRecord(valueTypeName);

        long mapEntries[] = new long[map.size()];

        int i = 0;

        for (Map.Entry<K, V> entry : map.entrySet()) {
            int keyOrdinal = -1;
            int valueOrdinal = -1;
            if(entry.getKey() != null)
                keyOrdinal = keyDeserializationState.find(entry.getKey());
            if(entry.getValue() != null)
                valueOrdinal = valueDeserializationState.find(entry.getValue());

            mapEntries[i++] = ((long)valueOrdinal << 32) | (keyOrdinal & 0xFFFFFFFFL);
        }

        // i always equals map.size() after the loop above, so this branch should be unreachable
        if(mapEntries.length > i) {
            mapEntries = Arrays.copyOf(mapEntries, i);
            throw new RuntimeException("This should not happen."); ///TODO: Remove this sanity check.
        }

        Arrays.sort(mapEntries);

        int currentValueOrdinal = 0;

        for(i=0;i<mapEntries.length;i++) {
            int keyOrdinal = (int) mapEntries[i];
            int valueOrdinal = (int) (mapEntries[i] >> 32);

            if(keyOrdinal == -1) {
                VarInt.writeVNull(fieldBuffer);
            } else {
                Object key = keyDeserializationState.get(keyOrdinal);
                keyRecord.reset();
                framework.getSerializer(keyTypeName).serialize(key, keyRecord);
                VarInt.writeVInt(fieldBuffer, keyOrdinal);
                VarInt.writeVInt(fieldBuffer, keyRecord.sizeOfData());
                keyRecord.writeDataTo(fieldBuffer);
            }

            if(valueOrdinal == -1) {
                VarInt.writeVNull(fieldBuffer);
            } else {
                Object value = valueDeserializationState.get(valueOrdinal);
                valueRecord.reset();
                framework.getSerializer(valueTypeName).serialize(value, valueRecord);
                // value ordinals are sorted ascending, so the delta is non-negative
                VarInt.writeVInt(fieldBuffer, valueOrdinal - currentValueOrdinal);
                VarInt.writeVInt(fieldBuffer, valueRecord.sizeOfData());
                valueRecord.writeDataTo(fieldBuffer);
                currentValueOrdinal = valueOrdinal;
            }
        }
    }

    /**
     * Encode a String as a series of VarInts, one per character.<p/>
     *
     * @param str
     * @param out
     * @return
     * @throws IOException
     */
    private void writeString(String str, ByteDataBuffer out) {
        for(int i=0;i<str.length();i++) {
            VarInt.writeVInt(out, str.charAt(i));
        }
    }

    // Look up the ordinal of obj in the state engine's deserialization state for the type;
    // returns -1 when the object is not found there.
    private int findOrdinalInStateEngine(String typeName, Object obj) {
        FastBlobTypeDeserializationState<Object> typeDeserializationState = stateEngine.getTypeDeserializationState(typeName);

        int ordinal = typeDeserializationState.find(obj);
        return ordinal;
    }

    // Returns a reset, thread-local serialization record for the given type, creating
    // (and caching) one on first use by this thread.
    FlatBlobSerializationRecord getSerializationRecord(String type) {
        Map<String, FlatBlobSerializationRecord> cachedSerializationRecords = this.cachedSerializationRecords.get();
        if(cachedSerializationRecords == null) {
            cachedSerializationRecords = new HashMap<String, FlatBlobSerializationRecord>();
            this.cachedSerializationRecords.set(cachedSerializationRecords);
        }

        FlatBlobSerializationRecord rec = cachedSerializationRecords.get(type);
        if(rec == null) {
            rec = new FlatBlobSerializationRecord(framework.getSerializer(type).getFastBlobSchema());
            cachedSerializationRecords.put(type, rec);
        }

        rec.reset();
        return rec;
    }

}
| 8,394 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/flatblob/FlatBlobFrameworkDeserializer.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.flatblob;
import static com.netflix.zeno.flatblob.FlatBlobFrameworkSerializer.NULL_DOUBLE_BITS;
import static com.netflix.zeno.flatblob.FlatBlobFrameworkSerializer.NULL_FLOAT_BITS;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import com.netflix.zeno.fastblob.record.ByteData;
import com.netflix.zeno.fastblob.record.VarInt;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.serializer.FrameworkDeserializer;
import com.netflix.zeno.serializer.NFTypeSerializer;
import com.netflix.zeno.util.collections.CollectionImplementation;
import com.netflix.zeno.util.collections.MinimizedUnmodifiableCollections;
import com.netflix.zeno.util.collections.builder.ListBuilder;
import com.netflix.zeno.util.collections.builder.MapBuilder;
import com.netflix.zeno.util.collections.builder.SetBuilder;
/**
 * {@link FrameworkDeserializer} for the flat blob format: decodes primitives,
 * strings, objects and collections from the {@link ByteData} underlying a
 * {@link FlatBlobDeserializationRecord}.  Previously deserialized instances
 * may be shared via per-type {@link FlatBlobTypeCache}s, keyed by ordinal,
 * when the record's {@code shouldCacheElements()} flag is set.
 */
public class FlatBlobFrameworkDeserializer extends FrameworkDeserializer<FlatBlobDeserializationRecord>{

    // One reference-counting cache per serializer, keyed by type name.
    // Populated once in the constructor, read-only afterwards.
    private final Map<String, FlatBlobTypeCache<?>> typeCaches;
    // Reusable per-thread deserialization records, keyed by type name,
    // to avoid allocating a record per nested object.
    private final ThreadLocal<Map<String, FlatBlobDeserializationRecord>> deserializationRecords;

    private MinimizedUnmodifiableCollections minimizedCollections = new MinimizedUnmodifiableCollections(CollectionImplementation.JAVA_UTIL);

    // Swaps the collection implementation used for deserialized lists/sets/maps.
    public void setCollectionImplementation(CollectionImplementation impl) {
        minimizedCollections = new MinimizedUnmodifiableCollections(impl);
    }

    protected FlatBlobFrameworkDeserializer(FlatBlobSerializationFramework framework) {
        super(framework);
        this.typeCaches = new HashMap<String, FlatBlobTypeCache<?>>();
        this.deserializationRecords = new ThreadLocal<Map<String,FlatBlobDeserializationRecord>>();

        // eagerly create a type cache for every serializer known to the framework
        for(NFTypeSerializer<?> serializer : framework.getOrderedSerializers()) {
            typeCaches.put(serializer.getName(), new FlatBlobTypeCache<Object>(serializer.getName()));
        }
    }

    /**
     * Read a nullable boolean stored as a single byte ((byte)1 == true).
     * Returns null when the field is absent or explicitly null-encoded.
     */
    @Override
    public Boolean deserializeBoolean(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        return byteData.get(fieldPosition) == (byte) 1 ? Boolean.TRUE : Boolean.FALSE;
    }

    /**
     * Read a boolean as a single byte.
     */
    @Override
    public boolean deserializePrimitiveBoolean(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        return byteData.get(fieldPosition) == (byte) 1;
    }

    /**
     * Read a nullable integer stored as a zig-zag encoded variable-length int.
     */
    @Override
    public Integer deserializeInteger(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        int value = VarInt.readVInt(byteData, fieldPosition);
        // zig-zag decode: (n >>> 1) ^ -(n & 1)
        return Integer.valueOf((value >>> 1) ^ ((value << 31) >> 31));
    }

    /**
     * Read an integer as a variable-byte sequence. After read, the value must be zig-zag decoded.
     */
    @Override
    public int deserializePrimitiveInt(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        int value = VarInt.readVInt(byteData, fieldPosition);
        return (value >>> 1) ^ ((value << 31) >> 31);
    }

    /**
     * Read a nullable long stored as a zig-zag encoded variable-length long.
     */
    @Override
    public Long deserializeLong(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        long value = VarInt.readVLong(byteData, fieldPosition);
        // zig-zag decode (64-bit variant)
        return Long.valueOf((value >>> 1) ^ ((value << 63) >> 63));
    }

    /**
     * Read a long as a variable-byte sequence. After read, the value must be zig-zag decoded.
     */
    @Override
    public long deserializePrimitiveLong(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        long value = VarInt.readVLong(byteData, fieldPosition);
        return (value >>> 1) ^ ((value << 63) >> 63);
    }

    /**
     * Read a float as a fixed-length sequence of 4 bytes. Might be null.
     */
    @Override
    public Float deserializeFloat(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1)
            return null;
        int intBits = readIntBits(byteData, fieldPosition);
        // a reserved bit pattern marks a serialized null
        if(intBits == NULL_FLOAT_BITS)
            return null;
        return Float.valueOf(Float.intBitsToFloat(intBits));
    }

    /**
     * Read a float as a fixed-length sequence of 4 bytes.
     */
    @Override
    public float deserializePrimitiveFloat(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        int intBits = readIntBits(byteData, fieldPosition);
        return Float.intBitsToFloat(intBits);
    }

    // Reassemble 4 big-endian bytes into the raw int bit pattern of a float.
    private int readIntBits(ByteData byteData, long fieldPosition) {
        int intBits = (byteData.get(fieldPosition++) & 0xFF) << 24;
        intBits |= (byteData.get(fieldPosition++) & 0xFF) << 16;
        intBits |= (byteData.get(fieldPosition++) & 0xFF) << 8;
        intBits |= (byteData.get(fieldPosition) & 0xFF);
        return intBits;
    }

    /**
     * Read a double as a fixed-length sequence of 8 bytes. Might be null.
     */
    @Override
    public Double deserializeDouble(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1)
            return null;
        long longBits = readLongBits(byteData, fieldPosition);
        // a reserved bit pattern marks a serialized null
        if(longBits == NULL_DOUBLE_BITS)
            return null;
        return Double.valueOf(Double.longBitsToDouble(longBits));
    }

    /**
     * Read a double as a fixed-length sequence of 8 bytes.
     */
    @Override
    public double deserializePrimitiveDouble(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        long longBits = readLongBits(byteData, fieldPosition);
        return Double.longBitsToDouble(longBits);
    }

    // Reassemble 8 big-endian bytes into the raw long bit pattern of a double.
    private long readLongBits(ByteData byteData, long fieldPosition) {
        long longBits = (long) (byteData.get(fieldPosition++) & 0xFF) << 56;
        longBits |= (long) (byteData.get(fieldPosition++) & 0xFF) << 48;
        longBits |= (long) (byteData.get(fieldPosition++) & 0xFF) << 40;
        longBits |= (long) (byteData.get(fieldPosition++) & 0xFF) << 32;
        longBits |= (long) (byteData.get(fieldPosition++) & 0xFF) << 24;
        longBits |= (byteData.get(fieldPosition++) & 0xFF) << 16;
        longBits |= (byteData.get(fieldPosition++) & 0xFF) << 8;
        longBits |= (byteData.get(fieldPosition) & 0xFF);
        return longBits;
    }

    /**
     * Read a String as UTF-8 encoded characters. The length is encoded as a variable-byte integer.
     */
    @Override
    public String deserializeString(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);
        return readString(byteData, fieldPosition, length);
    }

    /**
     * Read a sequence of bytes directly from the stream. The length is encoded as a variable-byte integer.
     */
    @Override
    public byte[] deserializeBytes(FlatBlobDeserializationRecord rec, String fieldName) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);
        byte data[] = new byte[length];
        for(int i=0;i<length;i++) {
            data[i] = byteData.get(fieldPosition++);
        }
        return data;
    }

    /**
     * Read a nested object field; the concrete type name is resolved from the schema.
     */
    @Override
    public <T> T deserializeObject(FlatBlobDeserializationRecord rec, String fieldName, Class<T> clazz) {
        long position = rec.getPosition(fieldName);
        if (position == -1)
            return null;
        return deserializeObject(rec, position, rec.getObjectType(fieldName));
    }

    /**
     * @deprecated use instead deserializeObject(FlatBlobDeserializationRecord rec, String fieldName, Class<T> clazz);
     */
    @Deprecated
    @Override
    public <T> T deserializeObject(FlatBlobDeserializationRecord rec, String fieldName, String typeName, Class<T> clazz) {
        long position = rec.getPosition(fieldName);
        if (position == -1)
            return null;
        return deserializeObject(rec, position, typeName);
    }

    // Nested object layout: [vnull] | [ordinal varint][payload size varint][payload].
    // The ordinal is used to look up / populate the per-type cache.
    @SuppressWarnings("unchecked")
    private <T> T deserializeObject(FlatBlobDeserializationRecord rec, long position, String typeName) {
        ByteData underlyingData = rec.getByteData();
        if (position == -1 || VarInt.readVNull(underlyingData, position))
            return null;
        int ordinal = VarInt.readVInt(underlyingData, position);
        FlatBlobTypeCache<T> typeCache = getTypeCache(typeName);
        T cached = typeCache.get(ordinal);
        if(cached != null)
            return cached;        // cache hit also incremented the reference count
        position += VarInt.sizeOfVInt(ordinal);
        int sizeOfUnderlyingData = VarInt.readVInt(underlyingData, position);
        position += VarInt.sizeOfVInt(sizeOfUnderlyingData);
        FlatBlobDeserializationRecord subRec = getDeserializationRecord(typeName);
        subRec.setByteData(rec.getByteData());
        subRec.setCacheElements(rec.shouldCacheElements());
        subRec.position(position);
        T deserialized = (T) framework.getSerializer(typeName).deserialize(subRec);
        if(rec.shouldCacheElements()) {
            // putIfAbsent returns the canonical instance if another thread won the race
            deserialized = typeCache.putIfAbsent(ordinal, deserialized);
        }
        return deserialized;
    }

    /**
     * Read a list field.  Element layout: [vnull] | [ordinal varint][size varint][payload].
     */
    @Override
    public <T> List<T> deserializeList(FlatBlobDeserializationRecord rec, String fieldName, NFTypeSerializer<T> itemSerializer) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);
        // first pass: count elements so the builder can be exactly sized
        int numElements = countFlatBlobElementsInRange(byteData, fieldPosition, length);
        if(numElements == 0)
            return Collections.emptyList();
        FlatBlobTypeCache<T> typeCache = getTypeCache(itemSerializer.getName());
        ListBuilder<T> listBuilder = minimizedCollections.createListBuilder();
        listBuilder.builderInit(numElements);
        for(int i=0;i<numElements;i++) {
            if(VarInt.readVNull(byteData, fieldPosition)) {
                listBuilder.builderSet(i, null);
                fieldPosition += 1;
            } else {
                int ordinal = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(ordinal);
                int sizeOfData = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(sizeOfData);
                T cached = typeCache.get(ordinal);
                if(cached != null) {
                    listBuilder.builderSet(i, cached);
                } else {
                    FlatBlobDeserializationRecord elementRec = getDeserializationRecord(itemSerializer.getName());
                    elementRec.setByteData(rec.getByteData());
                    elementRec.setCacheElements(rec.shouldCacheElements());
                    elementRec.position(fieldPosition);
                    T deserialized = itemSerializer.deserialize(elementRec);
                    if(rec.shouldCacheElements()) {
                        deserialized = typeCache.putIfAbsent(ordinal, deserialized);
                    }
                    listBuilder.builderSet(i, deserialized);
                }
                fieldPosition += sizeOfData;
            }
        }
        return listBuilder.builderFinish();
    }

    /**
     * Read a set field.  Set element ordinals are delta-encoded against the
     * previous element's ordinal; a null ordinal byte leaves ordinal == -1.
     */
    @Override
    public <T> Set<T> deserializeSet(FlatBlobDeserializationRecord rec, String fieldName, NFTypeSerializer<T> itemSerializer) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);
        int numElements = countFlatBlobSetElementsInRange(byteData, fieldPosition, length);
        if(numElements == 0)
            return Collections.emptySet();
        FlatBlobTypeCache<T> typeCache = getTypeCache(itemSerializer.getName());
        SetBuilder<T> setBuilder = minimizedCollections.createSetBuilder();
        setBuilder.builderInit(numElements);
        int previousOrdinal = 0;
        for(int i=0;i<numElements;i++) {
            // two consecutive null bytes encode a null element
            if(VarInt.readVNull(byteData, fieldPosition) && VarInt.readVNull(byteData, fieldPosition + 1)) {
                setBuilder.builderSet(i, null);
                // NOTE(review): countFlatBlobSetElementsInRange advances 2 bytes for this
                // case but only 1 is skipped here — confirm the serialized null-element
                // layout; these two must agree or the cursor desynchronizes.
                fieldPosition += 1;
            } else {
                int ordinal = -1;
                if(VarInt.readVNull(byteData, fieldPosition)) {
                    fieldPosition++;
                } else {
                    ordinal = VarInt.readVInt(byteData, fieldPosition);
                    fieldPosition += VarInt.sizeOfVInt(ordinal);
                    // delta-decode against the previous element's ordinal
                    ordinal += previousOrdinal;
                    previousOrdinal = ordinal;
                }
                int sizeOfData = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(sizeOfData);
                T cached = typeCache.get(ordinal);
                if(cached != null) {
                    // NOTE(review): 'ordinal' used as the builder index here, while the
                    // non-cached branch below uses 'i' — looks suspicious; verify against
                    // the SetBuilder contract (ordinal may exceed numElements).
                    setBuilder.builderSet(ordinal, cached);
                    fieldPosition += sizeOfData;
                    continue;
                }
                FlatBlobDeserializationRecord elementRec = getDeserializationRecord(itemSerializer.getName());
                elementRec.setByteData(rec.getByteData());
                elementRec.setCacheElements(rec.shouldCacheElements());
                elementRec.position(fieldPosition);
                T deserialized = itemSerializer.deserialize(elementRec);
                if(rec.shouldCacheElements()) {
                    deserialized = typeCache.putIfAbsent(ordinal, deserialized);
                }
                setBuilder.builderSet(i, deserialized);
                fieldPosition += sizeOfData;
            }
        }
        return setBuilder.builderFinish();
    }

    /**
     * Read a map field.  Entries are serialized as alternating key/value
     * elements, hence the element count is halved.
     */
    @Override
    public <K, V> Map<K, V> deserializeMap(FlatBlobDeserializationRecord rec, String fieldName, NFTypeSerializer<K> keySerializer, NFTypeSerializer<V> valueSerializer) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if (fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);
        int numElements = countFlatBlobElementsInRange(byteData, fieldPosition, length);
        numElements /= 2;   // keys and values are interleaved
        if(numElements == 0)
            return Collections.emptyMap();
        MapBuilder<K, V> map = minimizedCollections.createMapBuilder();
        map.builderInit(numElements);
        FlatBlobTypeCache<K> keyCache = getTypeCache(keySerializer.getName());
        FlatBlobTypeCache<V> valueCache = getTypeCache(valueSerializer.getName());
        populateMap(byteData, fieldPosition, numElements, map, keySerializer, keyCache, valueSerializer, valueCache, rec.shouldCacheElements());
        return minimizedCollections.minimizeMap(map.builderFinish());
    }

    /**
     * Read a sorted map field; same wire format as {@link #deserializeMap}.
     */
    @Override
    public <K, V> SortedMap<K, V> deserializeSortedMap(FlatBlobDeserializationRecord rec, String fieldName, NFTypeSerializer<K> keySerializer, NFTypeSerializer<V> valueSerializer) {
        ByteData byteData = rec.getByteData();
        long fieldPosition = rec.getPosition(fieldName);
        if(fieldPosition == -1 || VarInt.readVNull(byteData, fieldPosition))
            return null;
        int length = VarInt.readVInt(byteData, fieldPosition);
        fieldPosition += VarInt.sizeOfVInt(length);
        int numElements = countFlatBlobElementsInRange(byteData, fieldPosition, length);
        numElements /= 2;
        if(numElements == 0)
            return minimizedCollections.emptySortedMap();
        MapBuilder<K, V> map = minimizedCollections.createSortedMapBuilder();
        map.builderInit(numElements);
        FlatBlobTypeCache<K> keyCache = getTypeCache(keySerializer.getName());
        FlatBlobTypeCache<V> valueCache = getTypeCache(valueSerializer.getName());
        populateMap(byteData, fieldPosition, numElements, map, keySerializer, keyCache, valueSerializer, valueCache, rec.shouldCacheElements());
        return minimizedCollections.minimizeSortedMap( (SortedMap<K, V>) map.builderFinish() );
    }

    // Shared map-entry decoding.  Value ordinals are delta-encoded against the
    // previous value ordinal; key ordinals are absolute.  Entries whose key or
    // value cannot be resolved (null after decode) are dropped.
    private <K, V> void populateMap(ByteData byteData, long fieldPosition, int numElements, MapBuilder<K, V> mapToPopulate, NFTypeSerializer<K> keySerializer, FlatBlobTypeCache<K> keyCache, NFTypeSerializer<V> valueSerializer, FlatBlobTypeCache<V> valueCache, boolean shouldCacheElements) {
        int previousValueOrdinal = 0;
        for(int i=0;i<numElements;i++) {
            K key = null;
            V value = null;
            boolean undefinedKeyOrValue = false;
            if(VarInt.readVNull(byteData, fieldPosition)) {
                fieldPosition++;
            } else {
                int keyOrdinal = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(keyOrdinal);
                int sizeOfData = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(sizeOfData);
                key = keyCache.get(keyOrdinal);
                if(key == null) {
                    FlatBlobDeserializationRecord rec = getDeserializationRecord(keyCache.getName());
                    rec.setByteData(byteData);
                    rec.setCacheElements(shouldCacheElements);
                    rec.position(fieldPosition);
                    key = keySerializer.deserialize(rec);
                    if(shouldCacheElements)
                        key = keyCache.putIfAbsent(keyOrdinal, key);
                }
                fieldPosition += sizeOfData;
                if(key == null)
                    undefinedKeyOrValue = true;
            }
            if(VarInt.readVNull(byteData, fieldPosition)) {
                fieldPosition++;
            } else {
                int valueOrdinal = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(valueOrdinal);
                int sizeOfData = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(sizeOfData);
                // delta-decode the value ordinal
                valueOrdinal += previousValueOrdinal;
                previousValueOrdinal = valueOrdinal;
                value = valueCache.get(valueOrdinal);
                if(value == null) {
                    FlatBlobDeserializationRecord rec = getDeserializationRecord(valueCache.getName());
                    rec.setByteData(byteData);
                    rec.setCacheElements(shouldCacheElements);
                    rec.position(fieldPosition);
                    value = valueSerializer.deserialize(rec);
                    if(shouldCacheElements)
                        value = valueCache.putIfAbsent(valueOrdinal, value);
                }
                fieldPosition += sizeOfData;
                if(value == null)
                    undefinedKeyOrValue = true;
            }
            if(!undefinedKeyOrValue)
                mapToPopulate.builderPut(i, key, value);
        }
    }

    // Count set elements in [fieldPosition, fieldPosition+length).
    // A double null byte is a null element (2 bytes); otherwise an optional
    // null/ordinal prefix is followed by a varint payload size and payload.
    private int countFlatBlobSetElementsInRange(ByteData byteData, long fieldPosition, int length) {
        int numElements = 0;
        long endPosition = length + fieldPosition;
        while(fieldPosition < endPosition) {
            if(VarInt.readVNull(byteData, fieldPosition) && VarInt.readVNull(byteData, fieldPosition + 1)) {
                fieldPosition += 2;
            } else {
                if(VarInt.readVNull(byteData, fieldPosition)) {
                    fieldPosition += 1;
                } else {
                    int ordinal = VarInt.readVInt(byteData, fieldPosition);
                    fieldPosition += VarInt.sizeOfVInt(ordinal);
                }
                int eLen = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(eLen);
                fieldPosition += eLen;
            }
            numElements++;
        }
        return numElements;
    }

    // Count list/map elements in [fieldPosition, fieldPosition+length).
    // A single null byte is a null element; otherwise [ordinal][size][payload].
    private int countFlatBlobElementsInRange(ByteData byteData, long fieldPosition, int length) {
        int numElements = 0;
        long endPosition = length + fieldPosition;
        while(fieldPosition < endPosition) {
            if(VarInt.readVNull(byteData, fieldPosition)) {
                fieldPosition += 1;
            } else {
                int ordinal = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(ordinal);
                int eLen = VarInt.readVInt(byteData, fieldPosition);
                fieldPosition += VarInt.sizeOfVInt(eLen);
                fieldPosition += eLen;
            }
            numElements++;
        }
        return numElements;
    }

    /**
     * Decode a String as a series of VarInts, one per character.<p/>
     *
     */
    // per-thread scratch buffer reused across readString calls
    private final ThreadLocal<char[]> chararr = new ThreadLocal<char[]>();

    private String readString(ByteData data, long position, int length) {
        long endPosition = position + length;
        char chararr[] = getCharArray(length);
        int count = 0;
        while(position < endPosition) {
            int c = VarInt.readVInt(data, position);
            chararr[count++] = (char)c;
            position += VarInt.sizeOfVInt(c);
        }
        // The number of chars may be fewer than the number of bytes in the serialized data
        return new String(chararr, 0, count);
    }

    // Returns the thread-local scratch buffer, grown to at least 'length'
    // (minimum 100 chars to avoid churn for small strings).
    private char[] getCharArray(int length) {
        if(length < 100)
            length = 100;
        char ch[] = chararr.get();
        if(ch == null || ch.length < length) {
            ch = new char[length];
            chararr.set(ch);
        }
        return ch;
    }

    // Lazily create (per thread, per type) a reusable deserialization record.
    FlatBlobDeserializationRecord getDeserializationRecord(String type) {
        Map<String, FlatBlobDeserializationRecord> map = deserializationRecords.get();
        if(map == null) {
            map = new HashMap<String, FlatBlobDeserializationRecord>();
            deserializationRecords.set(map);
        }
        FlatBlobDeserializationRecord rec = map.get(type);
        if(rec == null) {
            FastBlobSchema schema = framework.getSerializer(type).getFastBlobSchema();
            rec = new FlatBlobDeserializationRecord(schema);
            map.put(type, rec);
        }
        return rec;
    }

    // Unchecked: typeCaches stores wildcard caches; callers supply the matching type name.
    @SuppressWarnings("unchecked")
    <T> FlatBlobTypeCache<T> getTypeCache(String type) {
        return (FlatBlobTypeCache<T>)typeCaches.get(type);
    }
}
| 8,395 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/flatblob/FlatBlobTypeCache.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.flatblob;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * A reference-counted identity cache of deserialized objects for a single
 * type, keyed by ordinal.  {@link #get} and {@link #putIfAbsent} increment an
 * entry's reference count; {@link #evict} decrements it and removes the entry
 * once the count reaches zero.  Lock-free: coordination is done with CAS
 * operations on the per-entry counter plus a short spin in putIfAbsent.
 */
public class FlatBlobTypeCache<T> {

    private final String name;
    // ordinal -> cached object (wrapped so identity + refcount travel together)
    private final ConcurrentHashMap<Integer, ObjectIdentityKey> references;
    // reverse lookup (by object identity) used by evict()
    private final ConcurrentHashMap<ObjectIdentityKey, Integer> ordinalLookup;

    public FlatBlobTypeCache(String name) {
        this.name = name;
        this.references = new ConcurrentHashMap<Integer, ObjectIdentityKey>();
        this.ordinalLookup = new ConcurrentHashMap<ObjectIdentityKey, Integer>();
    }

    public String getName() {
        return name;
    }

    /**
     * Cache {@code obj} under {@code ordinal}, returning the canonical cached
     * instance (which may be an instance some other thread inserted first).
     * Negative ordinals are never cached; the object is returned unchanged.
     */
    @SuppressWarnings("unchecked")
    public T putIfAbsent(int ordinal, T obj) {
        if(ordinal >= 0) {
            Integer ordinalInteger = Integer.valueOf(ordinal);

            /// create a new key
            ObjectIdentityKey key = new ObjectIdentityKey(obj);

            while(true) {
                /// try to put the key in the references map.
                ObjectIdentityKey existingKey = references.putIfAbsent(ordinalInteger, key);
                if(existingKey == null) {
                    ordinalLookup.put(key, ordinalInteger);
                    return obj;
                }

                /// if unsuccessful, try to increment the references for the key which won the race
                if(existingKey.tryIncrementReferences())
                    return (T) existingKey.getObject();

                /// use the older object in the cache.
                key.setObject(existingKey.getObject());

                /// this will spin, but not acquire the lock, thus preventing starvation
                // (waits until the dying entry is actually removed, then retries the insert)
                while(references.get(ordinalInteger) == existingKey);
            }
        }
        return obj;
    }

    /**
     * Release one reference to {@code obj}; when the count drops to zero the
     * entry is removed from both maps.  No-op if the object is not cached.
     */
    public void evict(T obj) {
        ObjectIdentityKey lookupKey = getLookupKey(obj);

        if(lookupKey != null) {
            Integer ordinalInteger = ordinalLookup.get(lookupKey);
            ObjectIdentityKey actualKey = references.get(ordinalInteger);
            if(actualKey.decrementReferences()) {
                ordinalLookup.remove(actualKey);
                references.remove(ordinalInteger);
            }
        }
    }

    /**
     * Look up the object cached under {@code ordinal}.  A successful hit also
     * increments the entry's reference count (the caller owns one reference
     * and must eventually {@link #evict} it).  Returns null on a miss, for a
     * negative ordinal, or when the entry is concurrently being evicted.
     */
    @SuppressWarnings("unchecked")
    public T get(int ordinal) {
        if(ordinal < 0)
            return null;
        ObjectIdentityKey identityKey = references.get(Integer.valueOf(ordinal));
        if(identityKey != null && identityKey.tryIncrementReferences())
            return (T) identityKey.getObject();
        return null;
    }

    /// cache lookup keys to reduce object allocation.
    private static ThreadLocal<ObjectIdentityKey> lookupKey = new ThreadLocal<ObjectIdentityKey>();

    // Returns the per-thread reusable key, pointed at obj (identity semantics).
    private ObjectIdentityKey getLookupKey(Object obj) {
        ObjectIdentityKey key = lookupKey.get();
        if(key == null) {
            key = new ObjectIdentityKey();
            lookupKey.set(key);
        }
        key.setObject(obj);
        return key;
    }

    // Wraps a cached object with identity-based equality and a reference count.
    private static class ObjectIdentityKey {
        private Object obj;
        private final AtomicInteger referenceCount;

        // lookup-only key: starts with zero references
        public ObjectIdentityKey() {
            this.referenceCount = new AtomicInteger(0);
        }

        // cache-entry key: the inserter holds the first reference
        public ObjectIdentityKey(Object obj) {
            this.obj = obj;
            this.referenceCount = new AtomicInteger(1);
        }

        public Object getObject() {
            return obj;
        }

        public void setObject(Object obj) {
            this.obj = obj;
        }

        /**
         * We will only increment references if the number of references does not equal 0.
         *
         * If the number of references reaches 0, then this entry will be scheduled for eviction.
         *
         * @return
         */
        public boolean tryIncrementReferences() {
            while(true) {
                int current = referenceCount.get();
                if(current == 0)
                    return false;   // entry is dying; caller must treat as a miss
                int next = current + 1;
                if (referenceCount.compareAndSet(current, next))
                    return true;
            }
        }

        /**
         * Decrement references, and return true if the number of references reaches 0.
         *
         * @return
         */
        public boolean decrementReferences() {
            return referenceCount.decrementAndGet() == 0;
        }

        // identity-based hashing/equality: two keys are equal iff they wrap
        // the exact same object instance
        public int hashCode() {
            return System.identityHashCode(obj);
        }

        public boolean equals(Object other) {
            if(other instanceof ObjectIdentityKey) {
                return obj == ((ObjectIdentityKey)other).getObject();
            }
            return false;
        }
    }
}
| 8,396 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/flatblob/FlatBlobEvictionFrameworkSerializer.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.flatblob;
import com.netflix.zeno.serializer.FrameworkSerializer;
import com.netflix.zeno.serializer.NFTypeSerializer;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
/**
 * A {@link FrameworkSerializer} that performs no serialization at all: it
 * walks an object graph exactly the way the real serializer would, and evicts
 * every encountered element from the flat blob framework's type caches.
 */
public class FlatBlobEvictionFrameworkSerializer extends FrameworkSerializer<FlatBlobSerializationRecord> {

    private final FlatBlobSerializationFramework flatBlobFramework;

    public FlatBlobEvictionFrameworkSerializer(FlatBlobEvictor evictor, FlatBlobSerializationFramework flatBlobFramework) {
        super(evictor);
        this.flatBlobFramework = flatBlobFramework;
    }

    @Override
    public void serializePrimitive(FlatBlobSerializationRecord rec, String fieldName, Object value) {
        // primitives are never cached, so there is nothing to evict
    }

    @Override
    public void serializeBytes(FlatBlobSerializationRecord rec, String fieldName, byte[] value) {
        // byte arrays are never cached, so there is nothing to evict
    }

    /*
     * @Deprecated instead use serializeObject(NFSerializationRecord rec, String fieldName, Object obj)
     *
     */
    @Override
    @Deprecated
    @SuppressWarnings("unchecked")
    public void serializeObject(FlatBlobSerializationRecord rec, String fieldName, String typeName, Object obj) {
        // recurse into the object, then release its cache reference
        NFTypeSerializer<Object> serializer = getSerializer(typeName);
        serializer.serialize(obj, rec);
        FlatBlobTypeCache<Object> cache = flatBlobFramework.getTypeCache(typeName);
        cache.evict(obj);
    }

    @Override
    public void serializeObject(FlatBlobSerializationRecord rec, String fieldName, Object obj) {
        // resolve the type name from the schema, then delegate
        serializeObject(rec, fieldName, rec.getObjectType(fieldName), obj);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> void serializeList(FlatBlobSerializationRecord rec, String fieldName, String typeName, Collection<T> obj) {
        FlatBlobTypeCache<T> cache = flatBlobFramework.getTypeCache(typeName);
        NFTypeSerializer<T> elementSerializer = getSerializer(typeName);
        // walk and evict each element in turn
        for(T element : obj) {
            elementSerializer.serialize(element, rec);
            cache.evict(element);
        }
    }

    @Override
    public <T> void serializeSet(FlatBlobSerializationRecord rec, String fieldName, String typeName, Set<T> obj) {
        // a set is walked identically to a list
        serializeList(rec, fieldName, typeName, obj);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <K, V> void serializeMap(FlatBlobSerializationRecord rec, String fieldName, String keyTypeName, String valueTypeName, Map<K, V> obj) {
        FlatBlobTypeCache<K> keyCache = flatBlobFramework.getTypeCache(keyTypeName);
        FlatBlobTypeCache<V> valueCache = flatBlobFramework.getTypeCache(valueTypeName);
        NFTypeSerializer<K> keySerializer = getSerializer(keyTypeName);
        NFTypeSerializer<V> valueSerializer = getSerializer(valueTypeName);
        // walk and evict every key and every value
        for(Map.Entry<K, V> entry : obj.entrySet()) {
            K key = entry.getKey();
            V value = entry.getValue();
            keySerializer.serialize(key, rec);
            keyCache.evict(key);
            valueSerializer.serialize(value, rec);
            valueCache.evict(value);
        }
    }
}
| 8,397 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/json/JsonFrameworkDeserializer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.json;
import com.fasterxml.jackson.databind.JsonNode;
import com.netflix.zeno.serializer.FrameworkDeserializer;
import com.netflix.zeno.serializer.NFTypeSerializer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.codec.binary.Base64;
/**
 * {@link FrameworkDeserializer} that reads values out of a Jackson JSON tree.
 * Each record wraps a {@link JsonNode}; scalar fields may either be the node
 * itself (when a serializer is invoked directly on a scalar node) or a named
 * child of the current node.
 */
public class JsonFrameworkDeserializer extends FrameworkDeserializer<JsonReadGenericRecord> {

    JsonFrameworkDeserializer(JsonSerializationFramework framework) {
        super(framework);
    }

    /** Read a nullable boolean: the current node if it is a boolean, else the named child. */
    @Override
    public Boolean deserializeBoolean(JsonReadGenericRecord rec, String fieldName) {
        // fixed: previously re-cast 'rec' to its own declared type, inconsistent
        // with the sibling scalar methods below
        JsonNode node = rec.getNode().isBoolean() ? rec.getNode() : getJsonNode(rec, fieldName);
        if (node == null)
            return null;
        return node.booleanValue();
    }

    /** Read a nullable integer: the current node if numeric, else the named child. */
    @Override
    public Integer deserializeInteger(JsonReadGenericRecord record, String fieldName) {
        JsonNode node = record.getNode().isNumber() ? record.getNode() : getJsonNode(record, fieldName);
        if (node == null)
            return null;
        return node.intValue();
    }

    /** Read a nullable long: the current node if numeric, else the named child. */
    @Override
    public Long deserializeLong(JsonReadGenericRecord record, String fieldName) {
        JsonNode node = record.getNode().isNumber() ? record.getNode() : getJsonNode(record, fieldName);
        if (node == null)
            return null;
        return node.longValue();
    }

    /** Read a nullable float: the current node if numeric, else the named child. */
    @Override
    public Float deserializeFloat(JsonReadGenericRecord record, String fieldName) {
        JsonNode node = record.getNode().isNumber() ? record.getNode() : getJsonNode(record, fieldName);
        if (node == null)
            return null;
        return node.numberValue().floatValue();
    }

    /** Read a nullable double: the current node if numeric, else the named child. */
    @Override
    public Double deserializeDouble(JsonReadGenericRecord record, String fieldName) {
        JsonNode node = record.getNode().isNumber() ? record.getNode() : getJsonNode(record, fieldName);
        if (node == null)
            return null;
        return node.numberValue().doubleValue();
    }

    /** Read a nullable string: the current node if textual, else the named child. */
    @Override
    public String deserializeString(JsonReadGenericRecord record, String fieldName) {
        JsonNode node = record.getNode().isTextual() ? record.getNode() : getJsonNode(record, fieldName);
        if (node == null)
            return null;
        return node.textValue();
    }

    /**
     * @deprecated use instead deserializeObject(JsonReadGenericRecord rec, String fieldName, Class<T> clazz);
     */
    @Deprecated
    @Override
    public <T> T deserializeObject(JsonReadGenericRecord rec, String fieldName, String typeName, Class<T> clazz) {
        JsonNode node = getJsonNode(rec, fieldName);
        if (node == null)
            return null;
        return deserializeObject(rec, typeName, node);
    }

    // Delegate to the named type's serializer, wrapping the child node in a
    // fresh record carrying that serializer's schema.
    @SuppressWarnings({ "rawtypes", "unchecked" })
    private <T> T deserializeObject(JsonReadGenericRecord rec, String typeName, JsonNode node) {
        NFTypeSerializer serializer = ((NFTypeSerializer) (framework.getSerializer(typeName)));
        return (T) serializer.deserialize(new JsonReadGenericRecord(serializer.getFastBlobSchema(), node));
    }

    /** Read a nested object; the concrete type name is resolved from the schema. */
    @Override
    public <T> T deserializeObject(JsonReadGenericRecord rec, String fieldName, Class<T> clazz) {
        JsonNode node = getJsonNode(rec, fieldName);
        if (node == null)
            return null;
        return deserializeObject(rec, rec.getObjectType(fieldName), node);
    }

    /** Read a list of elements deserialized with {@code itemSerializer}. */
    @Override
    public <T> List<T> deserializeList(JsonReadGenericRecord record, String fieldName, NFTypeSerializer<T> itemSerializer) {
        // NOTE(review): reads the literal child "list" rather than 'fieldName' —
        // presumably matching what the JSON serializer writes; confirm against
        // JsonFrameworkSerializer before changing.
        JsonNode node = getJsonNode(record, "list");
        if (node == null)
            return null;
        List<T> list = new ArrayList<T>();
        deserializeCollection(node, itemSerializer, list);
        return list;
    }

    /** Read a set of elements deserialized with {@code itemSerializer}. */
    @Override
    public <T> Set<T> deserializeSet(JsonReadGenericRecord record, String fieldName, NFTypeSerializer<T> itemSerializer) {
        // NOTE(review): reads the literal child "set" rather than 'fieldName' —
        // see the matching note in deserializeList.
        JsonNode node = getJsonNode(record, "set");
        if (node == null)
            return null;
        Set<T> set = new HashSet<T>();
        deserializeCollection(node, itemSerializer, set);
        return set;
    }

    // Fetch the named child node; JSON null and missing children both map to Java null.
    // (Tightened from Object + cast: every caller passes a JsonReadGenericRecord.)
    private JsonNode getJsonNode(JsonReadGenericRecord record, String fieldName) {
        JsonNode node = record.getNode().get(fieldName);
        if (node == null || node.isNull()) {
            return null;
        }
        return node;
    }

    // Deserialize every element of a JSON array into the supplied collection.
    private <T> void deserializeCollection(JsonNode nodes, NFTypeSerializer<T> itemSerializer, Collection<T> elements) {
        try {
            for (Iterator<JsonNode> it = nodes.elements(); it.hasNext();) {
                JsonNode node = it.next();
                T element = itemSerializer.deserialize(new JsonReadGenericRecord(itemSerializer.getFastBlobSchema(), node));
                elements.add(element);
            }
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

    /** Read a map serialized as an array of {"key": ..., "value": ...} entries. */
    @Override
    public <K, V> Map<K, V> deserializeMap(JsonReadGenericRecord record, String fieldName, NFTypeSerializer<K> keySerializer, NFTypeSerializer<V> valueSerializer) {
        return deserializeIntoMap(record, fieldName, keySerializer, valueSerializer, new HashMap<K, V>());
    }

    /** Read a byte array serialized as a base64 string. */
    @Override
    public byte[] deserializeBytes(JsonReadGenericRecord record, String fieldName) {
        String str = deserializeString(record, fieldName);
        if (str == null) {
            return null;
        }
        return Base64.decodeBase64(str);
    }

    /** Read a sorted map; same wire format as {@link #deserializeMap}. */
    @Override
    public <K, V> SortedMap<K, V> deserializeSortedMap(JsonReadGenericRecord record, String fieldName, NFTypeSerializer<K> keySerializer, NFTypeSerializer<V> valueSerializer) {
        return deserializeIntoMap(record, fieldName, keySerializer, valueSerializer, new TreeMap<K, V>());
    }

    // Shared map decoding: each array element contributes one "key"/"value" pair.
    // Returns null (not the supplied map) when the field is absent.
    private <K, V, M extends Map<K, V>> M deserializeIntoMap(JsonReadGenericRecord rec, String fieldName, NFTypeSerializer<K> keySerializer, NFTypeSerializer<V> valueSerializer, M map) {
        JsonNode node = getJsonNode(rec, fieldName);
        if (node == null) {
            return null;
        }
        for (Iterator<JsonNode> it = node.elements(); it.hasNext();) {
            JsonNode element = it.next();
            K key = keySerializer.deserialize(new JsonReadGenericRecord(keySerializer.getFastBlobSchema(), element.get("key")));
            V value = valueSerializer.deserialize(new JsonReadGenericRecord(valueSerializer.getFastBlobSchema(), element.get("value")));
            map.put(key, value);
        }
        return map;
    }
}
| 8,398 |
0 | Create_ds/zeno/src/main/java/com/netflix/zeno | Create_ds/zeno/src/main/java/com/netflix/zeno/json/JsonReadGenericRecord.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.zeno.json;
import com.fasterxml.jackson.databind.JsonNode;
import com.netflix.zeno.fastblob.record.schema.FastBlobSchema;
import com.netflix.zeno.serializer.NFDeserializationRecord;
/**
* @author tvaliulin
*
*/
/**
 * An {@link NFDeserializationRecord} backed by a Jackson JSON tree node.
 * Pairs the schema with the subtree currently being deserialized.
 *
 * @author tvaliulin
 */
public class JsonReadGenericRecord extends NFDeserializationRecord {

    // the JSON subtree this record reads from
    private final JsonNode jsonNode;

    public JsonReadGenericRecord(FastBlobSchema schema, JsonNode node) {
        super(schema);
        this.jsonNode = node;
    }

    JsonNode getNode() {
        return jsonNode;
    }
}
| 8,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.