index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/UtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Note: Lincoln's Gettysburg Address is in the public domain. See LICENSE.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.Util.characterPad;
import static org.apache.datasketches.memory.internal.Util.getResourceBytes;
import static org.apache.datasketches.memory.internal.Util.getResourceFile;
import static org.apache.datasketches.memory.internal.Util.negativeCheck;
import static org.apache.datasketches.memory.internal.Util.nullCheck;
import static org.apache.datasketches.memory.internal.Util.zeroCheck;
import static org.apache.datasketches.memory.internal.Util.zeroPad;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermissions;
import org.apache.datasketches.memory.MemoryBoundsException;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
public class UtilTest {
  private static final String LS = System.getProperty("line.separator");

  //Binary Search

  @Test
  public void checkBinarySearch() {
    int k = 1024; //longs
    WritableMemory wMem = WritableMemory.allocate(k << 3); //1024 longs
    for (int i = 0; i < k; i++) { wMem.putLong(i << 3, i); }
    long idx = Util.binarySearchLongs(wMem, 0, k - 1, k / 2);
    long val = wMem.getLong(idx << 3);
    assertEquals(idx, k / 2);
    assertEquals(val, k / 2);
    //a target larger than every element is not found: the negative result
    //encodes the insertion point
    idx = Util.binarySearchLongs(wMem, 0, k - 1, k);
    assertEquals(idx, -1024);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void checkBoundsTest() {
    ResourceImpl.checkBounds(999, 2, 1000); //999 + 2 > 1000: must throw
  }

  @Test
  public void checkPadding() {
    String s = "123";
    String t = zeroPad(s, 4);
    assertTrue(t.startsWith("0")); //zeroPad prepends zeros
    t = characterPad(s, 4, '0', true);
    assertTrue(t.endsWith("0")); //postpend flag true appends
    t = characterPad(s, 3, '0', false);
    assertEquals(s, t); //already at field length: returned unchanged
  }

  @Test
  public void checkNullZeroNegativeChecks() {
    //each of the three argument checks must reject its illegal value
    final Object obj = null;
    expectIAE(() -> nullCheck(obj, "Test Object"));
    expectIAE(() -> zeroCheck(0, "Test Long"));
    expectIAE(() -> negativeCheck(-1L, "Test Long"));
  }

  /**
   * Runs the given task and fails the test unless the task throws an
   * {@code IllegalArgumentException}.
   * @param task the code expected to throw
   */
  private static void expectIAE(final Runnable task) {
    try {
      task.run();
      fail();
    } catch (final IllegalArgumentException e) {
      //expected
    }
  }

  @Test
  public void checkCodePointArr() {
    final Util.RandomCodePoints rvcp = new Util.RandomCodePoints(true);
    final int n = 1000;
    final int[] cpArr = new int[n];
    rvcp.fillCodePointArray(cpArr);
    //no generated code point may fall in the surrogate range
    for (int i = 0; i < n; i++) {
      int cp = cpArr[i];
      if ((cp >= Character.MIN_SURROGATE) && (cp <= Character.MAX_SURROGATE)) {
        fail();
      }
    }
  }

  @Test
  public void checkCodePoint() {
    final Util.RandomCodePoints rvcp = new Util.RandomCodePoints(true);
    final int n = 1000;
    //no generated code point may fall in the surrogate range
    for (int i = 0; i < n; i++) {
      int cp = rvcp.getCodePoint();
      if ((cp >= Character.MIN_SURROGATE) && (cp <= Character.MAX_SURROGATE)) {
        fail();
      }
    }
  }

  /**
   * Returns a one-line description of the POSIX attributes (owner, group,
   * permissions) of the given file.
   * @param file the file to describe
   * @return the formatted attribute string
   */
  static final String getFileAttributes(File file) {
    try {
      PosixFileAttributes attrs = Files.getFileAttributeView(
          file.toPath(), PosixFileAttributeView.class, new LinkOption[0]).readAttributes();
      return String.format("%s: %s %s %s%n",
          file.getPath(),
          attrs.owner().getName(),
          attrs.group().getName(),
          PosixFilePermissions.toString(attrs.permissions()));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /** Sets the GettysburgAddress.txt test resource to POSIX read-only (444). */
  static final void setGettysburgAddressFileToReadOnly() {
    File file = getResourceFile("GettysburgAddress.txt");
    try {
      Files.setPosixFilePermissions(file.toPath(), PosixFilePermissions.fromString("r--r--r--"));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  //Resources

  @Test
  public void resourceFileExits() {
    final String shortFileName = "GettysburgAddress.txt";
    final File file = getResourceFile(shortFileName);
    assertTrue(file.exists());
  }

  @Test(expectedExceptions = NullPointerException.class)
  public void resourceFileNotFound() {
    final String shortFileName = "GettysburgAddress.txt";
    getResourceFile(shortFileName + "123"); //missing resource throws NPE
  }

  @Test
  public void resourceBytesCorrect() {
    final String shortFileName = "GettysburgAddress.txt";
    final byte[] bytes = getResourceBytes(shortFileName);
    assertTrue(bytes.length == 1541); //known size of the checked-in resource
  }

  @Test(expectedExceptions = NullPointerException.class)
  public void resourceBytesFileNotFound() {
    final String shortFileName = "GettysburgAddress.txt";
    getResourceBytes(shortFileName + "123"); //missing resource throws NPE
  }

  @Test
  public void printlnTest() {
    println("PRINTING: " + this.getClass().getName());
  }

  static void println(final Object o) {
    if (o == null) { print(LS); }
    else { print(o.toString() + LS); }
  }

  /**
   * @param o value to print
   */
  static void print(final Object o) {
    if (o != null) {
      //System.out.print(o.toString()); //disable here
    }
  }
}
| 2,300 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/CopyMemoryOverlapTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
 * Exercises overlapping-range copies within a single direct memory, both
 * directly via {@code copyTo} and via overlapping writable regions.
 * @author Lee Rhodes
 */
public class CopyMemoryOverlapTest {

  @Test
  public void checkOverlapUsingMemory() throws Exception {
    long copyLongs = 1 << 20;
    double overlap = 0.5;
    long start_mS = System.currentTimeMillis();
    copyUsingDirectMemory(copyLongs, overlap, true);
    long end1_mS = System.currentTimeMillis();
    copyUsingDirectMemory(copyLongs, overlap, false);
    long end2_mS = System.currentTimeMillis();
    println("CopyUp Time Sec: " + ((end1_mS - start_mS) / 1000.0));
    println("CopyDn Time Sec: " + ((end2_mS - end1_mS) / 1000.0));
  }

  @Test
  public void checkOverlapUsingRegions() throws Exception {
    long copyLongs = 1 << 20;
    double overlap = 0.5;
    long start_mS = System.currentTimeMillis();
    copyUsingDirectRegions(copyLongs, overlap, true);
    long end1_mS = System.currentTimeMillis();
    copyUsingDirectRegions(copyLongs, overlap, false);
    long end2_mS = System.currentTimeMillis();
    println("CopyUp Time Sec: " + ((end1_mS - start_mS) / 1000.0));
    println("CopyDn Time Sec: " + ((end2_mS - end1_mS) / 1000.0));
  }

  /**
   * Copies {@code copyLongs} longs to an overlapping destination range within
   * one direct memory using {@code copyTo}, then verifies the destination.
   * @param copyLongs number of longs to copy
   * @param overlap fraction of the copy range shared by source and destination
   * @param copyUp if true the destination is above the source, else below
   * @throws Exception from direct memory allocation/close
   */
  private static final void copyUsingDirectMemory(long copyLongs, double overlap, boolean copyUp) throws Exception {
    println("Copy Using Direct Memory");
    long overlapLongs = (long) (overlap * copyLongs);
    long backingLongs = (2 * copyLongs) - overlapLongs;
    long fromOffsetLongs;
    long toOffsetLongs;
    //long deltaLongs;
    if (copyUp) {
      fromOffsetLongs = 0;
      toOffsetLongs = copyLongs - overlapLongs;
      //deltaLongs = toOffsetLongs - fromOffsetLongs;
    } else {
      fromOffsetLongs = copyLongs - overlapLongs;
      toOffsetLongs = 0;
      //deltaLongs = toOffsetLongs - fromOffsetLongs;
    }
    long backingBytes = backingLongs << 3;
    long copyBytes = copyLongs << 3;
    long fromOffsetBytes = fromOffsetLongs << 3;
    long toOffsetBytes = toOffsetLongs << 3;
    //long deltaBytes = deltaLongs << 3;
    println("Copy longs   : " + copyLongs + "\t bytes: " + copyBytes);
    println("Overlap      : " + (overlap * 100.0) + "%");
    println("CopyUp       : " + copyUp);
    println("Backing longs: " + backingLongs + "\t bytes: " + backingBytes);
    try (WritableMemory backingMem = WritableMemory.allocateDirect(backingBytes)) {
      fill(backingMem); //fill mem with 0 thru copyLongs -1
      //listMem(backingMem, "Original");
      backingMem.copyTo(fromOffsetBytes, backingMem, toOffsetBytes, copyBytes);
      //listMem(backingMem, "After");
      checkMemLongs(backingMem, fromOffsetLongs, toOffsetLongs, copyLongs);
    }
    println("");
  }

  /**
   * Same overlapping copy as {@link #copyUsingDirectMemory}, but performed
   * between two overlapping writable regions of the backing memory.
   * @param copyLongs number of longs to copy
   * @param overlap fraction of the copy range shared by source and destination
   * @param copyUp if true the destination is above the source, else below
   * @throws Exception from direct memory allocation/close
   */
  private static final void copyUsingDirectRegions(long copyLongs, double overlap, boolean copyUp) throws Exception {
    println("Copy Using Direct Regions"); //was "Direct Memory": copy-paste fix
    long overlapLongs = (long) (overlap * copyLongs);
    long backingLongs = (2 * copyLongs) - overlapLongs;
    long fromOffsetLongs;
    long toOffsetLongs;
    //long deltaLongs;
    if (copyUp) {
      fromOffsetLongs = 0;
      toOffsetLongs = copyLongs - overlapLongs;
      //deltaLongs = toOffsetLongs - fromOffsetLongs;
    } else {
      fromOffsetLongs = copyLongs - overlapLongs;
      toOffsetLongs = 0;
      //deltaLongs = toOffsetLongs - fromOffsetLongs;
    }
    long backingBytes = backingLongs << 3;
    long copyBytes = copyLongs << 3;
    long fromOffsetBytes = fromOffsetLongs << 3;
    long toOffsetBytes = toOffsetLongs << 3;
    //long deltaBytes = deltaLongs << 3;
    println("Copy longs   : " + copyLongs + "\t bytes: " + copyBytes);
    println("Overlap      : " + (overlap * 100.0) + "%");
    println("CopyUp       : " + copyUp);
    println("Backing longs: " + backingLongs + "\t bytes: " + backingBytes);
    try (WritableMemory backingMem = WritableMemory.allocateDirect(backingBytes)) {
      fill(backingMem); //fill mem with 0 thru copyLongs -1
      //listMem(backingMem, "Original");
      WritableMemory reg1 = backingMem.writableRegion(fromOffsetBytes, copyBytes);
      WritableMemory reg2 = backingMem.writableRegion(toOffsetBytes, copyBytes);
      reg1.copyTo(0, reg2, 0, copyBytes);
      //listMem(backingMem, "After");
      checkMemLongs(reg2, fromOffsetLongs, 0, copyLongs);
    }
    println("");
  }

  /** Fills the given memory with consecutive longs 0 .. (capacity/8 - 1). */
  private static final void fill(WritableMemory wmem) {
    long longs = wmem.getCapacity() >>> 3;
    for (long i = 0; i < longs; i++) { wmem.putLong(i << 3, i); } //fill with 0 .. (longs - 1)
    //checkMemLongs(wmem, 0L, 0L, longs);
  }

  /**
   * Verifies that the destination range holds the source sequence,
   * i.e. mem[toOffsetLongs + i] == fromOffsetLongs + i for each copied long.
   */
  private static final void checkMemLongs(Memory mem, long fromOffsetLongs, long toOffsetLongs, long copyLongs) {
    for (long i = 0; i < copyLongs; i++) {
      long memVal = mem.getLong((toOffsetLongs + i) << 3);
      assertEquals(memVal, fromOffsetLongs + i);
    }
  }

  /** Debugging aid: prints every long in the given memory. */
  @SuppressWarnings("unused")
  private static final void listMem(Memory mem, String comment) {
    println(comment);
    println("Idx\tValue");
    long longs = mem.getCapacity() >>> 3;
    for (long i = 0; i < longs; i++) {
      println(i + "\t" + mem.getLong(i << 3));
    }
  }

  @Test
  public void printlnTest() {
    println("PRINTING: " + this.getClass().getName());
  }

  /**
   * @param s value to print
   */
  static void println(String s) {
    //System.out.println(s); //disable here
  }
}
| 2,301 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/VirtualMachineMemoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import org.testng.annotations.Test;
@SuppressWarnings({"unused"})
public class VirtualMachineMemoryTest {

  /** Smoke test: querying page alignment must complete without throwing. */
  @Test
  public void inertPageAlignment() {
    final boolean pageAligned = VirtualMachineMemory.getIsPageAligned();
    //System.out.println("VM page alignment:" + pageAligned);
    assert (true); //reaching this point means no exception was thrown
  }
}
| 2,302 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/BaseBufferTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.fail;
import org.apache.datasketches.memory.Buffer;
import org.apache.datasketches.memory.BufferPositionInvariantsException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
 * Exercises the position invariants of Buffer: start &lt;= position &lt;= end &lt;= capacity.
 * @author Lee Rhodes
 */
public class BaseBufferTest {

  @Test
  public void checkLimits() {
    final Buffer buffer = Memory.wrap(new byte[100]).asBuffer();
    //both settings satisfy the invariant and must be accepted
    buffer.setStartPositionEnd(40, 45, 50);
    buffer.setStartPositionEnd(0, 0, 100);
    try {
      buffer.setStartPositionEnd(0, 0, 101); //end beyond capacity
      fail();
    } catch (final BufferPositionInvariantsException e) {
      //expected
    }
  }

  @Test
  public void checkLimitsAndCheck() {
    final Buffer buffer = Memory.wrap(new byte[100]).asBuffer();
    buffer.setStartPositionEnd(40, 45, 50);
    buffer.setStartPositionEnd(0, 0, 100);
    try {
      buffer.setStartPositionEnd(0, 0, 101); //end beyond capacity
      fail();
    } catch (final BufferPositionInvariantsException e) {
      //expected
    }
    buffer.setPosition(100); //position == end is still legal
    try {
      buffer.setPosition(101); //position beyond end
      fail();
    } catch (final BufferPositionInvariantsException e) {
      //expected
    }
    buffer.setPosition(99);
    buffer.incrementAndCheckPosition(1L); //advances to 100, still legal
    try {
      buffer.incrementAndCheckPosition(1L); //would pass the end
      fail();
    } catch (final BufferPositionInvariantsException e) {
      //expected
    }
  }

  @Test(expectedExceptions = IllegalStateException.class)
  public void checkCheckValid() {
    Buffer buffer;
    try (WritableMemory wmem = WritableMemory.allocateDirect(100)) {
      buffer = wmem.asBuffer();
    }
    //the backing direct resource is closed: wmem and buffer are no longer valid
    @SuppressWarnings("unused")
    Memory mem = buffer.asMemory();
  }
}
| 2,303 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/WritableMemoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
public class WritableMemoryTest {

  //Wrapping a heap ByteBuffer preserves its (big-endian) order unless an
  //explicit order is supplied.
  @Test
  public void wrapBigEndian() {
    ByteBuffer bb = ByteBuffer.allocate(64); //big endian
    WritableMemory wmem = WritableMemory.writableWrap(bb);
    assertEquals(wmem.getByteOrder(), ByteOrder.BIG_ENDIAN);
    wmem = WritableMemory.writableWrap(bb, ByteOrder.nativeOrder());
    //NOTE(review): assumes the test host is little-endian (x86/ARM default)
    assertEquals(wmem.getByteOrder(), ByteOrder.LITTLE_ENDIAN);
  }

  //A char written big-endian reads back byte-swapped (1 -> 256) when the
  //wrap is declared little-endian.
  @Test
  public void wrapBigEndianAsLittle() {
    ByteBuffer bb = ByteBuffer.allocate(64);
    bb.putChar(0, (char)1); //as NNO (non-native order)
    WritableMemory wmem = WritableMemory.writableWrap(bb, ByteOrder.LITTLE_ENDIAN, null);
    assertEquals(wmem.getChar(0), 256);
  }

  //The requested byte order must be reported by the resulting memory.
  @Test
  public void allocateWithByteOrder() {
    WritableMemory wmem = WritableMemory.allocate(64, ByteOrder.BIG_ENDIAN);
    assertEquals(wmem.getByteOrder(), ByteOrder.BIG_ENDIAN);
    wmem = WritableMemory.allocate(64, ByteOrder.LITTLE_ENDIAN);
    assertEquals(wmem.getByteOrder(), ByteOrder.LITTLE_ENDIAN);
    wmem = WritableMemory.writableWrap(new byte[64], 32, 32, ByteOrder.BIG_ENDIAN);
    assertEquals(wmem.getByteOrder(), ByteOrder.BIG_ENDIAN);
  }

  //getArray() must return the identical backing array, for both the
  //memory view and a buffer view derived from it.
  @Test
  public void checkGetArray() {
    byte[] byteArr = new byte[64];
    WritableMemory wmem = WritableMemory.writableWrap(byteArr);
    assertTrue(((BaseWritableMemoryImpl) wmem).getArray() == byteArr);
    WritableBuffer wbuf = wmem.asWritableBuffer();
    assertTrue(((BaseWritableBufferImpl)wbuf).getArray() == byteArr);
  }

  //Copying out of a memory into its own backing array is rejected, even
  //for a non-overlapping range.
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void checkSelfArrayCopy() {
    byte[] srcAndDst = new byte[128];
    WritableMemory wmem = WritableMemory.writableWrap(srcAndDst);
    wmem.getByteArray(0, srcAndDst, 64, 64); //non-overlapping
  }

  //equalTo: different capacities are unequal; identical contents (direct or
  //via a full region) are equal; a differing byte anywhere breaks equality.
  @Test
  public void checkEquals() {
    int len = 7;
    WritableMemory wmem1 = WritableMemory.allocate(len);
    //@SuppressWarnings({"EqualsWithItself", "SelfEquals"}) //unsupported
    //SelfEquals for Plexus, EqualsWithItself for IntelliJ
    //boolean eq1 = wmem1.equalTo(wmem1); //strict profile complains
    //assertTrue(eq1);
    WritableMemory wmem2 = WritableMemory.allocate(len + 1);
    assertFalse(wmem1.equalTo(wmem2)); //capacity mismatch
    WritableMemory reg1 = wmem1.writableRegion(0, wmem1.getCapacity());
    assertTrue(wmem1.equalTo(reg1)); //full region equals its parent
    wmem2 = WritableMemory.allocate(len);
    for (int i = 0; i < len; i++) {
      wmem1.putByte(i, (byte) i);
      wmem2.putByte(i, (byte) i);
    }
    assertTrue(wmem1.equalTo(wmem2));
    assertTrue(wmem1.equalTo(0, wmem1, 0, len));
    reg1 = wmem1.writableRegion(0, wmem1.getCapacity());
    assertTrue(wmem1.equalTo(0, reg1, 0, len));
    len = 24; //repeat with a longer (multi-word) length
    wmem1 = WritableMemory.allocate(len);
    wmem2 = WritableMemory.allocate(len);
    for (int i = 0; i < len; i++) {
      wmem1.putByte(i, (byte) i);
      wmem2.putByte(i, (byte) i);
    }
    assertTrue(wmem1.equalTo(0, wmem2, 0, len - 1));
    assertTrue(wmem1.equalTo(0, wmem2, 0, len));
    wmem2.putByte(0, (byte) 10); //perturb first byte
    assertFalse(wmem1.equalTo(0, wmem2, 0, len));
    wmem2.putByte(0, (byte) 0); //restore; perturb near the end
    wmem2.putByte(len - 2, (byte) 0);
    assertFalse(wmem1.equalTo(0, wmem2, 0, len - 1));
  }

  //equalTo: null comparand is unequal; offset-based comparison works for
  //sub-ranges.
  @Test
  public void checkEquals2() {
    int len = 23;
    WritableMemory wmem1 = WritableMemory.allocate(len);
    assertFalse(wmem1.equalTo(null));
    //@SuppressWarnings({"EqualsWithItself", "SelfEquals"}) //unsupported
    //SelfEquals for Plexus, EqualsWithItself for IntelliJ
    //boolean eq1 = wmem1.equalTo(wmem1); //strict profile complains
    //assertTrue(eq1);
    WritableMemory wmem2 = WritableMemory.allocate(len + 1);
    assertFalse(wmem1.equalTo(wmem2));
    for (int i = 0; i < len; i++) {
      wmem1.putByte(i, (byte) i);
      wmem2.putByte(i, (byte) i);
    }
    assertTrue(wmem1.equalTo(0, wmem2, 0, len));
    assertTrue(wmem1.equalTo(1, wmem2, 1, len - 1));
  }

  //equalTo on ranges larger than the internal unsafe-copy threshold, with a
  //"remainder" of 7 bytes, so every code path in the comparison is hit.
  @Test
  public void checkLargeEquals() {
    // Size bigger than UNSAFE_COPY_MEMORY_THRESHOLD; size with "reminder" = 7, to test several
    // traits of the implementation
    final int thresh = Util.UNSAFE_COPY_THRESHOLD_BYTES;
    byte[] bytes1 = new byte[(thresh * 2) + 7];
    ThreadLocalRandom.current().nextBytes(bytes1);
    byte[] bytes2 = bytes1.clone();
    Memory mem1 = Memory.wrap(bytes1);
    Memory mem2 = Memory.wrap(bytes2);
    assertTrue(mem1.equalTo(mem2));
    bytes2[thresh + 10] = (byte) (bytes1[thresh + 10] + 1); //differ in block 2
    assertFalse(mem1.equalTo(mem2));
    bytes2[thresh + 10] = bytes1[thresh + 10]; //restore
    bytes2[(thresh * 2) + 3] = (byte) (bytes1[(thresh * 2) + 3] + 1); //differ in remainder
    assertFalse(mem1.equalTo(mem2));
  }

  //Wrapping with an explicit non-native order must be reflected by
  //getByteOrder(), including for a zero-length array.
  //NOTE(review): assumes the test host's native order is little-endian
  @Test
  public void checkWrapWithBO() {
    WritableMemory wmem = WritableMemory.writableWrap(new byte[0], ByteOrder.BIG_ENDIAN);
    boolean nativeBO = wmem.getByteOrder() == ByteOrder.nativeOrder();
    assertFalse(nativeBO);
    println("" + nativeBO);
    wmem = WritableMemory.writableWrap(new byte[8], ByteOrder.BIG_ENDIAN);
    nativeBO = wmem.getByteOrder() == ByteOrder.nativeOrder();
    assertFalse(nativeBO);
    println("" + nativeBO);
  }

  //Demonstrates the owner/client pattern: a read-only Memory view can still
  //be written through an explicit downcast, and regions do not block the owner.
  @Test
  @SuppressWarnings("unused")
  public void checkOwnerClientCase() {
    WritableMemory owner = WritableMemory.allocate(64);
    Memory client1 = owner; //Client1 cannot write (no API)
    owner.putInt(0, 1); //But owner can write
    ((WritableMemory)client1).putInt(0, 2); //Client1 can write, but with explicit effort.
    Memory client2 = owner.region(0, owner.getCapacity()); //client2 cannot write (no API)
    owner.putInt(0, 3); //But Owner should be able to write
  }

  @Test
  public void printlnTest() {
    println("PRINTING: " + this.getClass().getName());
  }

  /**
   * @param s value to print
   */
  static void println(final String s) {
    //System.out.println(s);
  }
}
| 2,304 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/XxHash64Test.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.XxHash.hashBooleanArr;
import static org.apache.datasketches.memory.XxHash.hashByteArr;
import static org.apache.datasketches.memory.XxHash.hashCharArr;
import static org.apache.datasketches.memory.XxHash.hashDoubleArr;
import static org.apache.datasketches.memory.XxHash.hashFloatArr;
import static org.apache.datasketches.memory.XxHash.hashIntArr;
import static org.apache.datasketches.memory.XxHash.hashLong;
import static org.apache.datasketches.memory.XxHash.hashLongArr;
import static org.apache.datasketches.memory.XxHash.hashShortArr;
import static org.apache.datasketches.memory.XxHash.hashString;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.datasketches.memory.Resource;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
import net.openhft.hashing.LongHashFunction;
/**
 * Tests the xxHash64 implementation, including cross-checks against the
 * OpenHFT Zero-Allocation-Hashing reference implementation.
 * @author Lee Rhodes
 */
public class XxHash64Test {

  //Hashing must produce a nonzero result for every byte offset mod 16.
  @Test
  public void offsetChecks() {
    long seed = 12345;
    int blocks = 6;
    int cap = blocks * 16;
    long hash;
    WritableMemory wmem = WritableMemory.allocate(cap);
    for (int i = 0; i < cap; i++) { wmem.putByte(i, (byte)(-128 + i)); }
    for (int offset = 0; offset < 16; offset++) {
      int arrLen = cap - offset;
      hash = wmem.xxHash64(offset, arrLen, seed);
      assertTrue(hash != 0);
    }
  }

  //Hashing must produce a nonzero result for partially-filled byte arrays.
  @Test
  public void byteArrChecks() {
    long seed = 0;
    int offset = 0;
    int bytes = 16;
    for (int j = 1; j < bytes; j++) {
      byte[] in = new byte[bytes];
      WritableMemory wmem = WritableMemory.writableWrap(in);
      for (int i = 0; i < j; i++) { wmem.putByte(i, (byte) (-128 + i)); }
      long hash = wmem.xxHash64(offset, bytes, seed);
      assertTrue(hash != 0);
    }
  }

  /*
   * This test is adapted from
   * <a href="https://github.com/OpenHFT/Zero-Allocation-Hashing/blob/master/
   * src/test/java/net/openhft/hashing/XxHashCollisionTest.java">
   * OpenHFT/Zero-Allocation-Hashing</a> to test hash compatibility with that implementation.
   * It is licensed under Apache License, version 2.0. See LICENSE.
   */
  @Test
  public void collisionTest() {
    WritableMemory wmem = WritableMemory.allocate(128);
    wmem.putLong(0, 1);
    wmem.putLong(16, 42);
    wmem.putLong(32, 2);
    long h1 = wmem.xxHash64(0, wmem.getCapacity(), 0);
    //these specific offsets are engineered to collide in xxHash64
    wmem.putLong(0, 1L + 0xBA79078168D4BAFL);
    wmem.putLong(32, 2L + 0x9C90005B80000000L);
    long h2 = wmem.xxHash64(0, wmem.getCapacity(), 0);
    assertEquals(h1, h2);
    wmem.putLong(0, 1L + (0xBA79078168D4BAFL * 2));
    wmem.putLong(32, 2L + (0x392000b700000000L)); //= (0x9C90005B80000000L * 2) fix overflow false pos
    long h3 = wmem.xxHash64(0, wmem.getCapacity(), 0);
    assertEquals(h2, h3);
  }

  /**
   * This simple test compares the output of {@link Resource#xxHash64(long, long, long)} with the
   * output of {@link net.openhft.hashing.LongHashFunction}, that itself is tested against the
   * reference implementation in C. This increase confidence that the xxHash function implemented
   * in this package is in fact the same xxHash function implemented in C.
   *
   * @author Roman Leventov
   * @author Lee Rhodes
   */
  @Test
  public void testXxHash() {
    Random random = ThreadLocalRandom.current();
    for (int len = 0; len < 100; len++) {
      byte[] bytes = new byte[len];
      for (int i = 0; i < 10; i++) {
        long zahXxHash = LongHashFunction.xx().hashBytes(bytes);
        long memoryXxHash = Memory.wrap(bytes).xxHash64(0, len, 0);
        assertEquals(memoryXxHash, zahXxHash);
        random.nextBytes(bytes);
      }
    }
  }

  private static final byte[] barr = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};

  //Hashing the same 8 bytes through every typed-array view must give the
  //same result as hashing them through Memory.
  @Test
  public void testArrHashes() {
    WritableMemory wmem = WritableMemory.writableWrap(barr);
    long hash0 = wmem.xxHash64(8, 8, 0);
    long hash1 = hashByteArr(barr, 8, 8, 0);
    assertEquals(hash1, hash0);
    char[] carr = new char[8];
    wmem.getCharArray(0, carr, 0, 8);
    hash1 = hashCharArr(carr, 4, 4, 0);
    assertEquals(hash1, hash0);
    short[] sarr = new short[8];
    wmem.getShortArray(0, sarr, 0, 8);
    hash1 = hashShortArr(sarr, 4, 4, 0);
    assertEquals(hash1, hash0);
    int[] iarr = new int[4];
    wmem.getIntArray(0, iarr, 0, 4);
    hash1 = hashIntArr(iarr, 2, 2, 0);
    assertEquals(hash1, hash0);
    float[] farr = new float[4];
    wmem.getFloatArray(0, farr, 0, 4);
    hash1 = hashFloatArr(farr, 2, 2, 0);
    assertEquals(hash1, hash0);
    long[] larr = new long[2];
    wmem.getLongArray(0, larr, 0, 2);
    hash1 = hashLongArr(larr, 1, 1, 0);
    long in = wmem.getLong(8);
    long hash2 = hashLong(in, 0); //tests the single long hash; was octal '00'
    assertEquals(hash1, hash0);
    assertEquals(hash2, hash0);
    double[] darr = new double[2];
    wmem.getDoubleArray(0, darr, 0, 2);
    hash1 = hashDoubleArr(darr, 1, 1, 0);
    assertEquals(hash1, hash0);
    boolean[] blarr = new boolean[16];
    wmem.getBooleanArray(0, blarr, 0, 16); //any byte != 0 is true
    hash1 = hashBooleanArr(blarr, 8, 8, 0);
    assertEquals(hash1, hash0);
  }

  //Hashing a String must match hashing its char array.
  @Test
  public void testString() {
    String s = "Now is the time for all good men to come to the aid of their country.";
    char[] arr = s.toCharArray();
    long hash0 = hashString(s, 0, s.length(), 0);
    long hash1 = hashCharArr(arr, 0, arr.length, 0);
    assertEquals(hash1, hash0);
  }
}
| 2,305 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/CommonMemoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.Util.isAllBitsClear;
import static org.apache.datasketches.memory.internal.Util.isAllBitsSet;
import static org.apache.datasketches.memory.internal.Util.isAnyBitsClear;
import static org.apache.datasketches.memory.internal.Util.isAnyBitsSet;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
public class CommonMemoryTest {
@Test
public void checkSetGet() throws Exception {
  final int capacityBytes = 16; //must be at least 8
  //direct allocation; try-with-resources frees the off-heap memory
  try (WritableMemory wmem = WritableMemory.allocateDirect(capacityBytes)) {
    assertEquals(wmem.getCapacity(), capacityBytes);
    setGetTests(wmem);
  }
}
//Round-trips a single value of every primitive type through offset 0 of the
//given memory, checking both extremes where the type has them.
public static void setGetTests(WritableMemory mem) {
  //boolean
  mem.putBoolean(0, true);
  assertEquals(mem.getBoolean(0), true);
  mem.putBoolean(0, false);
  assertEquals(mem.getBoolean(0), false);
  //byte
  mem.putByte(0, (byte) -1);
  assertEquals(mem.getByte(0), (byte) -1);
  mem.putByte(0, (byte) 0);
  assertEquals(mem.getByte(0), (byte) 0);
  //char
  mem.putChar(0, 'A');
  assertEquals(mem.getChar(0), 'A');
  mem.putChar(0, 'Z');
  assertEquals(mem.getChar(0), 'Z');
  //short: both extremes
  mem.putShort(0, Short.MAX_VALUE);
  assertEquals(mem.getShort(0), Short.MAX_VALUE);
  mem.putShort(0, Short.MIN_VALUE);
  assertEquals(mem.getShort(0), Short.MIN_VALUE);
  //int: both extremes
  mem.putInt(0, Integer.MAX_VALUE);
  assertEquals(mem.getInt(0), Integer.MAX_VALUE);
  mem.putInt(0, Integer.MIN_VALUE);
  assertEquals(mem.getInt(0), Integer.MIN_VALUE);
  //float: both extremes
  mem.putFloat(0, Float.MAX_VALUE);
  assertEquals(mem.getFloat(0), Float.MAX_VALUE);
  mem.putFloat(0, Float.MIN_VALUE);
  assertEquals(mem.getFloat(0), Float.MIN_VALUE);
  //long: both extremes
  mem.putLong(0, Long.MAX_VALUE);
  assertEquals(mem.getLong(0), Long.MAX_VALUE);
  mem.putLong(0, Long.MIN_VALUE);
  assertEquals(mem.getLong(0), Long.MIN_VALUE);
  //double: both extremes
  mem.putDouble(0, Double.MAX_VALUE);
  assertEquals(mem.getDouble(0), Double.MAX_VALUE);
  mem.putDouble(0, Double.MIN_VALUE);
  assertEquals(mem.getDouble(0), Double.MIN_VALUE);
}
@Test
public void checkSetGetArrays() throws Exception {
  final int capacityBytes = 32;
  //direct allocation; try-with-resources frees the off-heap memory
  try (WritableMemory wmem = WritableMemory.allocateDirect(capacityBytes)) {
    assertEquals(capacityBytes, wmem.getCapacity());
    setGetArraysTests(wmem);
  }
}
public static void setGetArraysTests(WritableMemory mem) {
int accessCapacity = (int)mem.getCapacity();
int words = 4;
boolean[] srcArray1 = {true, false, true, false};
boolean[] dstArray1 = new boolean[words];
mem.fill(0, accessCapacity, (byte)127);
mem.putBooleanArray(0, srcArray1, 0, words);
mem.getBooleanArray(0, dstArray1, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray1[i], srcArray1[i]);
}
byte[] srcArray2 = { 1, -2, 3, -4 };
byte[] dstArray2 = new byte[4];
mem.putByteArray(0, srcArray2, 0, words);
mem.getByteArray(0, dstArray2, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray2[i], srcArray2[i]);
}
char[] srcArray3 = { 'A', 'B', 'C', 'D' };
char[] dstArray3 = new char[words];
mem.putCharArray(0, srcArray3, 0, words);
mem.getCharArray(0, dstArray3, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray3[i], srcArray3[i]);
}
double[] srcArray4 = { 1.0, -2.0, 3.0, -4.0 };
double[] dstArray4 = new double[words];
mem.putDoubleArray(0, srcArray4, 0, words);
mem.getDoubleArray(0, dstArray4, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray4[i], srcArray4[i], 0.0);
}
float[] srcArray5 = { (float)1.0, (float)-2.0, (float)3.0, (float)-4.0 };
float[] dstArray5 = new float[words];
mem.putFloatArray(0, srcArray5, 0, words);
mem.getFloatArray(0, dstArray5, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray5[i], srcArray5[i], 0.0);
}
int[] srcArray6 = { 1, -2, 3, -4 };
int[] dstArray6 = new int[words];
mem.putIntArray(0, srcArray6, 0, words);
mem.getIntArray(0, dstArray6, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray6[i], srcArray6[i]);
}
long[] srcArray7 = { 1, -2, 3, -4 };
long[] dstArray7 = new long[words];
mem.putLongArray(0, srcArray7, 0, words);
mem.getLongArray(0, dstArray7, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray7[i], srcArray7[i]);
}
short[] srcArray8 = { 1, -2, 3, -4 };
short[] dstArray8 = new short[words];
mem.putShortArray(0, srcArray8, 0, words);
mem.getShortArray(0, dstArray8, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray8[i], srcArray8[i]);
}
}
@Test
public void checkSetGetPartialArraysWithOffset() throws Exception {
int memCapacity = 32;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
assertEquals(memCapacity, mem.getCapacity());
setGetPartialArraysWithOffsetTests(mem);
}
}
public static void setGetPartialArraysWithOffsetTests(WritableMemory mem) {
int items = 4;
boolean[] srcArray1 = {true, false, true, false};
boolean[] dstArray1 = new boolean[items];
mem.putBooleanArray(0, srcArray1, 2, items / 2);
mem.getBooleanArray(0, dstArray1, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray1[i], srcArray1[i]);
}
byte[] srcArray2 = { 1, -2, 3, -4 };
byte[] dstArray2 = new byte[items];
mem.putByteArray(0, srcArray2, 2, items / 2);
mem.getByteArray(0, dstArray2, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray2[i], srcArray2[i]);
}
char[] srcArray3 = { 'A', 'B', 'C', 'D' };
char[] dstArray3 = new char[items];
mem.putCharArray(0, srcArray3, 2, items / 2);
mem.getCharArray(0, dstArray3, 2, items / 2 );
for (int i = 2; i < items; i++) {
assertEquals(dstArray3[i], srcArray3[i]);
}
double[] srcArray4 = { 1.0, -2.0, 3.0, -4.0 };
double[] dstArray4 = new double[items];
mem.putDoubleArray(0, srcArray4, 2, items / 2 );
mem.getDoubleArray(0, dstArray4, 2, items / 2 );
for (int i = 2; i < items; i++) {
assertEquals(dstArray4[i], srcArray4[i], 0.0);
}
float[] srcArray5 = { (float)1.0, (float)-2.0, (float)3.0, (float)-4.0 };
float[] dstArray5 = new float[items];
mem.putFloatArray(0, srcArray5, 2, items / 2 );
mem.getFloatArray(0, dstArray5, 2, items / 2 );
for (int i = 2; i < items; i++) {
assertEquals(dstArray5[i], srcArray5[i], 0.0);
}
int[] srcArray6 = { 1, -2, 3, -4 };
int[] dstArray6 = new int[items];
mem.putIntArray(0, srcArray6, 2, items / 2 );
mem.getIntArray(0, dstArray6, 2, items / 2 );
for (int i = 2; i < items; i++) {
assertEquals(dstArray6[i], srcArray6[i]);
}
long[] srcArray7 = { 1, -2, 3, -4 };
long[] dstArray7 = new long[items];
mem.putLongArray(0, srcArray7, 2, items / 2 );
mem.getLongArray(0, dstArray7, 2, items / 2 );
for (int i = 2; i < items; i++) {
assertEquals(dstArray7[i], srcArray7[i]);
}
short[] srcArray8 = { 1, -2, 3, -4 };
short[] dstArray8 = new short[items];
mem.putShortArray(0, srcArray8, 2, items / 2 );
mem.getShortArray(0, dstArray8, 2, items / 2 );
for (int i = 2; i < items; i++) {
assertEquals(dstArray8[i], srcArray8[i]);
}
}
@Test
public void checkSetClearIsBits() throws Exception {
int memCapacity = 8;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
assertEquals(memCapacity, mem.getCapacity());
mem.clear();
setClearIsBitsTests(mem);
}
}
public static void setClearIsBitsTests(WritableMemory mem) {
//single bits
for (int i = 0; i < 8; i++) {
long bitMask = (1 << i);
long v = mem.getByte(0) & 0XFFL;
assertTrue(isAnyBitsClear(v, bitMask));
mem.setBits(0, (byte) bitMask);
v = mem.getByte(0) & 0XFFL;
assertTrue(isAnyBitsSet(v, bitMask));
mem.clearBits(0, (byte) bitMask);
v = mem.getByte(0) & 0XFFL;
assertTrue(isAnyBitsClear(v, bitMask));
}
//multiple bits
for (int i = 0; i < 7; i++) {
long bitMask1 = (1 << i);
long bitMask2 = (3 << i);
long v = mem.getByte(0) & 0XFFL;
assertTrue(isAnyBitsClear(v, bitMask1));
assertTrue(isAnyBitsClear(v, bitMask2));
mem.setBits(0, (byte) bitMask1); //set one bit
v = mem.getByte(0) & 0XFFL;
assertTrue(isAnyBitsSet(v, bitMask2));
assertTrue(isAnyBitsClear(v, bitMask2));
assertFalse(isAllBitsSet(v, bitMask2));
assertFalse(isAllBitsClear(v, bitMask2));
}
}
@Test
public void checkSetClearMemoryRegions() throws Exception {
int memCapacity = 64; //must be 64
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
setClearMemoryRegionsTests(mem); //requires println enabled to visually check
for (int i = 0; i < memCapacity; i++) {
assertEquals(mem.getByte(i), 0);
}
}
}
//enable println statements to visually check
public static void setClearMemoryRegionsTests(WritableMemory mem) {
int accessCapacity = (int)mem.getCapacity();
//define regions
int reg1Start = 0;
int reg1Len = 28;
int reg2Start = 28;
int reg2Len = 32;
//set region 1
byte b1 = 5;
mem.fill(reg1Start, reg1Len, b1);
for (int i = reg1Start; i < (reg1Len + reg1Start); i++) {
assertEquals(mem.getByte(i), b1);
}
//println(mem.toHexString("Region1 to 5", reg1Start, reg1Len));
//set region 2
byte b2 = 7;
mem.fill(reg2Start, reg2Len, b2);
//println(mem.toHexString("Fill", 0, (int)mem.getCapacity()));
for (int i = reg2Start; i < (reg2Len + reg2Start); i++) {
assertEquals(mem.getByte(i), b2);
}
//println(mem.toHexString("Region2 to 7", reg2Start, reg2Len));
//clear region 1
byte zeroByte = 0;
mem.clear(reg1Start, reg1Len);
for (int i = reg1Start; i < (reg1Len + reg1Start); i++) {
assertEquals(mem.getByte(i), zeroByte);
}
//println(mem.toHexString("Region1 cleared", reg1Start, reg1Len));
//clear region 2
mem.clear(reg2Start, reg2Len);
for (int i = reg2Start; i < (reg2Len + reg2Start); i++) {
assertEquals(mem.getByte(i), zeroByte);
}
//println(mem.toHexString("Region2 cleared", reg2Start, reg2Len));
//set all to ones
byte b4 = 127;
mem.fill(b4);
for (int i = 0; i < accessCapacity; i++) {
assertEquals(mem.getByte(i), b4);
}
//println(mem.toHexString("Region1 + Region2 all ones", 0, accessCapacity));
//clear all
mem.clear();
for (int i = 0; i < accessCapacity; i++) {
assertEquals(mem.getByte(i), zeroByte);
}
//println(mem.toHexString("Region1 + Region2 cleared", 0, accessCapacity));
}
@Test
public void checkToHexStringAllMem() throws Exception {
int memCapacity = 48; //must be 48
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
toHexStringAllMemTests(mem); //requires println enabled to visually check
}
}
//enable println to visually check
public static void toHexStringAllMemTests(WritableMemory mem) {
int memCapacity = (int)mem.getCapacity();
for (int i = 0; i < memCapacity; i++) {
mem.putByte(i, (byte)i);
}
//println(mem.toHexString("Check toHexString(0, 48) to integers", 0, memCapacity));
//println(mem.toHexString("Check toHexString(8, 40)", 8, 40));
}
@Test
public void printlnTest() {
println("PRINTING: " + this.getClass().getName());
}
/**
* @param s value to print
*/
static void println(String s) {
//System.out.println(s); //disable here
}
}
| 2,306 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/LeafImplTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.Util.NON_NATIVE_BYTE_ORDER;
import static org.apache.datasketches.memory.internal.Util.otherByteOrder;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
* @author Lee Rhodes
*/
/**
 * Verifies the properties of each "leaf" Memory implementation — direct (off-heap),
 * ByteBuffer-backed, memory-mapped file, and on-heap — across native and non-native
 * byte order, with and without a ByteBuffer and a MemoryRequestServer.
 *
 * @author Lee Rhodes
 */
public class LeafImplTest {
  private static final ByteOrder NBO = ByteOrder.nativeOrder();
  private static final ByteOrder NNBO = NON_NATIVE_BYTE_ORDER;
  private static final MemoryRequestServer dummyMemReqSvr = new DummyMemoryRequestServer();

  /** No-op MemoryRequestServer used only to check identity via getMemoryRequestServer(). */
  static class DummyMemoryRequestServer implements MemoryRequestServer {
    @Override
    public WritableMemory request(WritableMemory currentWMem, long capacityBytes) { return null; }
    @Override
    public void requestClose(WritableMemory memToClose, WritableMemory newMemory) { }
  }

  /** Direct (off-heap) leaves: no backing unsafe object, isDirectResource() true. */
  @Test
  public void checkDirectLeaves() throws Exception {
    long off = 0;
    long cap = 128;
    // Off Heap, Native order, No ByteBuffer, has MemReqSvr
    try (WritableMemory memNO = WritableMemory.allocateDirect(cap, NBO, dummyMemReqSvr)) {
      memNO.putShort(0, (short) 1);
      assertNull(((ResourceImpl)memNO).getUnsafeObject());
      assertTrue(memNO.isDirectResource());
      checkCombinations(memNO, off, cap, memNO.isDirectResource(), NBO, false, true);
    }
    // Off Heap, Non Native order, No ByteBuffer, has MemReqSvr
    try (WritableMemory memNNO = WritableMemory.allocateDirect(cap, NNBO, dummyMemReqSvr)) {
      memNNO.putShort(0, (short) 1);
      assertNull(((ResourceImpl)memNNO).getUnsafeObject());
      assertTrue(memNNO.isDirectResource());
      checkCombinations(memNNO, off, cap, memNNO.isDirectResource(), NNBO, false, true);
    }
  }

  /** ByteBuffer-backed leaves: heap BB has an unsafe object, direct BB does not. */
  @Test
  public void checkByteBufferLeaves() {
    long off = 0;
    long cap = 128;
    //BB on heap, native order, has ByteBuffer, has MemReqSvr
    ByteBuffer bb = ByteBuffer.allocate((int)cap);
    bb.order(NBO);
    bb.putShort(0, (short) 1);
    WritableMemory mem = WritableMemory.writableWrap(bb, NBO, dummyMemReqSvr);
    assertEquals(bb.isDirect(), mem.isDirectResource());
    assertNotNull(((ResourceImpl)mem).getUnsafeObject());
    checkCombinations(mem, off, cap, mem.isDirectResource(), mem.getByteOrder(), true, true);
    //BB off heap, native order, has ByteBuffer, has MemReqSvr
    ByteBuffer dbb = ByteBuffer.allocateDirect((int)cap);
    dbb.order(NBO);
    dbb.putShort(0, (short) 1);
    mem = WritableMemory.writableWrap(dbb, NBO, dummyMemReqSvr);
    assertEquals(dbb.isDirect(), mem.isDirectResource());
    assertNull(((ResourceImpl)mem).getUnsafeObject());
    checkCombinations(mem, off, cap, mem.isDirectResource(), mem.getByteOrder(), true, true);
    //BB on heap, non native order, has ByteBuffer, has MemReqSvr
    bb = ByteBuffer.allocate((int)cap);
    bb.order(NNBO);
    bb.putShort(0, (short) 1);
    mem = WritableMemory.writableWrap(bb, NNBO, dummyMemReqSvr);
    assertEquals(bb.isDirect(), mem.isDirectResource());
    assertNotNull(((ResourceImpl)mem).getUnsafeObject());
    checkCombinations(mem, off, cap, mem.isDirectResource(), mem.getByteOrder(), true, true);
    //BB off heap, non native order, has ByteBuffer, has MemReqSvr
    dbb = ByteBuffer.allocateDirect((int)cap);
    dbb.order(NNBO);
    dbb.putShort(0, (short) 1);
    mem = WritableMemory.writableWrap(dbb, NNBO, dummyMemReqSvr);
    assertEquals(dbb.isDirect(), mem.isDirectResource());
    assertNull(((ResourceImpl)mem).getUnsafeObject());
    checkCombinations(mem, off, cap, mem.isDirectResource(), mem.getByteOrder(), true, true);
  }

  /** Memory-mapped file leaves: creates a fresh writable temp file, maps it in both byte orders. */
  @Test
  public void checkMapLeaves() throws Exception {
    long off = 0;
    long cap = 128;
    File file = new File("TestFile2.bin");
    //remove any leftover file from a previous run so createNewFile() below succeeds
    if (file.exists()) {
      try {
        java.nio.file.Files.delete(file.toPath());
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
    assertTrue(file.createNewFile());
    assertTrue(file.setWritable(true, false)); //writable=true, ownerOnly=false
    assertTrue(file.isFile());
    file.deleteOnExit(); //comment out if you want to examine the file.
    // Off Heap, Native order, No ByteBuffer, No MemReqSvr
    try (WritableMemory memNO = WritableMemory.writableMap(file, off, cap, NBO)) {
      memNO.putShort(0, (short) 1);
      assertNull(((ResourceImpl)memNO).getUnsafeObject());
      assertTrue(memNO.isDirectResource());
      checkCombinations(memNO, off, cap, memNO.isDirectResource(), NBO, false, false);
    }
    // Off heap, Non Native order, No ByteBuffer, no MemReqSvr
    try (WritableMemory memNNO = WritableMemory.writableMap(file, off, cap, NNBO)) {
      memNNO.putShort(0, (short) 1);
      assertNull(((ResourceImpl)memNNO).getUnsafeObject());
      assertTrue(memNNO.isDirectResource());
      checkCombinations(memNNO, off, cap, memNNO.isDirectResource(), NNBO, false, false);
    }
  }

  /** On-heap leaves: backed by an array (unsafe object non-null), not direct. */
  @Test
  public void checkHeapLeaves() {
    long off = 0;
    long cap = 128;
    // On Heap, Native order, No ByteBuffer, No MemReqSvr
    WritableMemory memNO = WritableMemory.allocate((int)cap); //assumes NBO
    memNO.putShort(0, (short) 1);
    assertNotNull(((ResourceImpl)memNO).getUnsafeObject());
    assertFalse(memNO.isDirectResource());
    checkCombinations(memNO, off, cap, memNO.isDirectResource(), NBO, false, false);
    // On Heap, Non-native order, No ByteBuffer, No MemReqSvr
    WritableMemory memNNO = WritableMemory.allocate((int)cap, NNBO);
    memNNO.putShort(0, (short) 1);
    assertNotNull(((ResourceImpl)memNNO).getUnsafeObject());
    assertFalse(memNNO.isDirectResource());
    checkCombinations(memNNO, off, cap, memNNO.isDirectResource(), NNBO, false, false);
  }

  /**
   * Exhaustively checks regions, buffers, duplicates, and their properties for a leaf
   * that already holds the short value 1 at offset 0.
   * Reading that short in the leaf's own byte order yields 1; reading it in the
   * opposite order yields the byte-swapped value 256 (0x0001 vs 0x0100).
   *
   * @param mem the leaf under test, with (short) 1 stored at offset 0
   * @param off region/buffer offset (always 0 here)
   * @param cap capacity of the leaf in bytes
   * @param direct whether the leaf is off-heap (no unsafe backing object)
   * @param bo the leaf's own byte order
   * @param hasByteBuffer whether the leaf is backed by a ByteBuffer
   * @param hasMemReqSvr whether the leaf was created with the dummy MemoryRequestServer
   */
  private static void checkCombinations(WritableMemory mem, long off, long cap,
      boolean direct, ByteOrder bo, boolean hasByteBuffer, boolean hasMemReqSvr) {
    ByteOrder oo = otherByteOrder(bo);
    //regions and buffer views of the Memory itself
    assertEquals(mem.writableRegion(off, cap, bo).getShort(0), 1);
    assertEquals(mem.writableRegion(off, cap, oo).getShort(0), 256); //byte-swapped
    assertEquals(mem.asWritableBuffer(bo).getShort(0), 1);
    assertEquals(mem.asWritableBuffer(oo).getShort(0), 256);
    assertEquals(mem.getTotalOffset(), 0);
    ByteBuffer bb = ((ResourceImpl)mem).getByteBuffer();
    assertTrue( hasByteBuffer ? bb != null : bb == null);
    assertTrue(mem.getByteOrder() == bo);
    if (hasMemReqSvr) { assertTrue(mem.getMemoryRequestServer() instanceof DummyMemoryRequestServer); }
    else { assertNull(mem.getMemoryRequestServer()); }
    Object obj = ((ResourceImpl)mem).getUnsafeObject();
    if (direct) {
      assertTrue(mem.isDirectResource());
      assertNull(obj);
      assertTrue(((ResourceImpl)mem).getCumulativeOffset(0) != 0);
    } else {
      assertFalse(mem.isDirectResource());
      assertNotNull(obj);
      assertTrue(((ResourceImpl)mem).getCumulativeOffset(0) != 0);
    }
    assertTrue(mem.isValid() == true);
    //same checks through a WritableBuffer view
    WritableBuffer buf = mem.asWritableBuffer();
    assertEquals(buf.writableRegion(off, cap, bo).getShort(0), 1);
    assertEquals(buf.writableRegion(off, cap, oo).getShort(0), 256);
    assertEquals(buf.writableDuplicate(bo).getShort(0), 1);
    assertEquals(buf.writableDuplicate(oo).getShort(0), 256);
    assertEquals(buf.getTotalOffset(), 0);
    bb = ((ResourceImpl)buf).getByteBuffer();
    assertTrue(hasByteBuffer ? bb != null : bb == null);
    assertTrue(buf.getByteOrder() == bo);
    if (hasMemReqSvr) { assertTrue(buf.getMemoryRequestServer() instanceof DummyMemoryRequestServer); }
    else { assertNull(buf.getMemoryRequestServer()); }
    obj = ((ResourceImpl)buf).getUnsafeObject();
    if (direct) {
      assertTrue(buf.isDirectResource());
      assertNull(obj);
      assertTrue(((ResourceImpl)buf).getCumulativeOffset(0) != 0);
    } else {
      assertFalse(buf.isDirectResource());
      assertNotNull(obj);
      assertTrue(((ResourceImpl)buf).getCumulativeOffset(0) != 0);
    }
    assertTrue(buf.isValid() == true);
    //a non-native-order region: its own order is oo, so the roles of 1 and 256 stay as asserted
    WritableMemory nnMem = mem.writableRegion(off, cap, oo);
    assertEquals(nnMem.writableRegion(off, cap, bo).getShort(0), 1);
    assertEquals(nnMem.writableRegion(off, cap, oo).getShort(0), 256);
    assertEquals(nnMem.asWritableBuffer(bo).getShort(0), 1);
    assertEquals(nnMem.asWritableBuffer(oo).getShort(0), 256);
    bb = ((ResourceImpl)nnMem).getByteBuffer();
    assertTrue( hasByteBuffer ? bb != null : bb == null);
    assertTrue(nnMem.getByteOrder() == oo);
    if (hasMemReqSvr) { assertTrue(nnMem.getMemoryRequestServer() instanceof DummyMemoryRequestServer); }
    obj = ((ResourceImpl)nnMem).getUnsafeObject();
    if (direct) {
      assertTrue(nnMem.isDirectResource());
      assertNull(obj);
      assertTrue(((ResourceImpl)nnMem).getCumulativeOffset(0) != 0);
    } else {
      assertFalse(nnMem.isDirectResource());
      assertNotNull(obj);
      assertTrue(((ResourceImpl)nnMem).getCumulativeOffset(0) != 0);
    }
    assertTrue(nnMem.isValid() == true);
    //a non-native-order buffer view, same pattern
    WritableBuffer nnBuf = mem.asWritableBuffer(oo);
    assertEquals(nnBuf.writableRegion(off, cap, bo).getShort(0), 1);
    assertEquals(nnBuf.writableRegion(off, cap, oo).getShort(0), 256);
    assertEquals(nnBuf.writableDuplicate(bo).getShort(0), 1);
    assertEquals(nnBuf.writableDuplicate(oo).getShort(0), 256);
    bb = ((ResourceImpl)nnBuf).getByteBuffer();
    assertTrue( hasByteBuffer ? bb != null : bb == null);
    assertTrue(nnBuf.getByteOrder() == oo);
    if (hasMemReqSvr) { assertTrue(nnBuf.getMemoryRequestServer() instanceof DummyMemoryRequestServer); }
    obj = ((ResourceImpl)nnBuf).getUnsafeObject();
    if (direct) {
      assertTrue(nnBuf.isDirectResource());
      assertNull(obj);
      assertTrue(((ResourceImpl)nnBuf).getCumulativeOffset(0) != 0);
    } else {
      assertFalse(nnBuf.isDirectResource());
      assertNotNull(obj);
      assertTrue(((ResourceImpl)nnBuf).getCumulativeOffset(0) != 0);
    }
    assertTrue(nnBuf.isValid() == true);
  }
}
| 2,307 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/NonNativeWritableBufferImplTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.Buffer;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
* @author Lee Rhodes
*/
/**
 * Exercises WritableBuffer over big-endian (non-native on most platforms) backing memory.
 * Each primitive-type test follows the same pattern: positional put/get, relative
 * (cursor-based) put/get, then a getArray/putArray round trip into a second
 * big-endian buffer whose raw bytes must match the first.
 *
 * @author Lee Rhodes
 */
public class NonNativeWritableBufferImplTest {

  //Check primitives
  /** char put/get (positional and relative) plus array round trip on a BIG_ENDIAN buffer. */
  @Test
  public void checkCharacters() {
    int n = 8;
    int m = Character.BYTES;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf = wmem.asWritableBuffer();
    char ch = 'a';
    for (int i = 0; i < n; i++) { wbuf.putChar(i * m, ch++); }
    ch = 'a';
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getChar(i * m), ch++);
    }
    ch = 'a';
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) { wbuf.putChar(ch++); } //relative puts advance the position
    ch = 'a';
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getChar(), ch++);
    }
    //getArr & putArr: read into a native array, write to a second BE buffer,
    //then compare raw backing bytes
    char[] cArr = new char[n]; //native
    wbuf.setPosition(0);
    wbuf.getCharArray(cArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf2 = wmem2.asWritableBuffer();
    wbuf2.putCharArray(cArr, 0, n);
    assertEquals(arr2, arr1);
  }

  /** double put/get (positional and relative) plus array round trip on a BIG_ENDIAN buffer. */
  @Test
  public void checkDoubles() {
    int n = 8;
    int m = Double.BYTES;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf = wmem.asWritableBuffer();
    double dbl = 1.0;
    for (int i = 0; i < n; i++) { wbuf.putDouble(i * m, dbl++); }
    dbl = 1.0;
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getDouble(i * m), dbl++);
    }
    dbl = 1.0;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) { wbuf.putDouble(dbl++); }
    dbl = 1.0;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getDouble(), dbl++);
    }
    //getArr & putArr
    double[] dblArr = new double[n]; //native
    wbuf.setPosition(0);
    wbuf.getDoubleArray(dblArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf2 = wmem2.asWritableBuffer();
    wbuf2.putDoubleArray(dblArr, 0, n);
    assertEquals(arr2, arr1);
  }

  /** float put/get (positional and relative) plus array round trip on a BIG_ENDIAN buffer. */
  @Test
  public void checkFloats() {
    int n = 8;
    int m = Float.BYTES;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf = wmem.asWritableBuffer();
    float flt = 1.0F;
    for (int i = 0; i < n; i++) { wbuf.putFloat(i * m, flt++); }
    flt = 1.0F;
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getFloat(i * m), flt++);
    }
    flt = 1.0F;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) { wbuf.putFloat(flt++); }
    flt = 1.0F;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getFloat(), flt++);
    }
    //getArr & putArr
    float[] fltArr = new float[n]; //native
    wbuf.setPosition(0);
    wbuf.getFloatArray(fltArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf2 = wmem2.asWritableBuffer();
    wbuf2.putFloatArray(fltArr, 0, n);
    assertEquals(arr2, arr1);
  }

  /** int put/get (positional and relative) plus array round trip on a BIG_ENDIAN buffer. */
  @Test
  public void checkInts() {
    int n = 8;
    int m = Integer.BYTES;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf = wmem.asWritableBuffer();
    int intg = 1;
    for (int i = 0; i < n; i++) { wbuf.putInt(i * m, intg++); }
    intg = 1;
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getInt(i * m), intg++);
    }
    intg = 1;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) { wbuf.putInt(intg++); }
    intg = 1;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getInt(), intg++);
    }
    //getArr & putArr
    int[] intArr = new int[n]; //native
    wbuf.setPosition(0);
    wbuf.getIntArray(intArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf2 = wmem2.asWritableBuffer();
    wbuf2.putIntArray(intArr, 0, n);
    assertEquals(arr2, arr1);
  }

  /** long put/get (positional and relative) plus array round trip on a BIG_ENDIAN buffer. */
  @Test
  public void checkLongs() {
    int n = 8;
    int m = Long.BYTES;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf = wmem.asWritableBuffer();
    long lng = 1;
    for (int i = 0; i < n; i++) { wbuf.putLong(i * m, lng++); }
    lng = 1;
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getLong(i * m), lng++);
    }
    lng = 1;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) { wbuf.putLong(lng++); }
    lng = 1;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getLong(), lng++);
    }
    //getArr & putArr
    long[] longArr = new long[n]; //native
    wbuf.setPosition(0);
    wbuf.getLongArray(longArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf2 = wmem2.asWritableBuffer();
    wbuf2.putLongArray(longArr, 0, n);
    assertEquals(arr2, arr1);
  }

  /** short put/get (positional and relative) plus array round trip on a BIG_ENDIAN buffer. */
  @Test
  public void checkShorts() {
    int n = 8;
    int m = Short.BYTES;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf = wmem.asWritableBuffer();
    short sht = 1;
    for (int i = 0; i < n; i++) { wbuf.putShort(i * m, sht++); }
    sht = 1;
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getShort(i * m), sht++);
    }
    sht = 1;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) { wbuf.putShort(sht++); }
    sht = 1;
    wbuf.setPosition(0);
    for (int i = 0; i < n; i++) {
      assertEquals(wbuf.getShort(), sht++);
    }
    //getArr & putArr
    short[] shortArr = new short[n]; //native
    wbuf.setPosition(0);
    wbuf.getShortArray(shortArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf2 = wmem2.asWritableBuffer();
    wbuf2.putShortArray(shortArr, 0, n);
    assertEquals(arr2, arr1);
  }

  //check Duplicate, Region
  /** Duplicates/regions of a non-empty BIG_ENDIAN buffer keep the BIG_ENDIAN order. */
  @Test
  public void checkDuplicate() {
    byte[] bArr = new byte[8];
    WritableMemory wmem = WritableMemory.writableWrap(bArr, ByteOrder.BIG_ENDIAN);
    WritableBuffer wbuf = wmem.asWritableBuffer();
    WritableBuffer wdup = wbuf.writableDuplicate();
    assertEquals(wdup.getByteOrder(), ByteOrder.BIG_ENDIAN);
    WritableBuffer wreg = wbuf.writableRegion();
    assertEquals(wreg.getByteOrder(), ByteOrder.BIG_ENDIAN);
  }

  /**
   * Zero-capacity duplicates/regions report LITTLE_ENDIAN — the expected behavior
   * asserted here is that byte order is not preserved for empty views.
   */
  @Test
  public void checkDuplicateZeros() {
    byte[] bArr = new byte[0];
    WritableMemory wmem = WritableMemory.writableWrap(bArr, ByteOrder.BIG_ENDIAN);
    Buffer buf = wmem.asBuffer();
    Buffer dup = buf.duplicate();
    assertEquals(dup.getByteOrder(), ByteOrder.LITTLE_ENDIAN);
    Buffer reg = buf.region();
    assertEquals(reg.getByteOrder(), ByteOrder.LITTLE_ENDIAN);
  }
}
| 2,308 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/MemoryWriteToTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.Util.UNSAFE_COPY_THRESHOLD_BYTES;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Verifies {@code Memory.writeTo(...)} by streaming each Memory's contents into a
 * {@link WritableByteChannel} backed by a byte array and comparing the captured
 * bytes with the source. Sizes straddle {@code UNSAFE_COPY_THRESHOLD_BYTES} so both
 * small and chunked copy paths are exercised.
 */
public class MemoryWriteToTest {

  /** On-heap Memory over random bytes, across a spread of sizes. */
  @Test
  public void testOnHeap() throws IOException {
    int big = UNSAFE_COPY_THRESHOLD_BYTES * 5;
    for (int size : new int[] {0, 7, 1023, 10_000, big, big + 10}) {
      testWriteTo(createRandomBytesMemory(size));
    }
  }

  /** On-heap Memory over random ints, across a spread of sizes (in ints). */
  @Test
  public void testOnHeapInts() throws IOException {
    int big = UNSAFE_COPY_THRESHOLD_BYTES * 5;
    for (int size : new int[] {0, 7, 1023, 10_000, big, big + 10}) {
      testWriteTo(createRandomIntsMemory(size));
    }
  }

  /** Off-heap (direct) Memory: one allocation, regions of increasing size. */
  @Test
  public void testOffHeap() throws Exception {
    int big = UNSAFE_COPY_THRESHOLD_BYTES * 5;
    try (WritableMemory mem = WritableMemory.allocateDirect(big + 10)) {
      testWriteTo(mem.region(0, 0)); //empty region
      for (int size : new int[] {7, 1023, 10_000, big, big + 10}) {
        testOffHeap(mem, size);
      }
    }
  }

  /** Fills the first {@code size} bytes of {@code mem} with random data and checks writeTo on that region. */
  private static void testOffHeap(WritableMemory mem, int size) throws IOException {
    createRandomBytesMemory(size).copyTo(0, mem, 0, size);
    testWriteTo(mem.region(0, size));
  }

  /** @return an on-heap Memory wrapping {@code size} random bytes */
  private static Memory createRandomBytesMemory(int size) {
    byte[] buf = new byte[size];
    ThreadLocalRandom.current().nextBytes(buf);
    return Memory.wrap(buf);
  }

  /** @return an on-heap Memory wrapping {@code size} random ints */
  private static Memory createRandomIntsMemory(int size) {
    return Memory.wrap(ThreadLocalRandom.current().ints(size).toArray());
  }

  /** Streams all of {@code mem} through a channel into a byte array and asserts equality. */
  private static void testWriteTo(Memory mem) throws IOException {
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    WritableByteChannel channel = Channels.newChannel(sink);
    try {
      mem.writeTo(0, mem.getCapacity(), channel);
    } finally {
      channel.close();
    }
    Assert.assertTrue(mem.equalTo(Memory.wrap(sink.toByteArray())));
  }
}
| 2,309 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/MemoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Note: Lincoln's Gettysburg Address is in the public domain. See LICENSE.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.List;
import org.apache.datasketches.memory.DefaultMemoryRequestServer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.Resource;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.testng.collections.Lists;
/**
 * Round-trip and API tests for Memory/WritableMemory backed by heap arrays,
 * off-heap (direct) allocations, and ByteBuffers.
 */
public class MemoryTest {
  private static final String LS = Util.LS;

  @BeforeClass
  public void setReadOnly() {
    UtilTest.setGettysburgAddressFileToReadOnly();
  }

  //Writes then reads back longs in an off-heap allocation; closed by try-with-resources.
  @Test
  public void checkDirectRoundTrip() throws Exception {
    int n = 1024; //longs
    try (WritableMemory mem = WritableMemory.allocateDirect(n * 8)) {
      for (int i = 0; i < n; i++) {
        mem.putLong(i * 8, i);
      }
      for (int i = 0; i < n; i++) {
        long v = mem.getLong(i * 8);
        assertEquals(v, i);
      }
    }
  }

  //Writes then reads back longs in a heap allocation.
  @Test
  public void checkAutoHeapRoundTrip() {
    int n = 1024; //longs
    WritableMemory wmem = WritableMemory.allocate(n * 8);
    for (int i = 0; i < n; i++) {
      wmem.putLong(i * 8, i);
    }
    for (int i = 0; i < n; i++) {
      long v = wmem.getLong(i * 8);
      assertEquals(v, i);
    }
  }

  //Wraps a byte[] writable and read-only; also verifies every zero-length
  //primitive-array and ByteBuffer wrap reports zero capacity.
  @Test
  public void checkArrayWrap() {
    int n = 1024; //longs
    byte[] arr = new byte[n * 8];
    WritableMemory wmem = WritableMemory.writableWrap(arr);
    for (int i = 0; i < n; i++) {
      wmem.putLong(i * 8, i);
    }
    for (int i = 0; i < n; i++) {
      long v = wmem.getLong(i * 8);
      assertEquals(v, i);
    }
    Memory mem = Memory.wrap(arr, ByteOrder.nativeOrder());
    for (int i = 0; i < n; i++) {
      long v = mem.getLong(i * 8);
      assertEquals(v, i);
    }
    // check 0 length array wraps
    Memory memZeroLengthArrayBoolean = WritableMemory.writableWrap(new boolean[0]);
    Memory memZeroLengthArrayByte = WritableMemory.writableWrap(new byte[0]);
    Memory memZeroLengthArrayChar = WritableMemory.writableWrap(new char[0]);
    Memory memZeroLengthArrayShort = WritableMemory.writableWrap(new short[0]);
    Memory memZeroLengthArrayInt = WritableMemory.writableWrap(new int[0]);
    Memory memZeroLengthArrayLong = WritableMemory.writableWrap(new long[0]);
    Memory memZeroLengthArrayFloat = WritableMemory.writableWrap(new float[0]);
    Memory memZeroLengthArrayDouble = WritableMemory.writableWrap(new double[0]);
    assertEquals(memZeroLengthArrayBoolean.getCapacity(), 0);
    assertEquals(memZeroLengthArrayByte.getCapacity(), 0);
    assertEquals(memZeroLengthArrayChar.getCapacity(), 0);
    assertEquals(memZeroLengthArrayShort.getCapacity(), 0);
    assertEquals(memZeroLengthArrayInt.getCapacity(), 0);
    assertEquals(memZeroLengthArrayLong.getCapacity(), 0);
    assertEquals(memZeroLengthArrayFloat.getCapacity(), 0);
    assertEquals(memZeroLengthArrayDouble.getCapacity(), 0);
    // check 0 length array wraps
    List<Memory> memoryToCheck = Lists.newArrayList();
    memoryToCheck.add(WritableMemory.allocate(0));
    memoryToCheck.add(WritableMemory.writableWrap(ByteBuffer.allocate(0)));
    memoryToCheck.add(WritableMemory.writableWrap(new boolean[0]));
    memoryToCheck.add(WritableMemory.writableWrap(new byte[0]));
    memoryToCheck.add(WritableMemory.writableWrap(new char[0]));
    memoryToCheck.add(WritableMemory.writableWrap(new short[0]));
    memoryToCheck.add(WritableMemory.writableWrap(new int[0]));
    memoryToCheck.add(WritableMemory.writableWrap(new long[0]));
    memoryToCheck.add(WritableMemory.writableWrap(new float[0]));
    memoryToCheck.add(WritableMemory.writableWrap(new double[0]));
    memoryToCheck.add(Memory.wrap(ByteBuffer.allocate(0)));
    memoryToCheck.add(Memory.wrap(new boolean[0]));
    memoryToCheck.add(Memory.wrap(new byte[0]));
    memoryToCheck.add(Memory.wrap(new char[0]));
    memoryToCheck.add(Memory.wrap(new short[0]));
    memoryToCheck.add(Memory.wrap(new int[0]));
    memoryToCheck.add(Memory.wrap(new long[0]));
    memoryToCheck.add(Memory.wrap(new float[0]));
    memoryToCheck.add(Memory.wrap(new double[0]));
    //Check the Memory lengths
    for (Memory memory : memoryToCheck) {
      assertEquals(memory.getCapacity(), 0);
    }
  }

  //Data written via the wrapper must be visible through the heap ByteBuffer
  //and through an independent wrap of the backing array.
  @Test
  public void checkByteBufHeap() {
    int n = 1024; //longs
    byte[] arr = new byte[n * 8];
    ByteBuffer bb = ByteBuffer.wrap(arr);
    bb.order(ByteOrder.nativeOrder());
    WritableMemory wmem = WritableMemory.writableWrap(bb);
    for (int i = 0; i < n; i++) { //write to wmem
      wmem.putLong(i * 8, i);
    }
    for (int i = 0; i < n; i++) { //read from wmem
      long v = wmem.getLong(i * 8);
      assertEquals(v, i);
    }
    for (int i = 0; i < n; i++) { //read from BB
      long v = bb.getLong(i * 8);
      assertEquals(v, i);
    }
    Memory mem1 = Memory.wrap(arr);
    for (int i = 0; i < n; i++) { //read from wrapped arr
      long v = mem1.getLong(i * 8);
      assertEquals(v, i);
    }
    //convert to RO
    Memory mem = wmem;
    for (int i = 0; i < n; i++) {
      long v = mem.getLong(i * 8);
      assertEquals(v, i);
    }
  }

  //Same visibility checks as above, but over a direct ByteBuffer.
  @Test
  public void checkByteBufDirect() {
    int n = 1024; //longs
    ByteBuffer bb = ByteBuffer.allocateDirect(n * 8);
    bb.order(ByteOrder.nativeOrder());
    WritableMemory wmem = WritableMemory.writableWrap(bb);
    for (int i = 0; i < n; i++) { //write to wmem
      wmem.putLong(i * 8, i);
    }
    for (int i = 0; i < n; i++) { //read from wmem
      long v = wmem.getLong(i * 8);
      assertEquals(v, i);
    }
    for (int i = 0; i < n; i++) { //read from BB
      long v = bb.getLong(i * 8);
      assertEquals(v, i);
    }
    Memory mem1 = Memory.wrap(bb);
    for (int i = 0; i < n; i++) { //read from wrapped bb RO
      long v = mem1.getLong(i * 8);
      assertEquals(v, i);
    }
    //convert to RO
    Memory mem = wmem;
    for (int i = 0; i < n; i++) {
      long v = mem.getLong(i * 8);
      assertEquals(v, i);
    }
  }

  //Wrapping preserves the ByteBuffer's non-native byte order.
  @Test
  public void checkByteBufWrongOrder() {
    int n = 1024; //longs
    ByteBuffer bb = ByteBuffer.allocate(n * 8);
    bb.order(ByteOrder.BIG_ENDIAN);
    Memory mem = Memory.wrap(bb);
    assertFalse(mem.getByteOrder() == ByteOrder.nativeOrder());
    assertEquals(mem.getByteOrder(), ByteOrder.BIG_ENDIAN);
  }

  //A read-only slice starting at position 64 yields bytes offset by 64.
  @Test
  public void checkReadOnlyHeapByteBuffer() {
    ByteBuffer bb = ByteBuffer.allocate(128);
    bb.order(ByteOrder.nativeOrder());
    for (int i = 0; i < 128; i++) { bb.put(i, (byte)i); }
    bb.position(64);
    ByteBuffer slice = bb.slice().asReadOnlyBuffer();
    slice.order(ByteOrder.nativeOrder()); //slice() resets order to BIG_ENDIAN
    Memory mem = Memory.wrap(slice);
    for (int i = 0; i < 64; i++) {
      assertEquals(mem.getByte(i), 64 + i);
    }
    mem.toHexString("slice", 0, slice.capacity());
    //println(s);
  }

  //Bulk put followed by bulk get returns the original long array.
  @Test
  public void checkPutGetArraysHeap() {
    int n = 1024; //longs
    long[] arr = new long[n];
    for (int i = 0; i < n; i++) { arr[i] = i; }
    WritableMemory wmem = WritableMemory.allocate(n * 8);
    wmem.putLongArray(0, arr, 0, n);
    long[] arr2 = new long[n];
    wmem.getLongArray(0, arr2, 0, n);
    for (int i = 0; i < n; i++) {
      assertEquals(arr2[i], i);
    }
  }

  //A read-only region over the top half exposes the expected offset values.
  @Test
  public void checkRORegions() {
    int n = 16;
    int n2 = n / 2;
    long[] arr = new long[n];
    for (int i = 0; i < n; i++) { arr[i] = i; }
    Memory mem = Memory.wrap(arr);
    Memory reg = mem.region(n2 * 8, n2 * 8); //top half
    for (int i = 0; i < n2; i++) {
      long v = reg.getLong(i * 8);
      long e = i + n2;
      assertEquals(v, e);
    }
  }

  //Same as above but the region uses the non-native byte order,
  //so reads must be byte-reversed to recover the stored values.
  @Test
  public void checkRORegionsReverseBO() {
    int n = 16;
    int n2 = n / 2;
    long[] arr = new long[n];
    for (int i = 0; i < n; i++) { arr[i] = i; }
    Memory mem = Memory.wrap(arr);
    Memory reg = mem.region(n2 * 8, n2 * 8, Util.NON_NATIVE_BYTE_ORDER); //top half
    for (int i = 0; i < n2; i++) {
      long v = Long.reverseBytes(reg.getLong(i * 8));
      long e = i + n2;
      assertEquals(v, e);
    }
  }

  //Writes through a writable region of the top half are visible in the parent:
  //after the region write, slot i holds i % 8 for all 16 slots.
  @Test
  public void checkWRegions() {
    int n = 16;
    int n2 = n / 2;
    long[] arr = new long[n];
    for (int i = 0; i < n; i++) { arr[i] = i; }
    WritableMemory wmem = WritableMemory.writableWrap(arr);
    for (int i = 0; i < n; i++) {
      assertEquals(wmem.getLong(i * 8), i);
      //println("" + wmem.getLong(i * 8));
    }
    //println("");
    WritableMemory reg = wmem.writableRegion(n2 * 8, n2 * 8);
    for (int i = 0; i < n2; i++) { reg.putLong(i * 8, i); }
    for (int i = 0; i < n; i++) {
      assertEquals(wmem.getLong(i * 8), i % 8);
      //println("" + wmem.getLong(i * 8));
    }
  }

  //Same as above with a non-native-order region: the top half is stored
  //byte-reversed relative to the parent's native-order reads.
  @Test
  public void checkWRegionsReverseBO() {
    int n = 16;
    int n2 = n / 2;
    long[] arr = new long[n];
    for (int i = 0; i < n; i++) { arr[i] = i; }
    WritableMemory wmem = WritableMemory.writableWrap(arr);
    for (int i = 0; i < n; i++) {
      assertEquals(wmem.getLong(i * 8), i);
      //println("" + wmem.getLong(i * 8));
    }
    //println("");
    WritableMemory reg = wmem.writableRegion(n2 * 8, n2 * 8, Util.NON_NATIVE_BYTE_ORDER);
    for (int i = 0; i < n2; i++) { reg.putLong(i * 8, i); }
    for (int i = 0; i < n; i++) {
      long v = wmem.getLong(i * 8);
      if (i < n2) {
        assertEquals(v, i % 8);
      } else {
        assertEquals(Long.reverseBytes(v), i % 8);
      }
      //println("" + wmem.getLong(i * 8));
    }
  }

  //Reading a direct allocation after close must throw.
  @Test(expectedExceptions = IllegalStateException.class)
  public void checkParentUseAfterFree() throws Exception {
    int bytes = 64 * 8;
    WritableMemory wmem = WritableMemory.allocateDirect(bytes);
    wmem.close();
    wmem.getLong(0);
  }

  //Closing the parent invalidates regions derived from it.
  @Test(expectedExceptions = IllegalStateException.class)
  public void checkRegionUseAfterFree() throws Exception {
    int bytes = 64;
    Memory mem = WritableMemory.allocateDirect(bytes);
    Memory region = mem.region(0L, bytes);
    mem.close();
    region.getByte(0);
  }

  //Without an explicit MemoryRequestServer none is attached (policy choice);
  //with one supplied, both the memory and derived buffers expose it.
  @Test
  public void checkMemReqSvr() throws Exception {
    WritableMemory wmem;
    WritableBuffer wbuf;
    if (Resource.defaultMemReqSvr == null) { //This is a policy choice
      //ON HEAP
      wmem = WritableMemory.writableWrap(new byte[16]);
      assertNull(wmem.getMemoryRequestServer());
      wbuf = wmem.asWritableBuffer();
      assertNull(wbuf.getMemoryRequestServer());
      //OFF HEAP
      try (WritableMemory wmem2 = WritableMemory.allocateDirect(16)) { //OFF HEAP
        assertNull(wmem2.getMemoryRequestServer());
        wbuf = wmem2.asWritableBuffer();
        assertNull(wbuf.getMemoryRequestServer());
      }
      //ByteBuffer
      ByteBuffer bb = ByteBuffer.allocate(16);
      wmem = WritableMemory.writableWrap(bb);
      assertNull(wmem.getMemoryRequestServer());
      wbuf = wmem.asWritableBuffer();
      assertNull(wbuf.getMemoryRequestServer());
    }
    MemoryRequestServer memReqSvr = new DefaultMemoryRequestServer();
    //ON HEAP
    wmem = WritableMemory.writableWrap(new byte[16], 0, 16, Util.NATIVE_BYTE_ORDER, memReqSvr);
    assertNotNull(wmem.getMemoryRequestServer());
    wbuf = wmem.asWritableBuffer();
    assertNotNull(wbuf.getMemoryRequestServer());
    //OFF HEAP
    try (WritableMemory wmem3 = WritableMemory.allocateDirect(16, Util.NATIVE_BYTE_ORDER, memReqSvr)) {
      assertNotNull(wmem3.getMemoryRequestServer());
      wbuf = wmem3.asWritableBuffer(); //fixed: was wmem — must exercise the direct allocation's buffer
      assertNotNull(wbuf.getMemoryRequestServer());
    }
    //ByteBuffer
    ByteBuffer bb = ByteBuffer.allocate(16);
    wmem = WritableMemory.writableWrap(bb, Util.NATIVE_BYTE_ORDER, memReqSvr);
    assertNotNull(wmem.getMemoryRequestServer());
    wbuf = wmem.asWritableBuffer();
    assertNotNull(wbuf.getMemoryRequestServer());
  }

  //equalTo/compareTo of a memory against overlapping ranges of itself.
  @Test
  public void checkSelfEqualsToAndCompareTo() {
    int len = 64;
    WritableMemory wmem = WritableMemory.allocate(len);
    for (int i = 0; i < len; i++) { wmem.putByte(i, (byte) i); }
    assertTrue(wmem.equalTo(0, wmem, 0, len));
    assertFalse(wmem.equalTo(0, wmem, len / 2, len / 2));
    assertEquals(wmem.compareTo(0, len, wmem, 0, len), 0);
    assertTrue(wmem.compareTo(0, 0, wmem, len / 2, len / 2) < 0);
  }

  //A char written big-endian reads back byte-swapped (1 -> 256) when wrapped little-endian.
  @Test
  public void wrapBigEndianAsLittle() {
    ByteBuffer bb = ByteBuffer.allocate(64);
    bb.putChar(0, (char)1); //as NNO
    Memory mem = Memory.wrap(bb, ByteOrder.LITTLE_ENDIAN);
    assertEquals(mem.getChar(0), 256);
  }

  @Test
  public void printlnTest() {
    println("PRINTING: " + this.getClass().getName());
  }

  static void println(final Object o) {
    if (o == null) { print(LS); }
    else { print(o.toString() + LS); }
  }

  /**
   * @param o value to print
   */
  static void print(final Object o) {
    if (o != null) {
      //System.out.print(o.toString()); //disable here
    }
  }
}
| 2,310 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/ThreadTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.Util.getResourceFile;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
import java.io.File;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Verifies thread confinement of closeable Memory resources: a memory-mapped
 * file and a direct allocation opened on the main thread are expected to
 * reject close() attempts made from a different thread.
 * NOTE(review): relies on the alternate thread's close attempt throwing
 * regardless of whether it runs before or after the owner's close — confirm
 * the confinement contract in the Resource implementation.
 */
public class ThreadTest {
  File file;           // read-only Gettysburg Address resource file
  Memory mem;          // memory-mapped view of 'file', owned by the main thread
  WritableMemory wmem; // direct allocation, owned by the main thread
  Thread altThread;    // non-owner thread that attempts the illegal close
  @BeforeClass
  public void prepareFileAndMemory() {
    //the resource file must be read-only before mapping it read-only
    UtilTest.setGettysburgAddressFileToReadOnly();
    file = getResourceFile("GettysburgAddress.txt");
    assertTrue(AllocateDirectWritableMap.isFileReadOnly(file));
  }
  //Maps the file on the calling (owner) thread.
  void initMap() {
    mem = Memory.map(file); assertTrue(mem.isValid());
  }
  //Allocates direct memory on the calling (owner) thread.
  void initDirectMem() {
    wmem = WritableMemory.allocateDirect(1024); assertTrue(wmem.isValid());
  }
  //Run on altThread: closing the map from a non-owner thread must throw.
  Runnable tryMapClose = () -> {
    try { mem.close(); fail(); }
    catch (IllegalStateException expected) { }
  };
  //Run on altThread: closing the direct memory from a non-owner thread must throw.
  Runnable tryDirectClose = () -> {
    try { wmem.close(); fail(); }
    catch (IllegalStateException expected) { }
  };
  @Test
  public void runTests() {
    initMap();
    altThread = new Thread(tryMapClose, "altThread"); altThread.start();
    mem.close(); //owner close succeeds
    initDirectMem();
    altThread = new Thread(tryDirectClose, "altThread"); altThread.start();
    wmem.close(); //owner close succeeds
  }
}
| 2,311 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/UnsafeUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
import java.util.ArrayList;
import java.util.List;
import org.testng.annotations.Test;
/**
* @author Lee Rhodes
*/
public class UnsafeUtilTest {
  long testField = 1; //Do not remove & cannot be static. Used in reflection check.

  /**
   * Valid JDK version strings (1.7 through 13 era formats) must parse and pass
   * the version check; unsupported or malformed strings must be rejected.
   */
  @Test
  public void checkJdkString() {
    String[] good1_Strings = {"1.8.0_121", "8", "9", "10", "11", "12", "13"};
    for (String jdkVer : good1_Strings) {
      int[] p = UnsafeUtil.parseJavaVersion(jdkVer);
      UnsafeUtil.checkJavaVersion(jdkVer, p[0], p[1]); //must not throw
      int jdkMajor = (p[0] == 1) ? p[1] : p[0]; //model the actual JDK_MAJOR
      if (p[0] == 1) { assertTrue(jdkMajor == p[1]); }
      if (p[0] > 1 ) { assertTrue(jdkMajor == p[0]); }
    }
    //unsupported or malformed versions must throw IllegalArgumentException
    assertInvalidVersion("14.0.4");  //ver 14 string
    assertInvalidVersion("1.7.0_80"); //1.7 string
    assertInvalidVersion("1.6.0_65"); //valid string but < 1.7
    assertInvalidVersion("b");        //invalid string
    assertInvalidVersion("");         //invalid string
  }

  /**
   * Asserts that parsing and/or checking the given version string throws
   * IllegalArgumentException (either call may be the one that throws).
   * @param jdkVer the version string expected to be rejected
   */
  private static void assertInvalidVersion(final String jdkVer) {
    try {
      int[] p = UnsafeUtil.parseJavaVersion(jdkVer);
      UnsafeUtil.checkJavaVersion(jdkVer, p[0], p[1]);
      fail();
    } catch (IllegalArgumentException e) {
      println("" + e);
    }
  }

  /**
   * The reflective field-offset lookup finds 'testField' (offset 16 on the
   * tested JVM layout) and fails for a nonexistent field.
   * NOTE(review): the literal 16 depends on JVM object layout — confirm.
   */
  @Test
  public void checkFieldOffset() {
    assertEquals(testField, 1);
    long offset = UnsafeUtil.getFieldOffset(this.getClass(), "testField");
    assertEquals(offset, 16);
    try {
      offset = UnsafeUtil.getFieldOffset(this.getClass(), "testField2");
      fail();
    } catch (IllegalStateException e) {
      //OK
    }
  }

  //A long that does not fit in an int must be rejected by checkedCast.
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void checkInts() {
    Ints.checkedCast(1L << 32);
  }

  //The cached array base offsets must match Unsafe's answer for every
  //primitive array class, Object[], and an uncached array type.
  @SuppressWarnings("restriction")
  @Test
  public void checkArrayBaseOffset()
  {
    final List<Class<?>> classes = new ArrayList<>();
    classes.add(byte[].class);
    classes.add(int[].class);
    classes.add(long[].class);
    classes.add(float[].class);
    classes.add(double[].class);
    classes.add(boolean[].class);
    classes.add(short[].class);
    classes.add(char[].class);
    classes.add(Object[].class);
    classes.add(byte[][].class); // An array type that is not cached
    for (Class<?> clazz : classes) {
      assertEquals(
          UnsafeUtil.getArrayBaseOffset(clazz),
          UnsafeUtil.unsafe.arrayBaseOffset(clazz),
          clazz.getTypeName()
      );
    }
  }

  @Test
  public void printlnTest() {
    println("PRINTING: " + this.getClass().getName());
  }

  /**
   * @param s String to print
   */
  static void println(final String s) {
    //System.out.println(s);
  }
}
| 2,312 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/test/java/org/apache/datasketches/memory/internal/NativeWritableMemoryImplTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.Buffer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryBoundsException;
import org.apache.datasketches.memory.ReadOnlyException;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
public class NativeWritableMemoryImplTest {
//Simple Native direct
@Test
public void checkNativeCapacityAndClose() {
int memCapacity = 64;
WritableMemory mem = WritableMemory.allocateDirect(memCapacity);
assertEquals(memCapacity, mem.getCapacity());
mem.close(); //intentional
assertFalse(mem.isValid());
}
//Simple Native arrays
@Test
public void checkBooleanArray() {
boolean[] srcArray = { true, false, true, false, false, true, true, false };
boolean[] dstArray = new boolean[8];
Memory mem = Memory.wrap(srcArray);
mem.getBooleanArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
wmem.getBooleanArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
assertTrue(mem.isHeapResource());
}
@Test
public void checkByteArray() {
byte[] srcArray = { 1, -2, 3, -4, 5, -6, 7, -8 };
byte[] dstArray = new byte[8];
Memory mem = Memory.wrap(srcArray);
mem.getByteArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
wmem.getByteArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
}
@Test
public void checkCharArray() {
char[] srcArray = { 1, 2, 3, 4, 5, 6, 7, 8 };
char[] dstArray = new char[8];
Memory mem = Memory.wrap(srcArray);
mem.getCharArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
wmem.getCharArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
}
@Test
public void checkShortArray() {
short[] srcArray = { 1, -2, 3, -4, 5, -6, 7, -8 };
short[] dstArray = new short[8];
Memory mem = Memory.wrap(srcArray);
mem.getShortArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
wmem.getShortArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
}
@Test
public void checkIntArray() {
int[] srcArray = { 1, -2, 3, -4, 5, -6, 7, -8 };
int[] dstArray = new int[8];
Memory mem = Memory.wrap(srcArray);
mem.getIntArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
wmem.getIntArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
}
@Test
public void checkLongArray() {
long[] srcArray = { 1, -2, 3, -4, 5, -6, 7, -8 };
long[] dstArray = new long[8];
Memory mem = Memory.wrap(srcArray);
mem.getLongArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
wmem.getLongArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
}
@Test
public void checkFloatArray() {
float[] srcArray = { 1, -2, 3, -4, 5, -6, 7, -8 };
float[] dstArray = new float[8];
Memory mem = Memory.wrap(srcArray);
mem.getFloatArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
wmem.getFloatArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
}
@Test
public void checkDoubleArray() {
double[] srcArray = { 1, -2, 3, -4, 5, -6, 7, -8 };
double[] dstArray = new double[8];
Memory mem = Memory.wrap(srcArray);
mem.getDoubleArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
wmem.getDoubleArray(0, dstArray, 0, 8);
for (int i = 0; i < 8; i++) {
assertEquals(dstArray[i], srcArray[i]);
}
}
  @Test
  public void checkNativeBaseBound() {
    //toHexString with an offset at full capacity must fail the bounds check
    int memCapacity = 64;
    try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
      mem.toHexString("Force Assertion Error", memCapacity, 8);
      fail("Should have thrown MemoryBoundsException");
    } catch (MemoryBoundsException e) { //bounds exception
      //ok
    }
  }
  @Test
  public void checkNativeSrcArrayBound() {
    //requesting 5 elements from a 4-element source array must fail the bounds check
    long memCapacity = 64;
    try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
      byte[] srcArray = { 1, -2, 3, -4 };
      mem.putByteArray(0L, srcArray, 0, 5);
      fail("Should have thrown MemoryBoundsException");
    } catch (MemoryBoundsException e) {
      //pass
    }
  }
//Copy Within tests
@Test(expectedExceptions = IllegalArgumentException.class)
public void checkDegenerateCopyTo() {
WritableMemory wmem = WritableMemory.allocate(64);
wmem.copyTo(0, wmem, 0, 64); //Attempt to copy a block of memory exactly in-place, should be a bug
}
  @Test
  public void checkCopyWithinNativeSmall() {
    //copies the first half of a direct allocation onto its own second half
    int memCapacity = 64;
    int half = memCapacity / 2;
    try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
      mem.clear();
      for (int i = 0; i < half; i++) { //fill first half
        mem.putByte(i, (byte) i);
      }
      mem.copyTo(0, mem, half, half);
      for (int i = 0; i < half; i++) {
        assertEquals(mem.getByte(i + half), (byte) i);
      }
    }
  }
  @Test
  public void checkCopyWithinNativeLarge() {
    //same self-copy as the small case but over >2MB
    //NOTE(review): the large size presumably exercises a different internal copy path — confirm
    int memCapacity = (2 << 20) + 64;
    int memCapLongs = memCapacity / 8;
    int halfBytes = memCapacity / 2;
    int halfLongs = memCapLongs / 2;
    try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
      mem.clear();
      for (int i = 0; i < halfLongs; i++) {
        mem.putLong(i * 8, i);
      }
      mem.copyTo(0, mem, halfBytes, halfBytes);
      for (int i = 0; i < halfLongs; i++) {
        assertEquals(mem.getLong((i + halfLongs) * 8), i);
      }
    }
  }
  @Test
  public void checkCopyWithinNativeSrcBound() {
    //source range 32..64 exceeds capacity 64 by one byte
    int memCapacity = 64;
    try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
      mem.copyTo(32, mem, 32, 33); //hit source bound check
      fail("Should have thrown MemoryBoundsException");
    }
    catch (MemoryBoundsException e) {
      //pass
    }
  }
  @Test
  public void checkCopyWithinNativeDstBound() {
    //destination range 32..64 exceeds capacity 64 by one byte
    int memCapacity = 64;
    try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
      mem.copyTo(0, mem, 32, 33); //hit dst bound check
      fail("Should have thrown MemoryBoundsException");
    }
    catch (MemoryBoundsException e) {
      //pass
    }
  }
  @Test
  public void checkCopyCrossNativeSmall() {
    //full copy between two distinct direct allocations
    int memCapacity = 64;
    try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity);
        WritableMemory mem2 = WritableMemory.allocateDirect(memCapacity))
    {
      for (int i = 0; i < memCapacity; i++) {
        mem1.putByte(i, (byte) i);
      }
      mem2.clear();
      mem1.copyTo(0, mem2, 0, memCapacity);
      for (int i = 0; i < memCapacity; i++) {
        assertEquals(mem2.getByte(i), (byte) i);
      }
    }
  }
  @Test
  public void checkCopyCrossNativeLarge() {
    //full copy between two distinct direct allocations of >2MB
    int memCapacity = (2 << 20) + 64;
    int memCapLongs = memCapacity / 8;
    try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity);
        WritableMemory mem2 = WritableMemory.allocateDirect(memCapacity))
    {
      for (int i = 0; i < memCapLongs; i++) {
        mem1.putLong(i * 8, i);
      }
      mem2.clear();
      mem1.copyTo(0, mem2, 0, memCapacity);
      for (int i = 0; i < memCapLongs; i++) {
        assertEquals(mem2.getLong(i * 8), i);
      }
    }
  }
  @Test
  public void checkCopyCrossNativeAndByteArray() {
    //copies a 16-byte window from direct memory into a heap allocation at a different offset
    int memCapacity = 64;
    try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity)) {
      for (int i = 0; i < mem1.getCapacity(); i++) {
        mem1.putByte(i, (byte) i);
      }
      WritableMemory mem2 = WritableMemory.allocate(memCapacity);
      mem1.copyTo(8, mem2, 16, 16);
      for (int i = 0; i < 16; i++) {
        assertEquals(mem1.getByte(8 + i), mem2.getByte(16 + i));
      }
      //println(mem2.toHexString("Mem2", 0, (int)mem2.getCapacity()));
    }
  }
  @Test
  public void checkCopyCrossRegionsSameNative() {
    //copies between two non-overlapping regions of the same direct allocation
    int memCapacity = 128;
    try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity)) {
      for (int i = 0; i < mem1.getCapacity(); i++) {
        mem1.putByte(i, (byte) i);
      }
      //println(mem1.toHexString("Mem1", 0, (int)mem1.getCapacity()));
      Memory reg1 = mem1.region(8, 16);   //bytes 8..23
      //println(reg1.toHexString("Reg1", 0, (int)reg1.getCapacity()));
      WritableMemory reg2 = mem1.writableRegion(24, 16); //bytes 24..39
      //println(reg2.toHexString("Reg2", 0, (int)reg2.getCapacity()));
      reg1.copyTo(0, reg2, 0, 16);
      for (int i = 0; i < 16; i++) {
        assertEquals(reg1.getByte(i), reg2.getByte(i));
        assertEquals(mem1.getByte(8 + i), mem1.getByte(24 + i));
      }
      //println(mem1.toHexString("Mem1", 0, (int)mem1.getCapacity()));
    }
  }
@Test
public void checkCopyCrossNativeArrayAndHierarchicalRegions() {
int memCapacity = 64;
try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity)) {
for (int i = 0; i < mem1.getCapacity(); i++) { //fill with numbers
mem1.putByte(i, (byte) i);
}
//println(mem1.toHexString("Mem1", 0, (int)mem1.getCapacity()));
WritableMemory mem2 = WritableMemory.allocate(memCapacity);
Memory reg1 = mem1.region(8, 32);
Memory reg1B = reg1.region(8, 16);
//println(reg1.toHexString("Reg1", 0, (int)reg1.getCapacity()));
//println(reg1B.toHexString("Reg1B", 0, (int)reg1B.getCapacity()));
WritableMemory reg2 = mem2.writableRegion(32, 16);
reg1B.copyTo(0, reg2, 0, 16);
//println(reg2.toHexString("Reg2", 0, (int)reg2.getCapacity()));
//println(mem2.toHexString("Mem2", 0, (int)mem2.getCapacity()));
for (int i = 32, j = 16; i < 40; i++, j++) {
assertEquals(mem2.getByte(i), j);
}
}
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void checkRegionBounds() {
int memCapacity = 64;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
mem.writableRegion(1, 64);
}
}
  @Test
  public void checkByteBufferWrap() {
    //wraps a heap ByteBuffer writable; contents and the backing buffer must be exposed
    int memCapacity = 64;
    ByteBuffer byteBuf = ByteBuffer.allocate(memCapacity);
    byteBuf.order(ByteOrder.nativeOrder());
    for (int i = 0; i < memCapacity; i++) {
      byteBuf.put(i, (byte) i);
    }
    WritableMemory wmem = WritableMemory.writableWrap(byteBuf);
    for (int i = 0; i < memCapacity; i++) {
      assertEquals(wmem.getByte(i), byteBuf.get(i));
    }
    assertTrue(wmem.isByteBufferResource());
    ByteBuffer byteBuf2 = ((ResourceImpl)wmem).getByteBuffer();
    assertEquals(byteBuf2, byteBuf); //the wrapper must retain the original buffer
    //println( mem.toHexString("HeapBB", 0, memCapacity));
  }
  @Test
  public void checkWrapWithBBReadonly1() {
    //a writable wrap assigned to the read-only Memory interface still reads correctly
    //NOTE(review): despite the name, the wrapped ByteBuffer here is writable — confirm intent
    int memCapacity = 64;
    ByteBuffer byteBuf = ByteBuffer.allocate(memCapacity);
    byteBuf.order(ByteOrder.nativeOrder());
    for (int i = 0; i < memCapacity; i++) {
      byteBuf.put(i, (byte) i);
    }
    Memory mem = WritableMemory.writableWrap(byteBuf);
    for (int i = 0; i < memCapacity; i++) {
      assertEquals(mem.getByte(i), byteBuf.get(i));
    }
    //println(mem.toHexString("HeapBB", 0, memCapacity));
  }
@Test(expectedExceptions = ReadOnlyException.class)
public void checkWrapWithBBReadonly2() {
int memCapacity = 64;
ByteBuffer byteBuf = ByteBuffer.allocate(memCapacity);
byteBuf.order(ByteOrder.nativeOrder());
ByteBuffer byteBufRO = byteBuf.asReadOnlyBuffer();
WritableMemory.writableWrap(byteBufRO);
}
  @Test
  public void checkWrapWithDirectBBReadonly() {
    //a read-only view of a direct ByteBuffer wraps read-only and reads correctly
    int memCapacity = 64;
    ByteBuffer byteBuf = ByteBuffer.allocateDirect(memCapacity);
    byteBuf.order(ByteOrder.nativeOrder());
    for (int i = 0; i < memCapacity; i++) {
      byteBuf.put(i, (byte) i);
    }
    ByteBuffer byteBufRO = byteBuf.asReadOnlyBuffer();
    byteBufRO.order(ByteOrder.nativeOrder()); //asReadOnlyBuffer resets order to BIG_ENDIAN
    Memory mem = Memory.wrap(byteBufRO);
    for (int i = 0; i < memCapacity; i++) {
      assertEquals(mem.getByte(i), byteBuf.get(i));
    }
    //println(mem.toHexString("HeapBB", 0, memCapacity));
  }
@Test(expectedExceptions = ReadOnlyException.class)
public void checkWrapWithDirectBBReadonlyPut() {
int memCapacity = 64;
ByteBuffer byteBuf = ByteBuffer.allocateDirect(memCapacity);
ByteBuffer byteBufRO = byteBuf.asReadOnlyBuffer();
byteBufRO.order(ByteOrder.nativeOrder());
WritableMemory.writableWrap(byteBufRO);
}
  @Test
  public void checkByteBufferWrapDirectAccess() {
    //a read-only wrap of a writable direct ByteBuffer reads the buffer's contents
    int memCapacity = 64;
    ByteBuffer byteBuf = ByteBuffer.allocateDirect(memCapacity);
    byteBuf.order(ByteOrder.nativeOrder());
    for (int i = 0; i < memCapacity; i++) {
      byteBuf.put(i, (byte) i);
    }
    Memory mem = Memory.wrap(byteBuf);
    for (int i = 0; i < memCapacity; i++) {
      assertEquals(mem.getByte(i), byteBuf.get(i));
    }
    //println( mem.toHexString("HeapBB", 0, memCapacity));
  }
@Test
public void checkIsDirect() {
int memCapacity = 64;
WritableMemory mem = WritableMemory.allocate(memCapacity);
assertFalse(mem.isDirectResource());
try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity)) {
assertTrue(mem1.isDirectResource());
}
}
@Test
public void checkIsReadOnly() {
long[] srcArray = { 1, -2, 3, -4, 5, -6, 7, -8 };
WritableMemory wmem = WritableMemory.writableWrap(srcArray);
assertFalse(wmem.isReadOnly());
Memory memRO = wmem;
assertFalse(memRO.isReadOnly());
for (int i = 0; i < wmem.getCapacity(); i++) {
assertEquals(wmem.getByte(i), memRO.getByte(i));
}
}
  @Test
  public void checkGoodBounds() {
    //offset 50 + length 50 fits exactly within capacity 100; must not throw
    ResourceImpl.checkBounds(50, 50, 100);
  }
@Test
public void checkCompareToHeap() {
//Lexicographic compareTo over heap-backed Memory: equal prefixes, differing
//last byte, different lengths, and identical backing resources.
byte[] arr1 = new byte[] {0, 1, 2, 3};
byte[] arr2 = new byte[] {0, 1, 2, 4};
byte[] arr3 = new byte[] {0, 1, 2, 3, 4};
Memory mem1 = Memory.wrap(arr1);
Memory mem2 = Memory.wrap(arr2);
Memory mem3 = Memory.wrap(arr3);
Memory mem4 = Memory.wrap(arr3); //same resource
int comp = mem1.compareTo(0, 3, mem2, 0, 3);
assertEquals(comp, 0);
comp = mem1.compareTo(0, 4, mem2, 0, 4);
assertEquals(comp, -1);
comp = mem2.compareTo(0, 4, mem1, 0, 4);
assertEquals(comp, 1);
//different lengths
comp = mem1.compareTo(0, 4, mem3, 0, 5);
assertEquals(comp, -1); //shorter equal prefix sorts first
comp = mem3.compareTo(0, 5, mem1, 0, 4);
assertEquals(comp, 1);
comp = mem3.compareTo(0, 5, mem4, 0, 5);
assertEquals(comp, 0);
comp = mem3.compareTo(0, 4, mem4, 1, 4);
assertEquals(comp, -1);
((ResourceImpl)mem3).checkValidAndBounds(0, 5);
}
@Test
public void checkCompareToDirect() {
//Same compareTo semantics when the backing resources are off-heap.
byte[] arr1 = new byte[] {0, 1, 2, 3};
byte[] arr2 = new byte[] {0, 1, 2, 4};
byte[] arr3 = new byte[] {0, 1, 2, 3, 4};
try (WritableMemory mem1 = WritableMemory.allocateDirect(4);
WritableMemory mem2 = WritableMemory.allocateDirect(4);
WritableMemory mem3 = WritableMemory.allocateDirect(5))
{
mem1.putByteArray(0, arr1, 0, 4);
mem2.putByteArray(0, arr2, 0, 4);
mem3.putByteArray(0, arr3, 0, 5);
int comp = mem1.compareTo(0, 3, mem2, 0, 3);
assertEquals(comp, 0);
comp = mem1.compareTo(0, 4, mem2, 0, 4);
assertEquals(comp, -1);
comp = mem2.compareTo(0, 4, mem1, 0, 4);
assertEquals(comp, 1);
//different lengths
comp = mem1.compareTo(0, 4, mem3, 0, 5);
assertEquals(comp, -1);
comp = mem3.compareTo(0, 5, mem1, 0, 4);
assertEquals(comp, 1);
}
}
@Test
public void testCompareToSameStart() {
//Overlapping compares within a single zero-filled Memory: length decides order.
Memory mem = WritableMemory.allocate(3);
assertEquals(-1, mem.compareTo(0, 1, mem, 0, 2));
assertEquals(0, mem.compareTo(1, 1, mem, 1, 1));
assertEquals(1, mem.compareTo(1, 2, mem, 1, 1));
}
@Test
public void checkAsBuffer() {
//Bytes written through a WritableBuffer view are visible through a Buffer view
//of the same Memory.
WritableMemory wmem = WritableMemory.allocate(64);
WritableBuffer wbuf = wmem.asWritableBuffer();
wbuf.setPosition(32);
for (int i = 32; i < 64; i++) { wbuf.putByte((byte)i); }
//println(wbuf.toHexString("Buf", 0, (int)wbuf.getCapacity()));
Buffer buf = wmem.asBuffer();
buf.setPosition(32);
for (int i = 32; i < 64; i++) {
assertEquals(buf.getByte(), i);
}
}
@Test
public void checkCumAndRegionOffset() {
WritableMemory wmem = WritableMemory.allocate(64);
WritableMemory reg = wmem.writableRegion(32, 32);
assertEquals(reg.getTotalOffset(), 32);
//NOTE(review): the extra 16 presumably accounts for the heap array object
//header in the cumulative offset — confirm against ResourceImpl.
assertEquals(((ResourceImpl)reg).getCumulativeOffset(0), 32 + 16);
}
@Test
public void checkIsSameResource() {
//Two independent wraps of the same array are recognized as the same resource.
byte[] byteArr = new byte[64];
WritableMemory wmem1 = WritableMemory.writableWrap(byteArr);
WritableMemory wmem2 = WritableMemory.writableWrap(byteArr);
assertTrue(wmem1.isSameResource(wmem2));
}
@Test
public void checkAsWritableBufferWithBB() {
//Wrapping ignores the ByteBuffer's position/limit: the buffer view spans the
//full capacity with position 0.
ByteBuffer byteBuf = ByteBuffer.allocate(64);
byteBuf.position(16);
byteBuf.limit(48);
WritableMemory wmem = WritableMemory.writableWrap(byteBuf);
WritableBuffer wbuf = wmem.asWritableBuffer();
assertEquals(wbuf.getCapacity(), 64);
assertEquals(wbuf.getPosition(), 0);
assertEquals(wbuf.getEnd(), 64);
}
@Test(expectedExceptions = ReadOnlyException.class)
public void checkAsWritableRegionRO() {
//A read-only wrap downcast to WritableMemory must still refuse writable regions.
ByteBuffer byteBuf = ByteBuffer.allocate(64);
WritableMemory wmem = (WritableMemory) Memory.wrap(byteBuf);
wmem.writableRegion(0, 1);
}
@Test(expectedExceptions = ReadOnlyException.class)
public void checkAsWritableBufferRO() {
//Same guard for asWritableBuffer() on a read-only wrap.
ByteBuffer byteBuf = ByteBuffer.allocate(64);
WritableMemory wmem = (WritableMemory) Memory.wrap(byteBuf);
wmem.asWritableBuffer();
}
@Test
public void checkZeroMemory() {
//A zero-length region is legal and reports zero capacity.
//Fix: the method was package-private; TestNG test methods must be public
//(like every other test in this class) or the test can be silently skipped.
WritableMemory wmem = WritableMemory.allocate(8);
WritableMemory reg = wmem.writableRegion(0, 0);
assertEquals(reg.getCapacity(), 0);
}
@Test
public void checkAsBufferNonNative() {
//A short written native-endian reads back byte-swapped through a
//non-native-order Buffer view: 0x0001 <-> 0x0100 (256).
WritableMemory wmem = WritableMemory.allocate(64);
wmem.putShort(0, (short) 1);
Buffer buf = wmem.asBuffer(Util.NON_NATIVE_BYTE_ORDER);
assertEquals(buf.getShort(0), 256);
}
@Test
public void printlnTest() {
println("PRINTING: " + this.getClass().getName());
}
/**
* @param s value to print
*/
static void println(String s) {
//System.out.println(s); //disable here
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.Util.UNSAFE_COPY_THRESHOLD_BYTES;
import static org.testng.Assert.assertEquals;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Tests copyTo() across heap and direct resources, writable and read-only
* views, and regions, verifying the copied long values land at the expected
* destination offsets.
*/
public class CopyMemoryTest {
@Test
public void heapWSource() {
//Copy k1 longs from a filled heap source into the top half of a 2*k1 heap dest.
int k1 = 1 << 20; //longs
int k2 = 2 * k1;
WritableMemory srcMem = genMem(k1, false); //!empty
//println(srcMem.toHexString("src: ", 0, k1 << 3));
WritableMemory dstMem = genMem(k2, true);
srcMem.copyTo(0, dstMem, k1 << 3, k1 << 3);
//println(dstMem.toHexString("dst: ", 0, k2 << 3));
check(dstMem, k1, k1, 1);
}
@Test
public void heapROSource() {
//Same copy, but through the read-only Memory interface.
int k1 = 1 << 20; //longs
int k2 = 2 * k1;
Memory srcMem = genMem(k1, false); //!empty
WritableMemory dstMem = genMem(k2, true);
srcMem.copyTo(0, dstMem, k1 << 3, k1 << 3);
check(dstMem, k1, k1, 1);
}
@Test
public void directWSource() throws Exception {
//Direct (off-heap) writable source, heap destination.
int k1 = 1 << 20; //longs
int k2 = 2 * k1;
try (WritableMemory srcMem = genWRH(k1, false)) {
WritableMemory dstMem = genMem(k2, true);
srcMem.copyTo(0, dstMem, k1 << 3, k1 << 3);
check(dstMem, k1, k1, 1);
}
}
@Test
public void directROSource() throws Exception {
//Direct source accessed through the read-only interface.
int k1 = 1 << 20; //longs
int k2 = 2 * k1;
try (Memory srcMem = genWRH(k1, false)) {
WritableMemory dstMem = genMem(k2, true);
srcMem.copyTo(0, dstMem, k1 << 3, k1 << 3);
check(dstMem, k1, k1, 1);
}
}
@Test
public void heapWSrcRegion() {
//Copy from a writable region covering the top half of the source.
int k1 = 1 << 20; //longs
//gen baseMem of k1 longs w data
WritableMemory baseMem = genMem(k1, false); //!empty
//gen src region of k1/2 longs, off= k1/2
WritableMemory srcReg = baseMem.writableRegion((k1 / 2) << 3, (k1 / 2) << 3);
WritableMemory dstMem = genMem(2 * k1, true); //empty
srcReg.copyTo(0, dstMem, k1 << 3, (k1 / 2) << 3);
//println(dstMem.toHexString("dstMem: ", k1 << 3, (k1/2) << 3));
check(dstMem, k1, k1 / 2, (k1 / 2) + 1);
}
@Test
public void heapROSrcRegion() {
//Read-only region of a heap source.
int k1 = 1 << 20; //longs
//gen baseMem of k1 longs w data
WritableMemory baseMem = genMem(k1, false); //!empty
//gen src region of k1/2 longs, off= k1/2
Memory srcReg = baseMem.region((k1 / 2) << 3, (k1 / 2) << 3);
WritableMemory dstMem = genMem(2 * k1, true); //empty
srcReg.copyTo(0, dstMem, k1 << 3, (k1 / 2) << 3);
check(dstMem, k1, k1 / 2, (k1 / 2) + 1);
}
@Test
public void directROSrcRegion() throws Exception {
//Read-only region of a direct source.
int k1 = 1 << 20; //longs
//gen baseMem of k1 longs w data, direct
try (Memory baseMem = genWRH(k1, false)) {
//gen src region of k1/2 longs, off= k1/2
Memory srcReg = baseMem.region((k1 / 2) << 3, (k1 / 2) << 3);
WritableMemory dstMem = genMem(2 * k1, true); //empty
srcReg.copyTo(0, dstMem, k1 << 3, (k1 / 2) << 3);
check(dstMem, k1, k1 / 2, (k1 / 2) + 1);
}
}
@Test
public void testOverlappingCopyLeftToRight() {
//Overlapping self-copy larger than the unsafe copy threshold, moving forward;
//the result must equal a copy from an unmodified reference array.
byte[] bytes = new byte[((UNSAFE_COPY_THRESHOLD_BYTES * 5) / 2) + 1];
ThreadLocalRandom.current().nextBytes(bytes);
byte[] referenceBytes = bytes.clone();
Memory referenceMem = Memory.wrap(referenceBytes);
WritableMemory mem = WritableMemory.writableWrap(bytes);
long copyLen = UNSAFE_COPY_THRESHOLD_BYTES * 2;
mem.copyTo(0, mem, UNSAFE_COPY_THRESHOLD_BYTES / 2, copyLen);
Assert.assertEquals(0, mem.compareTo(UNSAFE_COPY_THRESHOLD_BYTES / 2, copyLen, referenceMem, 0,
copyLen));
}
@Test
public void testOverlappingCopyRightToLeft() {
//Same overlap check, moving the window backward.
byte[] bytes = new byte[((UNSAFE_COPY_THRESHOLD_BYTES * 5) / 2) + 1];
ThreadLocalRandom.current().nextBytes(bytes);
byte[] referenceBytes = bytes.clone();
Memory referenceMem = Memory.wrap(referenceBytes);
WritableMemory mem = WritableMemory.writableWrap(bytes);
long copyLen = UNSAFE_COPY_THRESHOLD_BYTES * 2;
mem.copyTo(UNSAFE_COPY_THRESHOLD_BYTES / 2, mem, 0, copyLen);
Assert.assertEquals(0, mem.compareTo(0, copyLen, referenceMem, UNSAFE_COPY_THRESHOLD_BYTES / 2,
copyLen));
}
/**
* Asserts that lengthLongs longs starting at offsetLongs hold the ascending
* sequence startValue, startValue+1, ...
*/
private static void check(Memory mem, int offsetLongs, int lengthLongs, int startValue) {
int offBytes = offsetLongs << 3;
for (long i = 0; i < lengthLongs; i++) {
assertEquals(mem.getLong(offBytes + (i << 3)), i + startValue);
}
}
/**
* Allocates a direct WritableMemory of the given number of longs; cleared if
* empty, otherwise filled with the sequence 1..longs. Caller must close.
*/
private static WritableMemory genWRH(int longs, boolean empty) {
WritableMemory mem = WritableMemory.allocateDirect(longs << 3);
if (empty) {
mem.clear(); //direct memory is not zeroed on allocation
} else {
for (int i = 0; i < longs; i++) { mem.putLong(i << 3, i + 1); }
}
return mem;
}
/**
* Heap counterpart of genWRH; heap allocation is already zeroed so empty
* needs no explicit clear.
*/
private static WritableMemory genMem(int longs, boolean empty) {
WritableMemory mem = WritableMemory.allocate(longs << 3);
if (!empty) {
for (int i = 0; i < longs; i++) { mem.putLong(i << 3, i + 1); }
}
return mem;
}
@Test
public void printlnTest() {
println("PRINTING: " + this.getClass().getName());
}
/**
* @param s value to print
*/
static void println(String s) {
//System.out.println(s); //disable here
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
* Exercises the positional (relative) and absolute-offset get/put API of
* WritableBuffer for every primitive type, over direct memory.
*/
public class CommonBufferTest {
@Test
public void checkSetGet() throws Exception {
int memCapacity = 60; //must be at least 60
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
WritableBuffer buf = mem.asWritableBuffer();
assertEquals(buf.getCapacity(), memCapacity);
setGetTests(buf);
setGetTests2(buf);
}
}
//Relative puts of each primitive type, then resetPosition() and verify both
//the absolute-read-at-position and relative-read forms for each value.
public static void setGetTests(WritableBuffer buf) {
buf.putBoolean(true);
buf.putBoolean(false);
buf.putByte((byte) -1);
buf.putByte((byte) 0);
buf.putChar('A');
buf.putChar('Z');
buf.putShort(Short.MAX_VALUE);
buf.putShort(Short.MIN_VALUE);
buf.putInt(Integer.MAX_VALUE);
buf.putInt(Integer.MIN_VALUE);
buf.putFloat(Float.MAX_VALUE);
buf.putFloat(Float.MIN_VALUE);
buf.putLong(Long.MAX_VALUE);
buf.putLong(Long.MIN_VALUE);
buf.putDouble(Double.MAX_VALUE);
buf.putDouble(Double.MIN_VALUE);
buf.resetPosition();
assertEquals(buf.getBoolean(buf.getPosition()), true);
assertEquals(buf.getBoolean(), true);
assertEquals(buf.getBoolean(buf.getPosition()), false);
assertEquals(buf.getBoolean(), false);
assertEquals(buf.getByte(buf.getPosition()), (byte) -1);
assertEquals(buf.getByte(), (byte) -1);
assertEquals(buf.getByte(buf.getPosition()), (byte)0);
assertEquals(buf.getByte(), (byte)0);
assertEquals(buf.getChar(buf.getPosition()), 'A');
assertEquals(buf.getChar(), 'A');
assertEquals(buf.getChar(buf.getPosition()), 'Z');
assertEquals(buf.getChar(), 'Z');
assertEquals(buf.getShort(buf.getPosition()), Short.MAX_VALUE);
assertEquals(buf.getShort(), Short.MAX_VALUE);
assertEquals(buf.getShort(buf.getPosition()), Short.MIN_VALUE);
assertEquals(buf.getShort(), Short.MIN_VALUE);
assertEquals(buf.getInt(buf.getPosition()), Integer.MAX_VALUE);
assertEquals(buf.getInt(), Integer.MAX_VALUE);
assertEquals(buf.getInt(buf.getPosition()), Integer.MIN_VALUE);
assertEquals(buf.getInt(), Integer.MIN_VALUE);
assertEquals(buf.getFloat(buf.getPosition()), Float.MAX_VALUE);
assertEquals(buf.getFloat(), Float.MAX_VALUE);
assertEquals(buf.getFloat(buf.getPosition()), Float.MIN_VALUE);
assertEquals(buf.getFloat(), Float.MIN_VALUE);
assertEquals(buf.getLong(buf.getPosition()), Long.MAX_VALUE);
assertEquals(buf.getLong(), Long.MAX_VALUE);
assertEquals(buf.getLong(buf.getPosition()), Long.MIN_VALUE);
assertEquals(buf.getLong(), Long.MIN_VALUE);
assertEquals(buf.getDouble(buf.getPosition()), Double.MAX_VALUE);
assertEquals(buf.getDouble(), Double.MAX_VALUE);
assertEquals(buf.getDouble(buf.getPosition()), Double.MIN_VALUE);
assertEquals(buf.getDouble(), Double.MIN_VALUE);
}
//Absolute-offset puts/gets; offsets are hand-laid-out by type size (total 60).
public static void setGetTests2(WritableBuffer buf) {
buf.putBoolean(0, true);
buf.putBoolean(1, false);
buf.putByte(2, (byte) -1);
buf.putByte(3, (byte) 0);
buf.putChar(4,'A');
buf.putChar(6,'Z');
buf.putShort(8, Short.MAX_VALUE);
buf.putShort(10, Short.MIN_VALUE);
buf.putInt(12, Integer.MAX_VALUE);
buf.putInt(16, Integer.MIN_VALUE);
buf.putFloat(20, Float.MAX_VALUE);
buf.putFloat(24, Float.MIN_VALUE);
buf.putLong(28, Long.MAX_VALUE);
buf.putLong(36, Long.MIN_VALUE);
buf.putDouble(44, Double.MAX_VALUE);
buf.putDouble(52, Double.MIN_VALUE);
assertEquals(buf.getBoolean(0), true);
assertEquals(buf.getBoolean(1), false);
assertEquals(buf.getByte(2), (byte) -1);
assertEquals(buf.getByte(3), (byte)0);
assertEquals(buf.getChar(4), 'A');
assertEquals(buf.getChar(6), 'Z');
assertEquals(buf.getShort(8), Short.MAX_VALUE);
assertEquals(buf.getShort(10), Short.MIN_VALUE);
assertEquals(buf.getInt(12), Integer.MAX_VALUE);
assertEquals(buf.getInt(16), Integer.MIN_VALUE);
assertEquals(buf.getFloat(20), Float.MAX_VALUE);
assertEquals(buf.getFloat(24), Float.MIN_VALUE);
assertEquals(buf.getLong(28), Long.MAX_VALUE);
assertEquals(buf.getLong(36), Long.MIN_VALUE);
assertEquals(buf.getDouble(44), Double.MAX_VALUE);
assertEquals(buf.getDouble(52), Double.MIN_VALUE);
}
@Test
public void checkSetGetArrays() throws Exception {
int memCapacity = 32;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
WritableBuffer buf = mem.asWritableBuffer();
assertEquals(buf.getCapacity(), memCapacity);
setGetArraysTests(buf);
}
}
//Round-trips a 4-element array of every primitive type through the buffer,
//resetting the position between put and get.
public static void setGetArraysTests(WritableBuffer buf) {
int words = 4;
boolean[] srcArray1 = {true, false, true, false};
boolean[] dstArray1 = new boolean[words];
buf.resetPosition();
buf.fill((byte)127); //pre-fill so stale bytes would be detected
buf.resetPosition();
buf.putBooleanArray(srcArray1, 0, words);
buf.resetPosition();
buf.getBooleanArray(dstArray1, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray1[i], srcArray1[i]);
}
byte[] srcArray2 = { 1, -2, 3, -4 };
byte[] dstArray2 = new byte[4];
buf.resetPosition();
buf.putByteArray(srcArray2, 0, words);
buf.resetPosition();
buf.getByteArray(dstArray2, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray2[i], srcArray2[i]);
}
char[] srcArray3 = { 'A', 'B', 'C', 'D' };
char[] dstArray3 = new char[words];
buf.resetPosition();
buf.putCharArray(srcArray3, 0, words);
buf.resetPosition();
buf.getCharArray(dstArray3, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray3[i], srcArray3[i]);
}
double[] srcArray4 = { 1.0, -2.0, 3.0, -4.0 };
double[] dstArray4 = new double[words];
buf.resetPosition();
buf.putDoubleArray(srcArray4, 0, words);
buf.resetPosition();
buf.getDoubleArray(dstArray4, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray4[i], srcArray4[i], 0.0);
}
float[] srcArray5 = { (float)1.0, (float)-2.0, (float)3.0, (float)-4.0 };
float[] dstArray5 = new float[words];
buf.resetPosition();
buf.putFloatArray(srcArray5, 0, words);
buf.resetPosition();
buf.getFloatArray(dstArray5, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray5[i], srcArray5[i], 0.0);
}
int[] srcArray6 = { 1, -2, 3, -4 };
int[] dstArray6 = new int[words];
buf.resetPosition();
buf.putIntArray(srcArray6, 0, words);
buf.resetPosition();
buf.getIntArray(dstArray6, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray6[i], srcArray6[i]);
}
long[] srcArray7 = { 1, -2, 3, -4 };
long[] dstArray7 = new long[words];
buf.resetPosition();
buf.putLongArray(srcArray7, 0, words);
buf.resetPosition();
buf.getLongArray(dstArray7, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray7[i], srcArray7[i]);
}
short[] srcArray8 = { 1, -2, 3, -4 };
short[] dstArray8 = new short[words];
buf.resetPosition();
buf.putShortArray(srcArray8, 0, words);
buf.resetPosition();
buf.getShortArray(dstArray8, 0, words);
for (int i = 0; i < words; i++) {
assertEquals(dstArray8[i], srcArray8[i]);
}
}
@Test
public void checkSetGetPartialArraysWithOffset() throws Exception {
int memCapacity = 32;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
WritableBuffer buf = mem.asWritableBuffer();
assertEquals(buf.getCapacity(), memCapacity);
setGetPartialArraysWithOffsetTests(buf);
}
}
//Round-trips only the second half (offset 2, length 2) of each 4-element
//source array; only indices 2..3 of the destinations are meaningful.
public static void setGetPartialArraysWithOffsetTests(WritableBuffer buf) {
int items = 4;
boolean[] srcArray1 = {true, false, true, false};
boolean[] dstArray1 = new boolean[items];
buf.resetPosition();
buf.putBooleanArray(srcArray1, 2, items / 2);
buf.resetPosition();
buf.getBooleanArray(dstArray1, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray1[i], srcArray1[i]);
}
byte[] srcArray2 = { 1, -2, 3, -4 };
byte[] dstArray2 = new byte[items];
buf.resetPosition();
buf.putByteArray(srcArray2, 2, items / 2);
buf.resetPosition();
buf.getByteArray(dstArray2, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray2[i], srcArray2[i]);
}
char[] srcArray3 = { 'A', 'B', 'C', 'D' };
char[] dstArray3 = new char[items];
buf.resetPosition();
buf.putCharArray(srcArray3, 2, items / 2);
buf.resetPosition();
buf.getCharArray(dstArray3, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray3[i], srcArray3[i]);
}
double[] srcArray4 = { 1.0, -2.0, 3.0, -4.0 };
double[] dstArray4 = new double[items];
buf.resetPosition();
buf.putDoubleArray(srcArray4, 2, items / 2);
buf.resetPosition();
buf.getDoubleArray(dstArray4, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray4[i], srcArray4[i], 0.0);
}
float[] srcArray5 = { (float)1.0, (float)-2.0, (float)3.0, (float)-4.0 };
float[] dstArray5 = new float[items];
buf.resetPosition();
buf.putFloatArray(srcArray5, 2, items / 2);
buf.resetPosition();
buf.getFloatArray(dstArray5, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray5[i], srcArray5[i], 0.0);
}
int[] srcArray6 = { 1, -2, 3, -4 };
int[] dstArray6 = new int[items];
buf.resetPosition();
buf.putIntArray(srcArray6, 2, items / 2);
buf.resetPosition();
buf.getIntArray(dstArray6, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray6[i], srcArray6[i]);
}
long[] srcArray7 = { 1, -2, 3, -4 };
long[] dstArray7 = new long[items];
buf.resetPosition();
buf.putLongArray(srcArray7, 2, items / 2);
buf.resetPosition();
buf.getLongArray(dstArray7, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray7[i], srcArray7[i]);
}
short[] srcArray8 = { 1, -2, 3, -4 };
short[] dstArray8 = new short[items];
buf.resetPosition();
buf.putShortArray(srcArray8, 2, items / 2);
buf.resetPosition();
buf.getShortArray(dstArray8, 2, items / 2);
for (int i = 2; i < items; i++) {
assertEquals(dstArray8[i], srcArray8[i]);
}
}
@Test
public void checkSetClearMemoryRegions() throws Exception {
//Drives the fill/clear region tests, then verifies the whole memory is zero.
int memCapacity = 64; //must be 64
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
WritableBuffer buf = mem.asWritableBuffer();
assertEquals(buf.getCapacity(), memCapacity);
setClearMemoryRegionsTests(buf); //requires println enabled to visually check
buf.resetPosition();
for (int i = 0; i < memCapacity; i++) {
assertEquals(mem.getByte(i), 0);
}
}
}
//enable println statements to visually check
/**
* Fills and clears two adjacent regions (region 1: [0,28), region 2: [28,60))
* and finally the whole buffer, asserting the expected byte values after each
* step.
*/
public static void setClearMemoryRegionsTests(WritableBuffer buf) {
int accessCapacity = (int)buf.getCapacity();
//define regions
int reg1Start = 0;
int reg1Len = 28;
int reg2Start = 28;
int reg2Len = 32;
//set region 1
byte b1 = 5;
//use start + len like the region-2 calls; with reg1Start == 0 the value is
//unchanged (28) but the intent no longer relies on that coincidence
buf.setStartPositionEnd(reg1Start, reg1Start, reg1Start + reg1Len);
buf.fill(b1);
buf.resetPosition();
for (int i = reg1Start; i < (reg1Len + reg1Start); i++) {
assertEquals(buf.getByte(), b1);
}
//println(buf.toHexString("Region1 to 5", reg1Start, reg1Len));
//set region 2
byte b2 = 7;
buf.setStartPositionEnd(reg2Start, reg2Start, reg2Start + reg2Len);
buf.fill(b2);
//println(mem.toHexString("Fill", 0, (int)mem.getCapacity()));
buf.resetPosition();
for (int i = reg2Start; i < (reg2Start + reg2Len); i++) {
assertEquals(buf.getByte(), b2);
}
//println(buf.toHexString("Region2 to 7", reg2Start, reg2Len));
//clear region 1
byte zeroByte = 0;
//Fix: the end bound was reg2Len (32), which also wiped the first 4 bytes of
//region 2's fill; region 1 ends at reg1Start + reg1Len (28).
buf.setStartPositionEnd(reg1Start, reg1Start, reg1Start + reg1Len);
buf.resetPosition();
buf.clear();
buf.resetPosition();
for (int i = reg1Start; i < (reg1Start + reg1Len); i++) {
assertEquals(buf.getByte(), zeroByte);
}
//println(buf.toHexString("Region1 cleared", reg1Start, reg1Len));
//clear region 2
buf.setStartPositionEnd(reg2Start, reg2Start, reg2Start + reg2Len);
buf.resetPosition();
buf.clear();
buf.resetPosition();
for (int i = reg2Start; i < (reg2Len + reg2Start); i++) {
assertEquals(buf.getByte(), zeroByte);
}
//println(buf.toHexString("Region2 cleared", reg2Start, reg2Len));
//set all to ones
buf.setStartPositionEnd(reg1Start, reg1Start, accessCapacity);
byte b4 = 127;
buf.resetPosition();
buf.fill(b4);
buf.resetPosition();
for (int i = 0; i < accessCapacity; i++) {
assertEquals(buf.getByte(), b4);
}
//println(buf.toHexString("Region1 + Region2 all ones", 0, accessCapacity));
//clear all
buf.resetPosition();
buf.clear();
buf.resetPosition();
for (int i = 0; i < accessCapacity; i++) {
assertEquals(buf.getByte(), zeroByte);
}
//println(buf.toHexString("Region1 + Region2 cleared", 0, accessCapacity));
}
@Test
public void checkToHexStringAllMem() throws Exception {
int memCapacity = 48; //must be 48
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
WritableBuffer buf = mem.asWritableBuffer();
assertEquals(buf.getCapacity(), memCapacity);
toHexStringAllMemTests(buf); //requires println enabled to visually check
}
}
//enable println to visually check
//Writes ascending bytes; the hex-dump assertions are visual-only (commented out).
public static void toHexStringAllMemTests(WritableBuffer buf) {
int memCapacity = (int)buf.getCapacity();
for (int i = 0; i < memCapacity; i++) {
buf.putByte((byte)i);
}
//println(buf.toHexString("Check toHexString(0, 48) to integers", 0, memCapacity));
//println(buf.toHexString("Check toHexString(8, 40)", 8, 40));
}
@Test
public void printlnTest() {
println("PRINTING: " + this.getClass().getName());
}
/**
* @param s value to print
*/
static void println(String s) {
//System.out.println(s); //disable here
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.Buffer;
import org.apache.datasketches.memory.DefaultMemoryRequestServer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.ReadOnlyException;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
* Tests the positional Buffer API over ByteBuffer- and array-backed resources.
*/
public class Buffer2Test {
@Test
public void testWrapHeapByteBuf() {
//A Buffer wrapping a read-only view of a heap ByteBuffer reads the same
//byte stream as the ByteBuffer itself.
ByteBuffer bb = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder());
//Fix: the counter was a boxed Byte, forcing an unbox/increment/rebox on
//every iteration for no benefit; a primitive byte behaves identically here.
byte b = 0;
while (bb.hasRemaining()) {
bb.put(b);
b++;
}
bb.position(0);
Buffer buffer = Buffer.wrap(bb.asReadOnlyBuffer().order(ByteOrder.nativeOrder()));
while (buffer.hasRemaining()) {
byte a1 = bb.get();
byte b1 = buffer.getByte();
assertEquals(a1, b1);
}
assertEquals(true, buffer.isHeapResource());
assertEquals(true, buffer.isByteBufferResource());
}
@Test
public void testWrapDirectBB() {
//Same read-back check for a direct ByteBuffer; the resource is not heap.
ByteBuffer bb = ByteBuffer.allocateDirect(64).order(ByteOrder.nativeOrder());
byte b = 0; //primitive counter (was boxed Byte)
while (bb.hasRemaining()) {
bb.put(b);
b++;
}
bb.position(0);
Buffer buffer = Buffer.wrap(bb);
while (buffer.hasRemaining()) {
assertEquals(bb.get(), buffer.getByte());
}
assertEquals(false, buffer.isHeapResource());
assertEquals(true, buffer.isByteBufferResource());
}
//The next eight tests follow one pattern per primitive type: fill an array,
//wrap it as Memory, stream-read via a Buffer, then bulk-read and compare.
@Test
public void testWrapByteArray() {
byte[] byteArray = new byte[64];
for (byte i = 0; i < 64; i++) {
byteArray[i] = i;
}
Buffer buffer = Memory.wrap(byteArray).asBuffer();
int i = 0;
while (buffer.hasRemaining()) {
assertEquals(byteArray[i++], buffer.getByte());
}
buffer.setPosition(0);
byte[] copyByteArray = new byte[64];
buffer.getByteArray(copyByteArray, 0, 64);
assertEquals(byteArray, copyByteArray);
assertEquals(true, buffer.isHeapResource());
assertEquals(false, buffer.isByteBufferResource());
}
@Test
public void testWrapCharArray() {
char[] charArray = new char[64];
for (char i = 0; i < 64; i++) {
charArray[i] = i;
}
Buffer buffer = Memory.wrap(charArray).asBuffer();
int i = 0;
while (buffer.hasRemaining()) {
assertEquals(charArray[i++], buffer.getChar());
}
buffer.setPosition(0);
char[] copyCharArray = new char[64];
buffer.getCharArray(copyCharArray, 0, 64);
assertEquals(charArray, copyCharArray);
}
@Test
public void testWrapShortArray() {
short[] shortArray = new short[64];
for (short i = 0; i < 64; i++) {
shortArray[i] = i;
}
Buffer buffer = Memory.wrap(shortArray).asBuffer();
int i = 0;
while (buffer.hasRemaining()) {
assertEquals(shortArray[i++], buffer.getShort());
}
buffer.setPosition(0);
short[] copyShortArray = new short[64];
buffer.getShortArray(copyShortArray, 0, 64);
assertEquals(shortArray, copyShortArray);
}
@Test
public void testWrapIntArray() {
int[] intArray = new int[64];
for (int i = 0; i < 64; i++) {
intArray[i] = i;
}
Buffer buffer = Memory.wrap(intArray).asBuffer();
int i = 0;
while (buffer.hasRemaining()) {
assertEquals(intArray[i++], buffer.getInt());
}
buffer.setPosition(0);
int[] copyIntArray = new int[64];
buffer.getIntArray(copyIntArray, 0, 64);
assertEquals(intArray, copyIntArray);
}
@Test
public void testWrapLongArray() {
long[] longArray = new long[64];
for (int i = 0; i < 64; i++) {
longArray[i] = i;
}
Buffer buffer = Memory.wrap(longArray).asBuffer();
int i = 0;
while (buffer.hasRemaining()) {
assertEquals(longArray[i++], buffer.getLong());
}
buffer.setPosition(0);
long[] copyLongArray = new long[64];
buffer.getLongArray(copyLongArray, 0, 64);
assertEquals(longArray, copyLongArray);
}
@Test
public void testWrapFloatArray() {
float[] floatArray = new float[64];
for (int i = 0; i < 64; i++) {
floatArray[i] = i;
}
Buffer buffer = Memory.wrap(floatArray).asBuffer();
int i = 0;
while (buffer.hasRemaining()) {
assertEquals(floatArray[i++], buffer.getFloat());
}
buffer.setPosition(0);
float[] copyFloatArray = new float[64];
buffer.getFloatArray(copyFloatArray, 0, 64);
assertEquals(floatArray, copyFloatArray);
}
@Test
public void testWrapDoubleArray() {
double[] doubleArray = new double[64];
for (int i = 0; i < 64; i++) {
doubleArray[i] = i;
}
Buffer buffer = Memory.wrap(doubleArray).asBuffer();
int i = 0;
while (buffer.hasRemaining()) {
assertEquals(doubleArray[i++], buffer.getDouble());
}
buffer.setPosition(0);
double[] copyDoubleArray = new double[64];
buffer.getDoubleArray(copyDoubleArray, 0, 64);
assertEquals(doubleArray, copyDoubleArray);
}
@Test
public void testWrapBooleanArray() {
boolean[] booleanArray = new boolean[64];
for (int i = 0; i < 64; i++) {
if ((i % 3) == 0) {
booleanArray[i] = true;
}
}
Buffer buffer = Memory.wrap(booleanArray).asBuffer();
int i = 0;
while (buffer.hasRemaining()) {
assertEquals(booleanArray[i++], buffer.getBoolean());
}
buffer.setPosition(0);
boolean[] copyBooleanArray = new boolean[64];
buffer.getBooleanArray(copyBooleanArray, 0, 64);
for (int j = 0; j < copyBooleanArray.length; j++) {
assertEquals(booleanArray[j], copyBooleanArray[j]);
}
}
@Test
public void testByteBufferPositionPreservation() {
//Wrapping honors the ByteBuffer's current position (10): both read the
//remaining 54 bytes in lockstep.
ByteBuffer bb = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder());
//Fix (here and in the three tests below): the loop counter was a boxed
//Byte, causing unbox/rebox churn each increment; primitive byte is identical.
byte b = 0;
while (bb.hasRemaining()) {
bb.put(b);
b++;
}
bb.position(10);
Buffer buffer = Buffer.wrap(bb);
while (buffer.hasRemaining()) {
assertEquals(bb.get(), buffer.getByte());
}
}
@Test
public void testGetAndHasRemaining() {
//Remaining bytes of the wrapped Buffer match the ByteBuffer's remaining.
ByteBuffer bb = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder());
byte b = 0;
while (bb.hasRemaining()) {
bb.put(b);
b++;
}
bb.position(10);
Buffer buffer = Buffer.wrap(bb);
assertEquals(bb.hasRemaining(), buffer.hasRemaining());
assertEquals(bb.remaining(), buffer.getRemaining());
}
@Test
public void testGetSetIncResetPosition() {
//setPosition / incrementPosition / resetPosition semantics.
ByteBuffer bb = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder());
byte b = 0;
while (bb.hasRemaining()) {
bb.put(b);
b++;
}
bb.position(10);
Buffer buffer = Buffer.wrap(bb);
assertEquals(bb.position(), buffer.getPosition());
assertEquals(30, buffer.setPosition(30).getPosition());
assertEquals(40, buffer.incrementPosition(10).getPosition());
assertEquals(0, buffer.resetPosition().getPosition());
}
@Test
public void testByteBufferSlice() {
//Wrapping a slice: the Buffer starts at 0 while the ByteBuffer tracks 10 ahead.
ByteBuffer bb = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder());
byte b = 0;
while (bb.hasRemaining()) {
bb.put(b);
b++;
}
bb.position(10);
Buffer buffer = Buffer.wrap(bb.slice().order(ByteOrder.nativeOrder()));
while (buffer.hasRemaining()) {
assertEquals(bb.get(), buffer.getByte());
}
assertEquals(bb.position(), buffer.getPosition() + 10);
assertEquals(30, buffer.setPosition(30).getPosition());
assertEquals(40, buffer.incrementPosition(10).getPosition());
assertEquals(0, buffer.resetPosition().getPosition());
}
@Test
public void testDuplicateAndRegion() {
ByteBuffer bb = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder());
Byte b = 0;
while (bb.hasRemaining()) {
bb.put(b);
b++;
}
bb.position(10);
Buffer buffer = Buffer.wrap(bb.slice().order(ByteOrder.nativeOrder())); //slice size = 54
buffer.setPosition(30);//remaining = 24
Buffer dupBuffer = buffer.duplicate(); //all 54
Buffer regionBuffer = buffer.region(); //24
assertEquals(dupBuffer.getStart(), buffer.getStart());
assertEquals(regionBuffer.getStart(), buffer.getStart());
assertEquals(dupBuffer.getEnd(), buffer.getEnd());
assertEquals(regionBuffer.getEnd(), buffer.getRemaining());
assertEquals(dupBuffer.getPosition(), buffer.getPosition());
assertEquals(regionBuffer.getPosition(), 0);
assertEquals(dupBuffer.getCapacity(), buffer.getCapacity());
assertEquals(regionBuffer.getCapacity(), buffer.getCapacity() - 30);
}
@Test
public void checkRORegions() {
  final int longs = 16;
  final int half = longs / 2;
  final long[] src = new long[longs];
  for (int i = 0; i < longs; i++) { src[i] = i; }
  final Buffer whole = Memory.wrap(src).asBuffer();
  //read-only region covering the upper half of the buffer
  final Buffer upper = whole.region(half * 8, half * 8, whole.getByteOrder());
  //region offsets are relative, so index i maps to source value i + half
  for (int i = 0; i < half; i++) {
    assertEquals(upper.getLong(i * 8), (long) (i + half));
  }
}
@Test
public void testAsMemory() {
  ByteBuffer bb = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder());
  //Fill 0..63. Primitive byte: the original boxed Byte forced an unbox/box cycle on every b++.
  byte b = 0;
  while (bb.hasRemaining()) {
    bb.put(b);
    b++;
  }
  bb.position(10);
  Buffer buffer = Buffer.wrap(bb);
  Memory memory = buffer.asMemory();
  //the Memory view shares the full capacity of the Buffer
  assertEquals(buffer.getCapacity(), memory.getCapacity());
  //positional reads from the Buffer must match absolute reads from the Memory view
  while (buffer.hasRemaining()) {
    assertEquals(memory.getByte(buffer.getPosition()), buffer.getByte());
  }
}
@Test(expectedExceptions = ReadOnlyException.class)
public void testROByteBuffer() {
  final ByteBuffer roBB = ByteBuffer.wrap(new byte[64]).asReadOnlyBuffer();
  //a Buffer over a read-only ByteBuffer may be cast, but writes must still throw
  final WritableBuffer wbuf = (WritableBuffer) Buffer.wrap(roBB);
  wbuf.putByte(0, (byte) 1);
}
@Test(expectedExceptions = ReadOnlyException.class)
public void testROByteBuffer2() {
  final byte[] arr = new byte[64];
  final ByteBuffer roBB = ByteBuffer.wrap(arr).asReadOnlyBuffer();
  //bulk writes must also be rejected on a read-only backing store
  final WritableBuffer wbuf = (WritableBuffer) Buffer.wrap(roBB);
  wbuf.putByteArray(arr, 0, 64);
}
@Test(expectedExceptions = ReadOnlyException.class)
public void testIllegalFill() {
  final ByteBuffer roBB = ByteBuffer.wrap(new byte[64]).asReadOnlyBuffer();
  //fill() is a write and must throw on a read-only resource
  final WritableBuffer wbuf = (WritableBuffer) Buffer.wrap(roBB);
  wbuf.fill((byte) 0);
}
@Test
public void checkWritableWrap() {
  final ByteBuffer bb = ByteBuffer.allocate(16);
  //wrapping with a null MemoryRequestServer is permitted
  assertNotNull(WritableBuffer.writableWrap(bb, ByteOrder.nativeOrder(), null));
  //wrapping with the default MemoryRequestServer is also permitted
  assertNotNull(
      WritableBuffer.writableWrap(bb, ByteOrder.nativeOrder(), new DefaultMemoryRequestServer()));
}
@Test
public void testWritableDuplicate() {
  final WritableMemory wmem = WritableMemory.writableWrap(new byte[1]);
  //a writable duplicate preserves the one-byte capacity
  final WritableBuffer dup = wmem.asWritableBuffer().writableDuplicate();
  assertEquals(dup.getCapacity(), 1);
  //so does a read-only Buffer view of the same memory
  final Buffer ro = wmem.asBuffer();
  assertEquals(ro.getCapacity(), 1);
}
@Test
public void checkIndependence() {
//Views created twice from the same parent must be distinct objects (independent
//position/region state) while still reporting the same underlying resource.
int cap = 64;
WritableMemory wmem = WritableMemory.allocate(cap);
WritableBuffer wbuf1 = wmem.asWritableBuffer();
WritableBuffer wbuf2 = wmem.asWritableBuffer();
assertFalse(wbuf1 == wbuf2); //distinct view objects
assertTrue(wbuf1.isSameResource(wbuf2)); //same backing memory
WritableMemory reg1 = wmem.writableRegion(0, cap);
WritableMemory reg2 = wmem.writableRegion(0, cap);
assertFalse(reg1 == reg2);
assertTrue(reg1.isSameResource(reg2));
WritableBuffer wbuf3 = wbuf1.writableRegion();
WritableBuffer wbuf4 = wbuf1.writableRegion();
assertFalse(wbuf3 == wbuf4);
assertTrue(wbuf3.isSameResource(wbuf4));
}
@Test
public void printlnTest() {
//Prints the test class name when println (below) is enabled.
println("PRINTING: " + this.getClass().getName());
}
/**
* Local print helper; output is disabled by default. Uncomment the body for debugging.
* @param s value to print
*/
static void println(String s) {
//System.out.println(s); //disable here
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import org.apache.datasketches.memory.MemoryBoundsException;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
public class MemoryBoundaryCheckTest {
//Shared 8-byte buffer. Each test below accesses at an offset where
//(offset + element size) exceeds capacity 8 by exactly one byte,
//so every primitive get/put must throw MemoryBoundsException.
private final WritableBuffer writableBuffer = WritableMemory.allocate(8).asWritableBuffer();
//1-byte types: offset 8 is one past the end
@Test(expectedExceptions = MemoryBoundsException.class)
public void testGetBoolean() {
writableBuffer.getBoolean(8);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testPutBoolean() {
writableBuffer.putBoolean(8, true);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testGetByte() {
writableBuffer.getByte(8);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testPutByte() {
writableBuffer.putByte(8, (byte) 1);
}
//2-byte types: offset 7 leaves only one byte
@Test(expectedExceptions = MemoryBoundsException.class)
public void testGetChar() {
writableBuffer.getChar(7);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testPutChar() {
writableBuffer.putChar(7, 'a');
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testGetShort() {
writableBuffer.getShort(7);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testPutShort() {
writableBuffer.putShort(7, (short) 1);
}
//4-byte types: offset 5 leaves only three bytes
@Test(expectedExceptions = MemoryBoundsException.class)
public void testGetInt() {
writableBuffer.getInt(5);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testPutInt() {
writableBuffer.putInt(5, 1);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testGetFloat() {
writableBuffer.getFloat(5);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testPutFloat() {
writableBuffer.putFloat(5, 1f);
}
//8-byte types: offset 1 leaves only seven bytes
@Test(expectedExceptions = MemoryBoundsException.class)
public void testGetLong() {
writableBuffer.getLong(1);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testPutLong() {
writableBuffer.putLong(1, 1L);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testGetDouble() {
writableBuffer.getDouble(1);
}
@Test(expectedExceptions = MemoryBoundsException.class)
public void testPutDouble() {
writableBuffer.putDouble(1, 1d);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
//NOTE(review): empty placeholder — no close-exception tests are implemented yet.
//TODO: add tests for close() failure paths, or remove this class.
public class MemoryCloseExceptionTest {
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.fail;
import java.io.IOException;
import java.nio.CharBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryBoundsException;
import org.apache.datasketches.memory.Utf8CodingException;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.memory.internal.Util.RandomCodePoints;
import org.testng.annotations.Test;
import com.google.protobuf.ByteString;
/**
* Adapted version of
* https://github.com/protocolbuffers/protobuf/blob/master/java/core/src/test/java/com/google/protobuf/DecodeUtf8Test.java
*
* <pre>Copyright 2008 Google Inc. All rights reserved.
* https://developers.google.com/protocol-buffers/
* See LICENSE.</pre>
*/
public class Utf8Test {

  @Test
  public void testRoundTripAllValidCodePoints() throws IOException { //the non-surrogate code pts
    for (int cp = Character.MIN_CODE_POINT; cp < Character.MAX_CODE_POINT; cp++) {
      if (!isSurrogateCodePoint(cp)) {
        String refStr = new String(Character.toChars(cp));
        assertRoundTrips(refStr);
      }
    }
  }

  @Test
  public void testPutInvalidChars() { //The surrogates must be a pair, thus invalid alone
    WritableMemory mem = WritableMemory.allocate(10);
    WritableMemory emptyMem = WritableMemory.allocate(0);
    for (int c = Character.MIN_SURROGATE; c <= Character.MAX_SURROGATE; c++) {
      assertSurrogate(mem, (char) c);
      assertSurrogate(emptyMem, (char) c);
    }
  }

  private static void assertSurrogate(WritableMemory mem, char c) {
    try {
      mem.putCharsToUtf8(0, new String(new char[] {c}));
      fail();
    } catch (Utf8CodingException e) {
      // Expected.
    }
  }

  @Test //method name kept as-is ("Invaid") to preserve the published test name
  public void testPutInvaidSurrogatePairs() {
    WritableMemory mem = WritableMemory.allocate(4);
    StringBuilder sb = new StringBuilder();
    sb.append(Character.MIN_HIGH_SURROGATE);
    sb.append(Character.MAX_HIGH_SURROGATE); //two high surrogates: an illegal pair
    try {
      mem.putCharsToUtf8(0, sb);
      fail(); //BUG FIX: without fail() this test silently passed when no exception was thrown
    } catch (Utf8CodingException e) {
      //Expected;
    }
  }

  @Test
  public void testPutHighBMP() {
    WritableMemory mem = WritableMemory.allocate(2); //\uE000 needs 3 bytes; only 2 available
    StringBuilder sb = new StringBuilder();
    sb.append("\uE000");
    try {
      mem.putCharsToUtf8(0, sb);
      fail(); //BUG FIX: the out-of-space condition must actually be detected
    } catch (Utf8CodingException e) {
      //Expected;
    }
  }

  @Test
  public void testPutExtendedAscii() {
    WritableMemory mem = WritableMemory.allocate(1); //\u07FF needs 2 bytes; only 1 available
    StringBuilder sb = new StringBuilder();
    sb.append("\u07FF");
    try {
      mem.putCharsToUtf8(0, sb);
      fail(); //BUG FIX: the out-of-space condition must actually be detected
    } catch (Utf8CodingException e) {
      //Expected;
    }
  }

  @Test
  public void testPutOneAsciiToEmpty() {
    WritableMemory mem = WritableMemory.allocate(0); //no room for even one ASCII byte
    StringBuilder sb = new StringBuilder();
    sb.append("a");
    try {
      mem.putCharsToUtf8(0, sb);
      fail(); //BUG FIX: the out-of-space condition must actually be detected
    } catch (Utf8CodingException e) {
      //Expected;
    }
  }

  @Test
  public void testPutValidSurrogatePair() {
    WritableMemory mem = WritableMemory.allocate(4);
    StringBuilder sb = new StringBuilder();
    sb.append(Character.MIN_HIGH_SURROGATE);
    sb.append(Character.MIN_LOW_SURROGATE);
    mem.putCharsToUtf8(0, sb); //a proper high+low pair encodes without error
  }

  // Test all 1, 2, 3 invalid byte combinations. Valid ones would have been covered above.
  @Test
  public void testOneByte() {
    int valid = 0;
    for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; i++) {
      ByteString bs = ByteString.copyFrom(new byte[] {(byte) i });
      if (!bs.isValidUtf8()) { //from -128 to -1
        checkInvalidBytes(bs.toByteArray());
      } else {
        valid++; //from 0 to 127
      }
    }
    assertEquals(IsValidUtf8TestUtil.EXPECTED_ONE_BYTE_ROUNDTRIPPABLE_COUNT, valid);
  }

  @Test
  public void testTwoBytes() {
    int valid = 0;
    for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; i++) {
      for (int j = Byte.MIN_VALUE; j <= Byte.MAX_VALUE; j++) {
        ByteString bs = ByteString.copyFrom(new byte[]{(byte) i, (byte) j});
        if (!bs.isValidUtf8()) {
          checkInvalidBytes(bs.toByteArray());
        } else {
          valid++;
        }
      }
    }
    assertEquals(IsValidUtf8TestUtil.EXPECTED_TWO_BYTE_ROUNDTRIPPABLE_COUNT, valid);
  }

  //@Test
  //This test is very long, and doesn't cover the 4-byte combinations.
  // It is replaced by checkRandomValidCodePoints below, which does cover some 4-byte combinations.
  public void testThreeBytes() {
    // Travis' OOM killer doesn't like this test
    if (System.getenv("TRAVIS") == null) {
      int count = 0;
      int valid = 0;
      for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; i++) {
        for (int j = Byte.MIN_VALUE; j <= Byte.MAX_VALUE; j++) {
          for (int k = Byte.MIN_VALUE; k <= Byte.MAX_VALUE; k++) {
            byte[] bytes = new byte[]{(byte) i, (byte) j, (byte) k};
            ByteString bs = ByteString.copyFrom(bytes);
            if (!bs.isValidUtf8()) {
              checkInvalidBytes(bytes);
            } else {
              valid++;
            }
            count++;
            if ((count % 1000000L) == 0) {
              println("Processed " + (count / 1000000L) + " million characters");
            }
          }
        }
      }
      assertEquals(IsValidUtf8TestUtil.EXPECTED_THREE_BYTE_ROUNDTRIPPABLE_COUNT, valid);
    }
  }

  /* These code points can be used by the following test to customize different regions of the
   * Code Point space. This randomized test can replace the exhaustive
   * combinatorially explosive previous test, which doesn't cover the 4 byte combinations.
   */
  static final int min1ByteCP = 0; //ASCII
  static final int min2ByteCP = 0X000080;
  static final int min3ByteCP = 0X000800;
  static final int min4ByteCP = Character.MIN_SUPPLEMENTARY_CODE_POINT; //0X010000;
  static final int minPlane2CP = 0X020000;
  static final int maxCodePoint = Character.MAX_CODE_POINT; //0X10FFFF
  static final int minSurr = Character.MIN_SURROGATE; //0X00D800;
  static final int maxSurr = Character.MAX_SURROGATE; //0X00E000;

  @Test
  //randomly selects CP from a range that include 1, 2, 3 and 4 byte encodings.
  // with 50% coming from plane 0 and 50% coming from plane 1.
  public void checkRandomValidCodePoints() {
    RandomCodePoints rcp = new RandomCodePoints(true);
    int numCP = 1000;
    int[] cpArr = new int[numCP];
    rcp.fillCodePointArray(cpArr, 0, minPlane2CP);
    String rcpStr = new String(cpArr, 0, numCP);
    //println(rcpStr);
    WritableMemory wmem = WritableMemory.allocate(4 * numCP);
    int utf8Bytes = (int) wmem.putCharsToUtf8(0, rcpStr);

    //decode via the Appendable (StringBuilder) path
    StringBuilder sb = new StringBuilder();
    try {
      wmem.getCharsFromUtf8(0L, utf8Bytes, (Appendable) sb);
    } catch (IOException | Utf8CodingException e) {
      throw new RuntimeException(e);
    }
    checkStrings(sb.toString(), rcpStr);

    //decode via the CharBuffer path
    CharBuffer cb = CharBuffer.allocate(rcpStr.length());
    try {
      wmem.getCharsFromUtf8(0L, utf8Bytes, cb);
    } catch (IOException | Utf8CodingException e) {
      throw new RuntimeException(e);
    }
    cb.flip(); //make the decoded chars readable (position..limit)
    //BUG FIX: was sb.toString(), which re-checked the StringBuilder result and
    //never verified what was actually decoded into the CharBuffer.
    String cbStr = cb.toString();
    assertEquals(cbStr.length(), rcpStr.length());
    checkStrings(cbStr, rcpStr);
  }

  @Test
  public void checkRandomValidCodePoints2() {
    //checks the non-deterministic constructor
    @SuppressWarnings("unused")
    RandomCodePoints rcp = new RandomCodePoints(false);
  }

  /**
   * Tests that round tripping of a sample of four byte permutations work.
   */
  @Test
  public void testInvalid_4BytesSamples() {
    // Bad trailing bytes
    checkInvalidInts(0xF0, 0xA4, 0xAD, 0x7F);
    checkInvalidInts(0xF0, 0xA4, 0xAD, 0xC0);
    // Special cases for byte2
    checkInvalidInts(0xF0, 0x8F, 0xAD, 0xA2);
    checkInvalidInts(0xF4, 0x90, 0xAD, 0xA2);
  }

  @Test
  public void testRealStrings() throws IOException {
    // English
    assertRoundTrips("The quick brown fox jumps over the lazy dog");
    // German
    assertRoundTrips("Quizdeltagerne spiste jordb\u00e6r med fl\u00f8de, mens cirkusklovnen");
    // Japanese
    assertRoundTrips(
        "\u3044\u308d\u306f\u306b\u307b\u3078\u3068\u3061\u308a\u306c\u308b\u3092");
    // Hebrew
    assertRoundTrips(
        "\u05d3\u05d2 \u05e1\u05e7\u05e8\u05df \u05e9\u05d8 \u05d1\u05d9\u05dd "
            + "\u05de\u05d0\u05d5\u05db\u05d6\u05d1 \u05d5\u05dc\u05e4\u05ea\u05e2"
            + " \u05de\u05e6\u05d0 \u05dc\u05d5 \u05d7\u05d1\u05e8\u05d4 "
            + "\u05d0\u05d9\u05da \u05d4\u05e7\u05dc\u05d9\u05d8\u05d4");
    // Thai
    assertRoundTrips(
        " \u0e08\u0e07\u0e1d\u0e48\u0e32\u0e1f\u0e31\u0e19\u0e1e\u0e31\u0e12"
            + "\u0e19\u0e32\u0e27\u0e34\u0e0a\u0e32\u0e01\u0e32\u0e23");
    // Chinese
    assertRoundTrips(
        "\u8fd4\u56de\u94fe\u4e2d\u7684\u4e0b\u4e00\u4e2a\u4ee3\u7406\u9879\u9009\u62e9\u5668");
    // Chinese with 4-byte chars
    assertRoundTrips("\uD841\uDF0E\uD841\uDF31\uD841\uDF79\uD843\uDC53\uD843\uDC78"
        + "\uD843\uDC96\uD843\uDCCF\uD843\uDCD5\uD843\uDD15\uD843\uDD7C\uD843\uDD7F"
        + "\uD843\uDE0E\uD843\uDE0F\uD843\uDE77\uD843\uDE9D\uD843\uDEA2");
    // Mixed
    assertRoundTrips(
        "The quick brown \u3044\u308d\u306f\u306b\u307b\u3078\u8fd4\u56de\u94fe"
            + "\u4e2d\u7684\u4e0b\u4e00");
  }

  @Test
  public void checkNonEmptyDestinationForDecode() {
    StringBuilder sb = new StringBuilder();
    sb.append("abc"); //current contents of destination
    int startChars = sb.toString().toCharArray().length;
    String refStr = "Quizdeltagerne spiste jordb\u00e6r med fl\u00f8de, mens cirkusklovnen";
    byte[] refByteArr = refStr.getBytes(UTF_8);
    int addBytes = refByteArr.length;
    WritableMemory refMem = WritableMemory.writableWrap(refByteArr);
    int decodedChars = refMem.getCharsFromUtf8(0, addBytes, sb);
    String finalStr = sb.toString();
    int finalChars = finalStr.toCharArray().length;
    //decoding must append to, not overwrite, the existing destination content
    assertEquals(decodedChars + startChars, finalChars);
    println("Decoded chars: " + decodedChars);
    println("Final chars: " + finalChars);
    println(sb.toString());
  }

  @Test
  public void checkNonEmptyDestinationForEncode() {
    String refStr = "Quizdeltagerne spiste jordb\u00e6r med fl\u00f8de, mens cirkusklovnen";
    byte[] refByteArr = refStr.getBytes(UTF_8);
    int refBytes = refByteArr.length;
    int offset = 100; //encode at a non-zero offset within the target memory
    WritableMemory tgtMem = WritableMemory.allocate(refBytes + offset);
    long bytesEncoded = tgtMem.putCharsToUtf8(offset, refStr);
    assertEquals(bytesEncoded, refBytes);
  }

  @Test
  public void testOverlong() {
    checkInvalidInts(0xc0, 0xaf);
    checkInvalidInts(0xe0, 0x80, 0xaf);
    checkInvalidInts(0xf0, 0x80, 0x80, 0xaf);
    // Max overlong
    checkInvalidInts(0xc1, 0xbf);
    checkInvalidInts(0xe0, 0x9f, 0xbf);
    checkInvalidInts(0xf0, 0x8f, 0xbf, 0xbf);
    // null overlong
    checkInvalidInts(0xc0, 0x80);
    checkInvalidInts(0xe0, 0x80, 0x80);
    checkInvalidInts(0xf0, 0x80, 0x80, 0x80);
  }

  @Test
  public void testIllegalCodepoints() {
    // Single surrogate
    checkInvalidInts(0xed, 0xa0, 0x80);
    checkInvalidInts(0xed, 0xad, 0xbf);
    checkInvalidInts(0xed, 0xae, 0x80);
    checkInvalidInts(0xed, 0xaf, 0xbf);
    checkInvalidInts(0xed, 0xb0, 0x80);
    checkInvalidInts(0xed, 0xbe, 0x80);
    checkInvalidInts(0xed, 0xbf, 0xbf);
    // Paired surrogates
    checkInvalidInts(0xed, 0xa0, 0x80, 0xed, 0xb0, 0x80);
    checkInvalidInts(0xed, 0xa0, 0x80, 0xed, 0xbf, 0xbf);
    checkInvalidInts(0xed, 0xad, 0xbf, 0xed, 0xb0, 0x80);
    checkInvalidInts(0xed, 0xad, 0xbf, 0xed, 0xbf, 0xbf);
    checkInvalidInts(0xed, 0xae, 0x80, 0xed, 0xb0, 0x80);
    checkInvalidInts(0xed, 0xae, 0x80, 0xed, 0xbf, 0xbf);
    checkInvalidInts(0xed, 0xaf, 0xbf, 0xed, 0xb0, 0x80);
    checkInvalidInts(0xed, 0xaf, 0xbf, 0xed, 0xbf, 0xbf);
  }

  @Test
  public void testBufferSlice() throws IOException {
    String str = "The quick brown fox jumps over the lazy dog";
    assertRoundTrips(str, 4, 10, 4);
    assertRoundTrips(str, 0, str.length(), 0);
  }

  @Test
  public void testInvalidBufferSlice() { //these are pure Memory bounds violations
    byte[] bytes = "The quick brown fox jumps over the lazy dog".getBytes(UTF_8);
    checkInvalidSlice(bytes, bytes.length - 3, 4);
    checkInvalidSlice(bytes, bytes.length, 1);
    checkInvalidSlice(bytes, bytes.length + 1, 0);
    checkInvalidSlice(bytes, 0, bytes.length + 1);
  }

  private static void checkInvalidInts(int... bytesAsInt) { //invalid byte sequences
    byte[] bytes = new byte[bytesAsInt.length];
    for (int i = 0; i < bytesAsInt.length; i++) {
      bytes[i] = (byte) bytesAsInt[i];
    }
    checkInvalidBytes(bytes);
  }

  private static void checkInvalidBytes(byte[] bytes) {
    int bytesLen = bytes.length;
    try {
      Memory.wrap(bytes).getCharsFromUtf8(0, bytesLen, new StringBuilder());
      fail();
    } catch (Utf8CodingException e) {
      // Expected.
    }
    try {
      CharBuffer cb = CharBuffer.allocate(bytesLen);
      Memory.wrap(bytes).getCharsFromUtf8(0, bytesLen, cb);
      fail();
    } catch (Utf8CodingException | IOException e) {
      // Expected.
    }
  }

  private static void checkInvalidSlice(byte[] bytes, int index, int size) {
    try {
      Memory mem = Memory.wrap(bytes);
      mem.getCharsFromUtf8(index, size, new StringBuilder());
      fail();
    } catch (MemoryBoundsException e) { //Pure bounds violation
      // Expected.
    }
  }

  /**
   * Performs round-trip test using the given reference string
   * @param refStr the reference string
   * @throws IOException when IOException occurs
   */
  private static void assertRoundTrips(String refStr) throws IOException {
    assertRoundTrips(refStr, refStr.toCharArray().length, 0, -1);
  }

  /**
   * Performs round-trip test using the given reference string
   * @param refStr the reference string
   * @param refSubCharLen the number of characters expected to be decoded
   * @param offsetBytes starting utf8 byte offset
   * @param utf8LengthBytes length of utf8 bytes; -1 means the full encoded length
   * @throws IOException when IOException occurs
   */
  private static void assertRoundTrips(String refStr, int refSubCharLen, int offsetBytes,
      int utf8LengthBytes) throws IOException {
    byte[] refByteArr = refStr.getBytes(UTF_8);
    if (utf8LengthBytes == -1) {
      utf8LengthBytes = refByteArr.length;
    }
    Memory refMem = Memory.wrap(refByteArr);
    byte[] refByteArr2 = new byte[refByteArr.length + 1];
    System.arraycopy(refByteArr, 0, refByteArr2, 1, refByteArr.length);
    Memory refReg = Memory.wrap(refByteArr2).region(1, refByteArr.length);
    WritableMemory dstMem = WritableMemory.allocate(refByteArr.length);
    WritableMemory dstMem2 =
        WritableMemory.allocate(refByteArr.length + 1).writableRegion(1, refByteArr.length);
    // Test with Memory objects, where base offset != 0
    assertRoundTrips(refStr, refSubCharLen, offsetBytes, utf8LengthBytes, refByteArr, refMem, dstMem);
    assertRoundTrips(refStr, refSubCharLen, offsetBytes, utf8LengthBytes, refByteArr, refMem, dstMem2);
    assertRoundTrips(refStr, refSubCharLen, offsetBytes, utf8LengthBytes, refByteArr, refReg, dstMem);
    assertRoundTrips(refStr, refSubCharLen, offsetBytes, utf8LengthBytes, refByteArr, refReg, dstMem2);
  }

  private static void assertRoundTrips(String refStr, int refSubCharLen, int offsetBytes,
      int utf8LengthBytes, byte[] refByteArr, Memory refMem, WritableMemory dstMem)
      throws IOException {
    StringBuilder sb = new StringBuilder();
    int charPos = refMem.getCharsFromUtf8(offsetBytes, utf8LengthBytes, sb);
    checkStrings(sb.toString(), new String(refByteArr, offsetBytes, utf8LengthBytes, UTF_8));
    assertEquals(charPos, refSubCharLen);
    CharBuffer cb = CharBuffer.allocate(refByteArr.length + 1);
    cb.position(1);
    // Make CharBuffer 1-based, to check correct offset handling
    cb = cb.slice();
    refMem.getCharsFromUtf8(offsetBytes, utf8LengthBytes, cb);
    cb.flip();
    checkStrings(cb.toString(), new String(refByteArr, offsetBytes, utf8LengthBytes, UTF_8));
    long encodedUtf8Bytes = dstMem.putCharsToUtf8(0, refStr); //encodes entire refStr
    assertEquals(encodedUtf8Bytes, refByteArr.length); //compares bytes length
    //compare the actual bytes encoded
    assertEquals(0, dstMem.compareTo(0, refByteArr.length, refMem, 0, refByteArr.length));
    // Test write overflow
    WritableMemory writeMem2 = WritableMemory.allocate(refByteArr.length - 1);
    try {
      writeMem2.putCharsToUtf8(0, refStr);
      fail();
    } catch (Utf8CodingException e) {
      // Expected.
    }
  }

  private static boolean isSurrogateCodePoint(final int cp) {
    return (cp >= Character.MIN_SURROGATE) && (cp <= Character.MAX_SURROGATE);
  }

  private static void checkStrings(String actual, String expected) {
    if (!expected.equals(actual)) {
      fail("Failure: Expected (" + codepoints(expected) + ") Actual (" + codepoints(actual) + ")");
    }
  }

  private static List<String> codepoints(String str) {
    List<String> codepoints = new ArrayList<>();
    for (int i = 0; i < str.length(); i++) {
      codepoints.add(Long.toHexString(str.charAt(i)));
    }
    return codepoints;
  }

  @Test
  public void printlnTest() {
    println("PRINTING: " + this.getClass().getName());
  }

  /**
   * @param s value to print
   */
  static void println(String s) {
    //System.out.println(s); //disable here
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import java.nio.ByteBuffer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
* Although allocating zero bytes may be a bug, it is tolerated in Java.
*
* @author Lee Rhodes
*/
public class ZeroCapacityTest {

  /**
   * Exercises every zero-capacity construction path: heap allocate, primitive-array wrap,
   * heap and direct ByteBuffer wrap, a zero-length region, and direct allocation with close.
   */
  @Test
  public void checkZeroCapacity() throws Exception {
    final WritableMemory heap = WritableMemory.allocate(0);
    assertEquals(heap.getCapacity(), 0);

    Memory.wrap(new byte[0]);
    Memory.wrap(ByteBuffer.allocate(0));

    final Memory directBB = Memory.wrap(ByteBuffer.allocateDirect(0));
    directBB.region(0, 0);

    //allocate and immediately release a zero-byte off-heap resource
    final WritableMemory offHeap = WritableMemory.allocateDirect(0);
    offHeap.close();
  }

  @Test
  public void printlnTest() {
    //println("PRINTING: "+this.getClass().getName());
  }

  /**
   * Local print helper; output is disabled by default.
   * @param s value to print
   */
  static void println(String s) {
    //System.out.println(s); //disable here
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.fail;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryBoundsException;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
* @author Lee Rhodes
*/
public class WritableDirectCopyTest {
//Copy Within tests
//Copies the first half onto the second half of the same direct memory, then verifies.
@Test
public void checkCopyWithinNativeSmall() {
int memCapacity = 64;
int half = memCapacity / 2;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
mem.clear();
for (int i = 0; i < half; i++) { //fill first half
mem.putByte(i, (byte) i);
}
mem.copyTo(0, mem, half, half);
for (int i = 0; i < half; i++) {
assertEquals(mem.getByte(i + half), (byte) i);
}
}
}
//Same half-to-half copy but with a multi-megabyte allocation, using long values.
@Test
public void checkCopyWithinNativeLarge() {
int memCapacity = (2 << 20) + 64;
int memCapLongs = memCapacity / 8;
int halfBytes = memCapacity / 2;
int halfLongs = memCapLongs / 2;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
mem.clear();
for (int i = 0; i < halfLongs; i++) {
mem.putLong(i * 8, i);
}
mem.copyTo(0, mem, halfBytes, halfBytes);
for (int i = 0; i < halfLongs; i++) {
assertEquals(mem.getLong((i + halfLongs) * 8), i);
}
}
}
//Overlapping source and destination ranges within the same memory must be tolerated.
@Test
public void checkCopyWithinNativeOverlap() {
int memCapacity = 64;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
mem.clear();
//println(mem.toHexString("Clear 64", 0, memCapacity));
for (int i = 0; i < (memCapacity / 2); i++) {
mem.putByte(i, (byte) i);
}
//println(mem.toHexString("Set 1st 32 to ints ", 0, memCapacity));
mem.copyTo(0, mem, memCapacity / 4, memCapacity / 2); //overlap is OK
}
}
//Source range 32..65 exceeds the 64-byte capacity: bounds check must fire.
@Test
public void checkCopyWithinNativeSrcBound() {
int memCapacity = 64;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
mem.copyTo(32, mem, 32, 33); //hit source bound check
fail("Did Not Catch Assertion Error: source bound");
} catch (MemoryBoundsException e) {
//pass
}
}
//Destination range 32..65 exceeds the 64-byte capacity: bounds check must fire.
@Test
public void checkCopyWithinNativeDstBound() {
int memCapacity = 64;
try (WritableMemory mem = WritableMemory.allocateDirect(memCapacity)) {
mem.copyTo(0, mem, 32, 33); //hit dst bound check
fail("Did Not Catch Assertion Error: dst bound");
} catch (MemoryBoundsException e) {
//pass
}
}
//Full copy between two distinct direct allocations.
@Test
public void checkCopyCrossNativeSmall() {
int memCapacity = 64;
try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity);
WritableMemory mem2 = WritableMemory.allocateDirect(memCapacity)) {
for (int i = 0; i < memCapacity; i++) {
mem1.putByte(i, (byte) i);
}
mem2.clear();
mem1.copyTo(0, mem2, 0, memCapacity);
for (int i = 0; i < memCapacity; i++) {
assertEquals(mem2.getByte(i), (byte) i);
}
}
}
//Full copy between two distinct multi-megabyte direct allocations.
@Test
public void checkCopyCrossNativeLarge() {
int memCapacity = (2 << 20) + 64;
int memCapLongs = memCapacity / 8;
try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity);
WritableMemory mem2 = WritableMemory.allocateDirect(memCapacity)) {
for (int i = 0; i < memCapLongs; i++) {
mem1.putLong(i * 8, i);
}
mem2.clear();
mem1.copyTo(0, mem2, 0, memCapacity);
for (int i = 0; i < memCapLongs; i++) {
assertEquals(mem2.getLong(i * 8), i);
}
}
}
//Copy from direct (off-heap) memory into heap (byte-array backed) memory.
@Test
public void checkCopyCrossNativeAndByteArray() {
int memCapacity = 64;
try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity)) {
for (int i = 0; i < mem1.getCapacity(); i++) {
mem1.putByte(i, (byte) i);
}
WritableMemory mem2 = WritableMemory.allocate(memCapacity);
mem1.copyTo(8, mem2, 16, 16);
for (int i = 0; i < 16; i++) {
assertEquals(mem1.getByte(8 + i), mem2.getByte(16 + i));
}
//println(mem2.toHexString("Mem2", 0, (int)mem2.getCapacity()));
}
}
//Copy between two non-overlapping regions carved from the same direct memory.
@Test
public void checkCopyCrossRegionsSameNative() {
int memCapacity = 128;
try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity)) {
for (int i = 0; i < mem1.getCapacity(); i++) {
mem1.putByte(i, (byte) i);
}
//println(mem1.toHexString("Mem1", 0, (int)mem1.getCapacity()));
Memory reg1 = mem1.region(8, 16);
//println(reg1.toHexString("Reg1", 0, (int)reg1.getCapacity()));
WritableMemory reg2 = mem1.writableRegion(24, 16);
//println(reg2.toHexString("Reg2", 0, (int)reg2.getCapacity()));
reg1.copyTo(0, reg2, 0, 16);
for (int i = 0; i < 16; i++) {
assertEquals(reg1.getByte(i), reg2.getByte(i));
assertEquals(mem1.getByte(8 + i), mem1.getByte(24 + i));
}
//println(mem1.toHexString("Mem1", 0, (int)mem1.getCapacity()));
}
}
//Copy from a region-of-a-region (hierarchical offsets) into a heap-memory region.
@Test
public void checkCopyCrossNativeArrayAndHierarchicalRegions() {
int memCapacity = 64;
try (WritableMemory mem1 = WritableMemory.allocateDirect(memCapacity)) {
for (int i = 0; i < mem1.getCapacity(); i++) { //fill with numbers
mem1.putByte(i, (byte) i);
}
//println(mem1.toHexString("Mem1", 0, (int)mem1.getCapacity()));
WritableMemory mem2 = WritableMemory.allocate(memCapacity);
Memory reg1 = mem1.region(8, 32);
Memory reg1B = reg1.region(8, 16); //absolute offset 16 within mem1
//println(reg1.toHexString("Reg1", 0, (int)reg1.getCapacity()));
//println(reg1B.toHexString("Reg1B", 0, (int)reg1B.getCapacity()));
WritableMemory reg2 = mem2.writableRegion(32, 16);
reg1B.copyTo(0, reg2, 0, 16);
//println(reg2.toHexString("Reg2", 0, (int)reg2.getCapacity()));
//println(mem2.toHexString("Mem2", 0, (int)mem2.getCapacity()));
for (int i = 32, j = 16; i < 40; i++, j++) {
assertEquals(mem2.getByte(i), j);
}
}
}
@Test
public void printlnTest() {
println("PRINTING: " + this.getClass().getName());
}
/**
* Local print helper; output is disabled by default.
* @param s value to print
*/
static void println(String s) {
//System.out.println(s); //disable here
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import org.apache.datasketches.memory.MemoryBoundsException;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
 * Verifies that every primitive get/put on WritableMemory is bounds-checked:
 * any access whose primitive width would extend past the 8-byte capacity must
 * throw a MemoryBoundsException.
 */
public class BufferBoundaryCheckTest {

  //8-byte heap resource; each access below starts too close to the end for its width.
  private final WritableMemory wmem = WritableMemory.allocate(8);

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testGetBoolean() {
    wmem.getBoolean(8); //1-byte read starting at the capacity
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testPutBoolean() {
    wmem.putBoolean(8, true); //1-byte write starting at the capacity
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testGetByte() {
    wmem.getByte(8);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testPutByte() {
    wmem.putByte(8, (byte) 1);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testGetChar() {
    wmem.getChar(7); //2-byte read would end at offset 9
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testPutChar() {
    wmem.putChar(7, 'a');
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testGetShort() {
    wmem.getShort(7);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testPutShort() {
    wmem.putShort(7, (short) 1);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testGetInt() {
    wmem.getInt(5); //4-byte read would end at offset 9
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testPutInt() {
    wmem.putInt(5, 1);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testGetFloat() {
    wmem.getFloat(5);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testPutFloat() {
    wmem.putFloat(5, 1f);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testGetLong() {
    wmem.getLong(1); //8-byte read would end at offset 9
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testPutLong() {
    wmem.putLong(1, 1L);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testGetDouble() {
    wmem.getDouble(1);
  }

  @Test(expectedExceptions = MemoryBoundsException.class)
  public void testPutDouble() {
    wmem.putDouble(1, 1d);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.fail;
import java.nio.ByteBuffer;
import org.apache.datasketches.memory.Buffer;
import org.apache.datasketches.memory.BufferPositionInvariantsException;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
 * Checks the invariants among start, position, end and capacity for Buffer and
 * WritableBuffer, and how they interact with wrapped ByteBuffers, duplicates and
 * regions, over both heap and direct resources.
 *
 * @author Lee Rhodes
 */
public class BufferInvariantsTest {

  /** region() of a wrapped heap ByteBuffer spans [position, end) with re-based offsets. */
  @Test
  public void testRegion() {
    ByteBuffer byteBuffer = ByteBuffer.allocate(10);
    byteBuffer.limit(7); //limit (7) below capacity (10)
    Buffer buff = Buffer.wrap(byteBuffer); //end = BB limit (7); capacity = BB capacity (10)
    assertEquals(buff.getCapacity(), 10); //capacity tracks the BB capacity, not its limit
    buff.getByte(); //pos moves to 1
    Buffer copyBuff = buff.region(); //covers [pos, end): start: 0, pos: 0, end: 6, cap: 6
    assertEquals(copyBuff.getEnd(), 6);
    assertEquals(copyBuff.getCapacity(), 6);
    assertEquals(copyBuff.getStart(), 0);
    assertEquals(copyBuff.getPosition(), 0);
    buff.setStartPositionEnd(1, 1, 5);
    buff.getByte(); //pos moves to 2; region below covers [2, 5): length 3
    Buffer copyBuff2 = buff.region();
    assertEquals(copyBuff2.getEnd(), 3);
    assertEquals(copyBuff2.getCapacity(), 3);
    assertEquals(copyBuff2.getStart(), 0);
    assertEquals(copyBuff2.getPosition(), 0);
  }

  /** Baseline heap-ByteBuffer semantics: duplicate() keeps offsets; slice() re-bases them. */
  @Test
  public void testBB() {
    int n = 25;
    ByteBuffer bb = ByteBuffer.allocate(n);
    for (byte i = 0; i < n; i++) { bb.put(i, i); } //absolute puts: position stays 0
    assertEquals(bb.position(), 0);
    assertEquals(bb.limit(), n);
    assertEquals(bb.get(0), 0);
    // print("Orig : ");
    // printbb(bb);
    bb.limit(20);
    bb.position(5);
    assertEquals(bb.remaining(), 15);
    // print("Set : ");
    // printbb(bb);
    ByteBuffer dup = bb.duplicate(); //duplicate keeps position, limit and capacity
    assertEquals(dup.position(), 5);
    assertEquals(dup.limit(), 20);
    assertEquals(dup.capacity(), 25);
    // print("Dup : ");
    // printbb(dup);
    ByteBuffer sl = bb.slice(); //slice re-bases: position 0, capacity = remaining
    assertEquals(sl.position(), 0);
    assertEquals(sl.limit(), 15);
    assertEquals(sl.capacity(), 15);
    // print("Slice: ");
    // printbb(sl);
  }

  /** Same invariants for a heap WritableBuffer: setStartPositionEnd, duplicate, region. */
  @Test
  public void testBuf() {
    int n = 25;
    WritableBuffer buf = WritableMemory.allocate(n).asWritableBuffer();
    for (byte i = 0; i < n; i++) { buf.putByte(i); } //relative puts advance the position
    buf.setPosition(0);
    assertEquals(buf.getPosition(), 0);
    assertEquals(buf.getEnd(), 25);
    assertEquals(buf.getCapacity(), 25);
    // print("Orig : ");
    // printbuf(buf);
    buf.setStartPositionEnd(0, 5, 20);
    assertEquals(buf.getRemaining(), 15);
    assertEquals(buf.getCapacity(), 25); //capacity is unaffected by start/pos/end
    assertEquals(buf.getByte(), 5); //relative get at pos 5; pos moves to 6
    buf.setPosition(5);
    // print("Set : ");
    // printbuf(buf);
    Buffer dup = buf.duplicate(); //duplicate keeps start, pos, end and capacity
    assertEquals(dup.getRemaining(), 15);
    assertEquals(dup.getCapacity(), 25);
    assertEquals(dup.getByte(), 5);
    dup.setPosition(5);
    // print("Dup : ");
    // printbuf(dup);
    Buffer reg = buf.region(); //region re-bases to [pos, end): cap = remaining
    assertEquals(reg.getPosition(), 0);
    assertEquals(reg.getEnd(), 15);
    assertEquals(reg.getRemaining(), 15);
    assertEquals(reg.getCapacity(), 15);
    assertEquals(reg.getByte(), 5); //region still reads the parent's byte at offset 5+
    reg.setPosition(0);
    // print("Region: ");
    // printbuf(reg);
  }

  /** Buffer.wrap must adopt the ByteBuffer's position and limit as pos and end. */
  @Test
  public void testBufWrap() {
    int n = 25;
    ByteBuffer bb = ByteBuffer.allocate(n);
    for (byte i = 0; i < n; i++) { bb.put(i, i); }
    bb.position(5);
    bb.limit(20);
    Buffer buf = Buffer.wrap(bb); //pos = BB position, end = BB limit, cap = BB capacity
    assertEquals(buf.getPosition(), 5);
    assertEquals(buf.getEnd(), 20);
    assertEquals(buf.getRemaining(), 15);
    assertEquals(buf.getCapacity(), 25);
    assertEquals(buf.getByte(), 5);
    buf.setPosition(5);
    // print("Buf.wrap: ");
    // printbuf(buf);
    Buffer reg = buf.region();
    assertEquals(reg.getPosition(), 0);
    assertEquals(reg.getEnd(), 15);
    assertEquals(reg.getRemaining(), 15);
    assertEquals(reg.getCapacity(), 15);
    assertEquals(reg.getByte(), 5);
    reg.setPosition(0);
    // print("Buf.region: ");
    // printbuf(reg);
  }

  /** setStartPositionEnd must reject an end beyond the capacity of a direct resource. */
  @Test
  public void checkLimitsDirect() throws Exception {
    try (WritableMemory wmem = WritableMemory.allocateDirect(100)) {
      Buffer buf = wmem.asBuffer();
      buf.setStartPositionEnd(40, 45, 50); //legal: start <= pos <= end <= cap
      buf.setStartPositionEnd(0, 0, 100); //legal: end == cap
      try {
        buf.setStartPositionEnd(0, 0, 101); //illegal: end > cap
        fail();
      } catch (BufferPositionInvariantsException e) {
        //ok: expected
      }
    }
  }

  /** Same as testRegion; kept as the "direct" variant of the region-invariant check. */
  @Test
  public void testRegionDirect() {
    ByteBuffer byteBuffer = ByteBuffer.allocate(10);
    byteBuffer.limit(7); //limit (7) below capacity (10)
    Buffer buff = Buffer.wrap(byteBuffer); //end = BB limit (7); capacity = BB capacity (10)
    assertEquals(buff.getCapacity(), 10); //capacity tracks the BB capacity, not its limit
    buff.getByte(); //pos moves to 1
    Buffer copyBuff = buff.region(); //covers [pos, end): start: 0, pos: 0, end: 6, cap: 6
    assertEquals(copyBuff.getEnd(), 6);
    assertEquals(copyBuff.getCapacity(), 6);
    assertEquals(copyBuff.getStart(), 0);
    assertEquals(copyBuff.getPosition(), 0);
    buff.setStartPositionEnd(1, 1, 5);
    buff.getByte(); //pos moves to 2; region below covers [2, 5): length 3
    Buffer copyBuff2 = buff.region();
    assertEquals(copyBuff2.getEnd(), 3);
    assertEquals(copyBuff2.getCapacity(), 3);
    assertEquals(copyBuff2.getStart(), 0);
    assertEquals(copyBuff2.getPosition(), 0);
  }

  /** Baseline direct-ByteBuffer semantics, mirroring testBB. */
  @Test
  public void testBBDirect() {
    int n = 25;
    ByteBuffer bb = ByteBuffer.allocateDirect(n);
    for (byte i = 0; i < n; i++) { bb.put(i, i); } //absolute puts: position stays 0
    assertEquals(bb.position(), 0);
    assertEquals(bb.limit(), n);
    assertEquals(bb.get(0), 0);
    // print("Orig : ");
    // printbb(bb);
    bb.limit(20);
    bb.position(5);
    assertEquals(bb.remaining(), 15);
    // print("Set : ");
    // printbb(bb);
    ByteBuffer dup = bb.duplicate(); //duplicate keeps position, limit and capacity
    assertEquals(dup.position(), 5);
    assertEquals(dup.limit(), 20);
    assertEquals(dup.capacity(), 25);
    // print("Dup : ");
    // printbb(dup);
    ByteBuffer sl = bb.slice(); //slice re-bases: position 0, capacity = remaining
    assertEquals(sl.position(), 0);
    assertEquals(sl.limit(), 15);
    assertEquals(sl.capacity(), 15);
    // print("Slice: ");
    // printbb(sl);
  }

  /** Same invariants as testBuf, but over a direct (off-heap) resource. */
  @Test
  public void testBufDirect() throws Exception {
    int n = 25;
    try (WritableMemory wmem = WritableMemory.allocateDirect(n)) {
      WritableBuffer buf = wmem.asWritableBuffer();
      for (byte i = 0; i < n; i++) { buf.putByte(i); } //relative puts advance the position
      buf.setPosition(0);
      assertEquals(buf.getPosition(), 0);
      assertEquals(buf.getEnd(), 25);
      assertEquals(buf.getCapacity(), 25);
      // print("Orig : ");
      // printbuf(buf);
      buf.setStartPositionEnd(0, 5, 20);
      assertEquals(buf.getRemaining(), 15);
      assertEquals(buf.getCapacity(), 25); //capacity is unaffected by start/pos/end
      assertEquals(buf.getByte(), 5);
      buf.setPosition(5);
      // print("Set : ");
      // printbuf(buf);
      Buffer dup = buf.duplicate(); //duplicate keeps start, pos, end and capacity
      assertEquals(dup.getRemaining(), 15);
      assertEquals(dup.getCapacity(), 25);
      assertEquals(dup.getByte(), 5);
      dup.setPosition(5);
      // print("Dup : ");
      // printbuf(dup);
      Buffer reg = buf.region(); //region re-bases to [pos, end): cap = remaining
      assertEquals(reg.getPosition(), 0);
      assertEquals(reg.getEnd(), 15);
      assertEquals(reg.getRemaining(), 15);
      assertEquals(reg.getCapacity(), 15);
      assertEquals(reg.getByte(), 5);
      reg.setPosition(0);
      // print("Region: ");
      // printbuf(reg);
    }
  }

  /** Buffer.wrap of a direct ByteBuffer adopts its position and limit, like testBufWrap. */
  @Test
  public void testBufWrapDirect() {
    int n = 25;
    ByteBuffer bb = ByteBuffer.allocateDirect(n);
    for (byte i = 0; i < n; i++) { bb.put(i, i); }
    bb.position(5);
    bb.limit(20);
    Buffer buf = Buffer.wrap(bb); //pos = BB position, end = BB limit, cap = BB capacity
    assertEquals(buf.getPosition(), 5);
    assertEquals(buf.getEnd(), 20);
    assertEquals(buf.getRemaining(), 15);
    assertEquals(buf.getCapacity(), 25);
    assertEquals(buf.getByte(), 5);
    buf.setPosition(5);
    // print("Buf.wrap: ");
    // printbuf(buf);
    Buffer reg = buf.region();
    assertEquals(reg.getPosition(), 0);
    assertEquals(reg.getEnd(), 15);
    assertEquals(reg.getRemaining(), 15);
    assertEquals(reg.getCapacity(), 15);
    assertEquals(reg.getByte(), 5);
    reg.setPosition(0);
    // print("Buf.region: ");
    // printbuf(reg);
  }

  /**
   * Debug helper: prints a ByteBuffer's offsets and its remaining bytes.
   * Output is a no-op unless println/print below are enabled.
   * @param bb the ByteBuffer to print
   */
  static void printbb(ByteBuffer bb) {
    println("pos: " + bb.position() + ", lim: " + bb.limit() + ", cap: " + bb.capacity());
    int rem = bb.remaining();
    int pos = bb.position();
    int i;
    for (i = 0; i < (rem - 1); i++) {
      print(bb.get(i + pos) + ", ");
    }
    println(bb.get(i + pos) + "\n");
  }

  /**
   * Debug helper: prints a Buffer's offsets and its remaining bytes.
   * Output is a no-op unless println/print below are enabled.
   * @param buf the Buffer to print
   */
  static void printbuf(Buffer buf) {
    println("pos: " + buf.getPosition() + ", end: " + buf.getEnd() + ", cap: " + buf.getCapacity());
    long rem = buf.getRemaining();
    long pos = buf.getPosition();
    int i;
    for (i = 0; i < (rem - 1); i++) {
      print(buf.getByte(i + pos) + ", ");
    }
    println(buf.getByte(i + pos) + "\n");
  }

  /**
   * @param s value to print
   */
  static void println(String s) {
    //System.out.println(s); //disable here
  }

  /**
   * @param s value to print
   */
  static void print(String s) {
    //System.out.print(s); //disable here
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
 * Exercises put/get of all multibyte primitives, singly and as arrays, through a
 * non-native (big-endian) WritableMemory view, verifying that the byte-swapping
 * code paths round-trip correctly against native-order Java arrays.
 *
 * @author Lee Rhodes
 */
public class NonNativeWritableMemoryImplTest {
  private byte[] bArr = new byte[8];
  //Big-endian (non-native) view over bArr, used by the region checks at the bottom.
  private final WritableMemory wmem = WritableMemory.writableWrap(bArr, ByteOrder.BIG_ENDIAN);

  //Check primitives

  /** Round-trips char values singly and as an array through big-endian views. */
  @Test
  public void checkCharacters() {
    int m = Character.BYTES;
    int n = ((1 << 20) / m) + m; //slightly more than 1MB of backing bytes
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem1 = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    //NOTE: i is incremented twice per pass, so only every other slot is written;
    //the read-back loop repeats the same stride, so the comparisons stay aligned.
    for (int i = 0; i < n; i++) { wmem1.putChar(i * m, (char) i++); }
    for (int i = 0; i < n; i++) {
      assertEquals(wmem1.getChar(i * m), (char) i++);
    }
    //getArr & putArr
    char[] cArr = new char[n]; //native
    wmem1.getCharArray(0, cArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    wmem2.putCharArray(0, cArr, 0, n);
    assertEquals(arr2, arr1); //the two byte images must match exactly
  }

  /** Round-trips double values singly and as an array through big-endian views. */
  @Test
  public void checkDoubles() {
    int m = Double.BYTES;
    int n = ((1 << 20) / m) + m;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem1 = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    double dbl = 1.0;
    for (int i = 0; i < n; i++) { wmem1.putDouble(i * m, dbl++); }
    dbl = 1.0;
    for (int i = 0; i < n; i++) {
      assertEquals(wmem1.getDouble(i * m), dbl++);
    }
    //getArr & putArr
    double[] dblArr = new double[n]; //native
    wmem1.getDoubleArray(0, dblArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    wmem2.putDoubleArray(0, dblArr, 0, n);
    assertEquals(arr2, arr1); //the two byte images must match exactly
  }

  /** Round-trips float values singly and as an array through big-endian views. */
  @Test
  public void checkFloats() {
    int m = Float.BYTES;
    int n = ((1 << 20) / m) + m;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem1 = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    float flt = 1.0F;
    for (int i = 0; i < n; i++) { wmem1.putFloat(i * m, flt++); }
    flt = 1.0F;
    for (int i = 0; i < n; i++) {
      assertEquals(wmem1.getFloat(i * m), flt++);
    }
    //getArr & putArr
    float[] fltArr = new float[n]; //native
    wmem1.getFloatArray(0, fltArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    wmem2.putFloatArray(0, fltArr, 0, n);
    assertEquals(arr2, arr1); //the two byte images must match exactly
  }

  /** Round-trips int values singly and as an array through big-endian views. */
  @Test
  public void checkInts() {
    int m = Integer.BYTES;
    int n = ((1 << 20) / m) + m;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem1 = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    int intg = 1;
    for (int i = 0; i < n; i++) { wmem1.putInt(i * m, intg++); }
    intg = 1;
    for (int i = 0; i < n; i++) {
      assertEquals(wmem1.getInt(i * m), intg++);
    }
    //getArr & putArr
    int[] intArr = new int[n]; //native
    wmem1.getIntArray(0, intArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    wmem2.putIntArray(0, intArr, 0, n);
    assertEquals(arr2, arr1); //the two byte images must match exactly
  }

  /** Round-trips long values singly and as an array through big-endian views. */
  @Test
  public void checkLongs() {
    int m = Long.BYTES;
    int n = ((1 << 20) / m) + m;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem1 = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    long lng = 1;
    for (int i = 0; i < n; i++) { wmem1.putLong(i * m, lng++); }
    lng = 1;
    for (int i = 0; i < n; i++) {
      assertEquals(wmem1.getLong(i * m), lng++);
    }
    //getArr & putArr
    long[] longArr = new long[n]; //native
    wmem1.getLongArray(0, longArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    wmem2.putLongArray(0, longArr, 0, n);
    assertEquals(arr2, arr1); //the two byte images must match exactly
  }

  /** Round-trips short values singly and as an array through big-endian views. */
  @Test
  public void checkShorts() {
    int m = Short.BYTES;
    int n = ((1 << 20) / m) + m;
    byte[] arr1 = new byte[n * m]; //non-native
    //put & get
    WritableMemory wmem1 = WritableMemory.writableWrap(arr1, ByteOrder.BIG_ENDIAN);
    short sht = 1;
    for (int i = 0; i < n; i++) { wmem1.putShort(i * m, sht++); }
    sht = 1;
    for (int i = 0; i < n; i++) {
      assertEquals(wmem1.getShort(i * m), sht++);
    }
    //getArr & putArr
    short[] shortArr = new short[n]; //native
    wmem1.getShortArray(0, shortArr, 0, n); //wmem is non-native
    byte[] arr2 = new byte[n * m];
    WritableMemory wmem2 = WritableMemory.writableWrap(arr2, ByteOrder.BIG_ENDIAN);
    wmem2.putShortArray(0, shortArr, 0, n);
    assertEquals(arr2, arr1); //the two byte images must match exactly
  }

  //check Region

  /** A writable region must inherit the parent's non-native (big-endian) byte order. */
  @Test
  public void checkRegion() {
    WritableMemory wreg = wmem.writableRegion(0, wmem.getCapacity());
    assertEquals(wreg.getByteOrder(), ByteOrder.BIG_ENDIAN);
  }

  /**
   * A zero-capacity region of a big-endian parent reports LITTLE_ENDIAN.
   * NOTE(review): this asserts current library behavior — presumably a zero-capacity
   * region degenerates to native order since there is nothing to byte-swap; confirm intended.
   */
  @Test
  public void checkRegionZeros() {
    byte[] bArr1 = new byte[0];
    WritableMemory wmem1 = WritableMemory.writableWrap(bArr1, ByteOrder.BIG_ENDIAN);
    Memory reg = wmem1.region(0, wmem1.getCapacity());
    assertEquals(reg.getByteOrder(), ByteOrder.LITTLE_ENDIAN);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.testng.Assert.assertEquals;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;
/**
* @author Lee Rhodes
*/
public class XxHash64LoopingTest {
/*
* This test is adapted from
* <a href="https://github.com/OpenHFT/Zero-Allocation-Hashing/blob/master/src/test/java/net/openhft/hashing/XxHashTest.java">
* OpenHFT/Zero-Allocation-Hashing</a> to test hash compatibility with that implementation.
* See LICENSE.
*/
@Test
public void testWithSeed() {
long seed = 42L;
for (int i = 0; i < 1025; i++) {
byte[] byteArr = new byte[i];
for (int j = 0; j < byteArr.length; j++) { byteArr[j] = (byte) j; }
WritableMemory wmem = WritableMemory.writableWrap(byteArr);
long hash = wmem.xxHash64(0, byteArr.length, seed);
assertEquals(hash, HASHES_OF_LOOPING_BYTES_WITH_SEED_42[i]);
}
}
/*This data is from
* <a href="https://github.com/OpenHFT/Zero-Allocation-Hashing/blob/master/src/test/java/net/openhft/hashing/XxHashTest.java">
* OpenHFT/Zero-Allocation-Hashing</a> to test hash compatibility with that implementation.
* See LICENSE.
*/
private static final long[] HASHES_OF_LOOPING_BYTES_WITH_SEED_42 = {
-7444071767201028348L,
-8959994473701255385L,
7116559933691734543L,
6019482000716350659L,
-6625277557348586272L,
-5507563483608914162L,
1540412690865189709L,
4522324563441226749L,
-7143238906056518746L,
-7989831429045113014L,
-7103973673268129917L,
-2319060423616348937L,
-7576144055863289344L,
-8903544572546912743L,
6376815151655939880L,
5913754614426879871L,
6466567997237536608L,
-869838547529805462L,
-2416009472486582019L,
-3059673981515537339L,
4211239092494362041L,
1414635639471257331L,
166863084165354636L,
-3761330575439628223L,
3524931906845391329L,
6070229753198168844L,
-3740381894759773016L,
-1268276809699008557L,
1518581707938531581L,
7988048690914090770L,
-4510281763783422346L,
-8988936099728967847L,
-8644129751861931918L,
2046936095001747419L,
339737284852751748L,
-8493525091666023417L,
-3962890767051635164L,
-5799948707353228709L,
-6503577434416464161L,
7718729912902936653L,
191197390694726650L,
-2677870679247057207L,
20411540801847004L,
2738354376741059902L,
-3754251900675510347L,
-3208495075154651980L,
5505877218642938179L,
6710910171520780908L,
-9060809096139575515L,
6936438027860748388L,
-6675099569841255629L,
-5358120966884144380L,
-4970515091611332076L,
-1810965683604454696L,
-516197887510505242L,
1240864593087756274L,
6033499571835033332L,
7223146028771530185L,
909128106589125206L,
1567720774747329341L,
-1867353301780159863L,
4655107429511759333L,
5356891185236995950L,
182631115370802890L,
-3582744155969569138L,
595148673029792797L,
495183136068540256L,
5536689004903505647L,
-8472683670935785889L,
-4335021702965928166L,
7306662983232020244L,
4285260837125010956L,
8288813008819191181L,
-3442351913745287612L,
4883297703151707194L,
9135546183059994964L,
123663780425483012L,
509606241253238381L,
5940344208569311369L,
-2650142344608291176L,
3232776678942440459L,
-922581627593772181L,
7617977317085633049L,
7154902266379028518L,
-5806388675416795571L,
4368003766009575737L,
-2922716024457242064L,
4771160713173250118L,
3275897444752647349L,
-297220751499763878L,
5095659287766176401L,
1181843887132908826L,
9058283605301070357L,
3984713963471276643L,
6050484112980480005L,
1551535065359244224L,
565337293533335618L,
7412521035272884309L,
-4735469481351389369L,
6998597101178745656L,
-9107075101236275961L,
5879828914430779796L,
6034964979406620806L,
5666406915264701514L,
-4666218379625258428L,
2749972203764815656L,
-782986256139071446L,
6830581400521008570L,
2588852022632995043L,
-5484725487363818922L,
-3319556935687817112L,
6481961252981840893L,
2204492445852963006L,
-5301091763401031066L,
-2615065677047206256L,
-6769817545131782460L,
-8421640685322953142L,
-3669062629317949176L,
-9167016978640750490L,
2783671191687959562L,
-7599469568522039782L,
-7589134103255480011L,
-5932706841188717592L,
-8689756354284562694L,
-3934347391198581249L,
-1344748563236040701L,
2172701592984478834L,
-5322052340624064417L,
-8493945390573620511L,
3349021988137788403L,
-1806262525300459538L,
-8091524448239736618L,
4022306289903960690L,
-8346915997379834224L,
-2106001381993805461L,
-5784123934724688161L,
6775158099649720388L,
-3869682756870293568L,
4356490186652082006L,
8469371446702290916L,
-2972961082318458602L,
-7188106622222784561L,
-4961006366631572412L,
3199991182014172900L,
2917435868590434179L,
8385845305547872127L,
7706824402560674655L,
-1587379863634865277L,
-4212156212298809650L,
-1305209322000720233L,
-7866728337506665880L,
8195089740529247049L,
-4876930125798534239L,
798222697981617129L,
-2441020897729372845L,
-3926158482651178666L,
-1254795122048514130L,
5192463866522217407L,
-5426289318796042964L,
-3267454004443530826L,
471043133625225785L,
-660956397365869974L,
-6149209189144999161L,
-2630977660039166559L,
8512219789663151219L,
-3309844068134074620L,
-6211275327487847132L,
-2130171729366885995L,
6569302074205462321L,
4855778342281619706L,
3867211421508653033L,
-3002480002418725542L,
-8297543107467502696L,
8049642289208775831L,
-5439825716055425635L,
7251760070798756432L,
-4774526021749797528L,
-3892389575184442548L,
5162451061244344424L,
6000530226398686578L,
-5713092252241819676L,
8740913206879606081L,
-8693282419677309723L,
1576205127972543824L,
5760354502610401246L,
3173225529903529385L,
1785166236732849743L,
-1024443476832068882L,
-7389053248306187459L,
1171021620017782166L,
1471572212217428724L,
7720766400407679932L,
-8844781213239282804L,
-7030159830170200877L,
2195066352895261150L,
1343620937208608634L,
9178233160016731645L,
-757883447602665223L,
3303032934975960867L,
-3685775162104101116L,
-4454903657585596656L,
-5721532367620482629L,
8453227136542829644L,
5397498317904798888L,
7820279586106842836L,
-2369852356421022546L,
3910437403657116169L,
6072677490463894877L,
-2651044781586183960L,
5173762670440434510L,
-2970017317595590978L,
-1024698859439768763L,
-3098335260967738522L,
-1983156467650050768L,
-8132353894276010246L,
-1088647368768943835L,
-3942884234250555927L,
7169967005748210436L,
2870913702735953746L,
-2207022373847083021L,
1104181306093040609L,
5026420573696578749L,
-5874879996794598513L,
-4777071762424874671L,
-7506667858329720470L,
-2926679936584725232L,
-5530649174168373609L,
5282408526788020384L,
3589529249264153135L,
-6220724706210580398L,
-7141769650716479812L,
5142537361821482047L,
-7029808662366864423L,
-6593520217660744466L,
1454581737122410695L,
-139542971769349865L,
1727752089112067235L,
-775001449688420017L,
-5011311035350652032L,
-8671171179275033159L,
-2850915129917664667L,
-5258897903906998781L,
-6954153088230718761L,
-4070351752166223959L,
-6902592976462171099L,
-7850366369290661391L,
-4562443925864904705L,
3186922928616271015L,
2208521081203400591L,
-2727824999830592777L,
-3817861137262331295L,
2236720618756809066L,
-4888946967413746075L,
-446884183491477687L,
-43021963625359034L,
-5857689226703189898L,
-2156533592262354883L,
-2027655907961967077L,
7151844076490292500L,
-5029149124756905464L,
526404452686156976L,
8741076980297445408L,
7962851518384256467L,
-105985852299572102L,
-2614605270539434398L,
-8265006689379110448L,
8158561071761524496L,
-6923530157382047308L,
5551949335037580397L,
565709346370307061L,
-4780869469938333359L,
6931895917517004830L,
565234767538051407L,
-8663136372880869656L,
1427340323685448983L,
6492705666640232290L,
1481585578088475369L,
-1712711110946325531L,
3281685342714380741L,
6441384790483098576L,
-1073539554682358394L,
5704050067194788964L,
-5495724689443043319L,
-5425043165837577535L,
8349736730194941321L,
-4123620508872850061L,
4687874980541143573L,
-468891940172550975L,
-3212254545038049829L,
-6830802881920725628L,
9033050533972480988L,
4204031879107709260L,
-677513987701096310L,
-3286978557209370155L,
1644111582609113135L,
2040089403280131741L,
3323690950628902653L,
-7686964480987925756L,
-4664519769497402737L,
3358384147145476542L,
-4699919744264452277L,
-4795197464927839170L,
5051607253379734527L,
-8987703459734976898L,
8993686795574431834L,
-2688919474688811047L,
375938183536293311L,
1049459889197081920L,
-1213022037395838295L,
4932989235110984138L,
-6647247877090282452L,
-7698817539128166242L,
-3264029336002462659L,
6487828018122309795L,
-2660821091484592878L,
7104391069028909121L,
-1765840012354703384L,
85428166783788931L,
-6732726318028261938L,
7566202549055682933L,
229664898114413280L,
-1474237851782211353L,
-1571058880058007603L,
-7926453582850712144L,
2487148368914275243L,
8740031015380673473L,
1908345726881363169L,
-2510061320536523178L,
7854780026906019630L,
-6023415596650016493L,
-6264841978089051107L,
4024998278016087488L,
-4266288992025826072L,
-3222176619422665563L,
-1999258726038299316L,
1715270077442385636L,
6764658837948099754L,
-8646962299105812577L,
-51484064212171546L,
-1482515279051057493L,
-8663965522608868414L,
-256555202123523670L,
1973279596140303801L,
-7280796173024508575L,
-5691760367231354704L,
-5915786562256300861L,
-3697715074906156565L,
3710290115318541949L,
6796151623958134374L,
-935299482515386356L,
-7078378973978660385L,
5379481350768846927L,
-9011221735308556302L,
5936568631579608418L,
-6060732654964511813L,
-4243141607840017809L,
3198488845875349355L,
-7809288876010447646L,
4371587872421472389L,
-1304197371105522943L,
7389861473143460103L,
-1892352887992004024L,
2214828764044713398L,
6347546952883613388L,
1275694314105480954L,
-5262663163358903733L,
1524757505892047607L,
1474285098416162746L,
-7976447341881911786L,
4014100291977623265L,
8994982266451461043L,
-7737118961020539453L,
-2303955536994331092L,
1383016539349937136L,
1771516393548245271L,
-5441914919967503849L,
5449813464890411403L,
-3321280356474552496L,
4084073849712624363L,
4290039323210935932L,
2449523715173349652L,
7494827882138362156L,
9035007221503623051L,
5722056230130603177L,
-5443061851556843748L,
-7554957764207092109L,
447883090204372074L,
533916651576859197L,
-3104765246501904165L,
-4002281505194601516L,
-8402008431255610992L,
-408273018037005304L,
214196458752109430L,
6458513309998070914L,
2665048360156607904L,
96698248584467992L,
-3238403026096269033L,
6759639479763272920L,
-4231971627796170796L,
-2149574977639731179L,
-1437035755788460036L,
-6000005629185669767L,
145244292800946348L,
-3056352941404947199L,
3748284277779018970L,
7328354565489106580L,
-2176895260373660284L,
3077983936372755601L,
1215485830019410079L,
683050801367331140L,
-3173237622987755212L,
-1951990779107873701L,
-4714366021269652421L,
4934690664256059008L,
1674823104333774474L,
-3974408282362828040L,
2001478896492417760L,
-4115105568354384199L,
-2039694725495941666L,
-587763432329933431L,
-391276713546911316L,
-5543400904809469053L,
1882564440421402418L,
-4991793588968693036L,
3454088185914578321L,
2290855447126188424L,
3027910585026909453L,
2136873580213167431L,
-6243562989966916730L,
5887939953208193029L,
-3491821629467655741L,
-3138303216306660662L,
8572629205737718669L,
4154439973110146459L,
5542921963475106759L,
-2025215496720103521L,
-4047933760493641640L,
-169455456138383823L,
-1164572689128024473L,
-8551078127234162906L,
-7247713218016599028L,
8725299775220778242L,
6263466461599623132L,
7931568057263751768L,
7365493014712655238L,
-7343740914722477108L,
8294118602089088477L,
7677867223984211483L,
-7052188421655969232L,
-3739992520633991431L,
772835781531324307L,
881441588914692737L,
6321450879891466401L,
5682516032668315027L,
8493068269270840662L,
-3895212467022280567L,
-3241911302335746277L,
-7199586338775635848L,
-4606922569968527974L,
-806850906331637768L,
2433670352784844513L,
-5787982146811444512L,
7852193425348711165L,
8669396209073850051L,
-6898875695148963118L,
6523939610287206782L,
-8084962379210153174L,
8159432443823995836L,
-2631068535470883494L,
-338649779993793113L,
6514650029997052016L,
3926259678521802094L,
5443275905907218528L,
7312187582713433551L,
-2993773587362997676L,
-1068335949405953411L,
4499730398606216151L,
8538015793827433712L,
-4057209365270423575L,
-1504284818438273559L,
-6460688570035010846L,
1765077117408991117L,
8278320303525164177L,
8510128922449361533L,
1305722765578569816L,
7250861238779078656L,
-576624504295396147L,
-4363714566147521011L,
-5932111494795524073L,
1837387625936544674L,
-4186755953373944712L,
-7657073597826358867L,
140408487263951108L,
5578463635002659628L,
3400326044813475885L,
-6092804808386714986L,
-2410324417287268694L,
3222007930183458970L,
4932471983280850419L,
3554114546976144528L,
-7216067928362857082L,
-6115289896923351748L,
-6769646077108881947L,
4263895947722578066L,
2939136721007694271L,
1426030606447416658L,
-1316192446807442076L,
5366182640480055129L,
6527003877470258527L,
5849680119000207603L,
5263993237214222328L,
-6936533648789185663L,
-9063642143790846605L,
3795892210758087672L,
4987213125282940176L,
2505500970421590750L,
-1014022559552365387L,
-3574736245968367770L,
1180676507127340259L,
-2261908445207512503L,
-8416682633172243509L,
1114990703652673283L,
7753746660364401380L,
1874908722469707905L,
2033421444403047677L,
21412168602505589L,
385957952615286205L,
2053171460074727107L,
1915131899400103774L,
6680879515029368390L,
568807208929724162L,
-6211541450459087674L,
-5026690733412145448L,
1384781941404886235L,
-98027820852587266L,
1806580495924249669L,
6322077317403503963L,
9078162931419569939L,
-2809061215428363978L,
7697867577577415733L,
-5270063855897737274L,
5649864555290587388L,
-6970990547695444247L,
579684606137331754L,
3871931565451195154L,
2030008578322050218L,
-5012357307111799829L,
-2271365921756144065L,
4551962665158074190L,
-3385474923040271312L,
-7647625164191633577L,
6634635380316963029L,
-5201190933687061585L,
8864818738548593973L,
2855828214210882907L,
9154512990734024165L,
-6945306719789457786L,
1200243352799481087L,
875998327415853787L,
1275313054449881011L,
-6105772045375948736L,
-2926927684328291437L,
9200050852144954779L,
5188726645765880663L,
5197037323312705176L,
3434926231010121611L,
-5054013669361906544L,
2582959199749224670L,
-6053757512723474059L,
-5016308176846054473L,
-2509827316698626133L,
7700343644503853204L,
-1997627249894596731L,
3993168688325352290L,
-8181743677541277704L,
3719056119682565597L,
-7264411659282947790L,
7177028972346484464L,
-5460831176884283278L,
1799904662416293978L,
-6549616005092764514L,
5472403994001122052L,
8683463751708388502L,
-7873363037838316398L,
689134758256487260L,
-1287443614028696450L,
4452712919702709507L,
762909374167538893L,
6594302592326281411L,
1183786629674781984L,
5021847859620133476L,
-2490098069181538915L,
5105145136026716679L,
4437836948098585718L,
1987270426215858862L,
6170312798826946249L,
634297557126003407L,
-1672811625495999581L,
6282971595586218191L,
4549149305727581687L,
-5652165370435317782L,
1064501550023753890L,
-5334885527127139723L,
-6904378001629481237L,
-1807576691784201230L,
-205688432992053911L,
7621619053293393289L,
6258649161313982470L,
-1111634238359342096L,
-8044260779481691987L,
400270655839010807L,
-7806833581382890725L,
-2970563349459508036L,
-7392591524816802798L,
2918924613160219805L,
-6444161627929149002L,
6096497501321778876L,
-1477975665655830038L,
1690651307597306138L,
-2364076888826085362L,
-6521987420014905821L,
-4419193480146960582L,
3538587780233092477L,
8374665961716940404L,
7492412312405424500L,
6311662249091276767L,
-1240235198282023566L,
5478559631401166447L,
3476714419313462133L,
377427285984503784L,
2570472638778991109L,
-2741381313777447835L,
-7123472905503039596L,
2493658686946955193L,
1024677789035847585L,
-2916713904339582981L,
-4532003852004642304L,
-2202143560366234111L,
5832267856442755135L,
-261740607772957384L,
239435959690278014L,
5755548341947719409L,
6138795458221887696L,
-7709506987360146385L,
-6657487758065140444L,
-7006376793203657499L,
6544409861846502033L,
3171929352014159247L,
1051041925048792869L,
2617300158375649749L,
952652799620095175L,
-576661730162168147L,
-1634191369221345988L,
4833656816115993519L,
647566759700005786L,
2473810683785291822L,
3005977181064745326L,
-3321881966853149523L,
7595337666427588699L,
6004093624251057224L,
-563917505657690279L,
6117428527147449302L,
-6287297509522976113L,
-4527219334756214406L,
742626429298092489L,
3057351806086972041L,
645967551210272605L,
-4428701157828864227L,
3236379103879435414L,
-8477089892132066300L,
-6127365537275859058L,
-4052490484706946358L,
-8004854976625046469L,
-3679456917426613424L,
-8212793762082595299L,
-818288739465424130L,
1358812099481667095L,
7835987612195254310L,
-3663247409614323059L,
-2931105150130396604L,
7296136776835614792L,
-2014557408985889628L,
7267662411237959788L,
3699280615819277743L,
-212010675469091396L,
-6518374332458360120L,
145026010541628849L,
1879297324213501001L,
-7146296067751816833L,
-5002958800391379931L,
6060682439924517608L,
-432234782921170964L,
-6669688947353256956L,
7728943532792041267L,
830911367341171721L,
3396934884314289432L,
-779464156662780749L,
2330041851883352285L,
-4783350380736276693L,
-5758476056890049254L,
-7551552301614791791L,
1253334187723911710L,
-2685018208308798978L,
5379636036360946454L,
6154668487114681217L,
-8641287462255458898L,
4676087643800649558L,
-2405142641398691475L,
1088685126864246881L,
6431149082338374041L,
-607357695335069155L,
-720970692129524140L,
2648766932394044468L,
8408344790179354573L,
-6193808387735667350L,
7722524628524697419L,
-6975433852560238120L,
-2925851029234475295L,
-4274458387165211028L,
-8355836377702147319L,
5278146397877332061L,
8502098812383680707L,
2292836642336580326L,
-6127608082651070062L,
2222301962240611208L,
-1930887695854799378L,
7640503480494894592L,
1162652186586436094L,
-1918002592943761683L,
7648998601717261840L,
-8472603250832757057L,
-988877663117552456L,
2368458128168026494L,
-6480813811998475245L,
-5896967824416018967L,
-2593783161701820446L,
6950098417530252598L,
6362589545555771236L,
7981389665448567125L,
3954017080198558850L,
1626078615050230622L,
6650159066527969109L,
697345338922935394L,
-1226816215461768626L,
8740408765973837440L,
-4194155864629568323L,
7016680023232424746L,
6043281358142429469L,
-4201005667174376809L,
1216727117859013155L,
6367202436544203935L,
35414869396444636L,
3715622794033998412L,
488654435687670554L,
-2503747297224687460L,
3147101919441470388L,
-8248611218693190922L,
970697264481229955L,
3411465763826851418L,
9117405004661599969L,
-5204346498331519734L,
-19637460819385174L,
-5039124225167977219L,
2990108874601696668L,
-2623857460235459202L,
4256291692861397446L,
6724147860870760443L,
3558616688507246537L,
6487680097936412800L,
-6470792832935928161L,
4314814550912237614L,
-1292878983006062345L,
6791915152630414174L,
5971652079925815310L,
2557529546662864312L,
466175054322801580L,
-585216717310746872L,
-2486640422147349036L,
7212029603994220134L,
3958995069888972500L,
4950471855791412790L,
-3721948842035712763L,
-6184503487488243051L,
4079570444585775332L,
-3952156172546996872L,
4543894231118208322L,
-1739995588466209963L,
9155948355455935530L,
5821980345462207860L,
-2431287667309520417L,
-3890108130519441316L,
-558124689277030490L,
6079823537335801717L,
5409742395192364262L,
-2329885777717160453L,
-7332804342513677651L,
1466490574975950555L,
-420549419907427929L,
-5249909814389692516L,
-5145692168206210661L,
5934113980649113921L,
3241618428555359661L,
-6622110266160980250L,
5048250878669516223L,
5747219637359976174L,
2975906212588223728L,
5730216838646273215L,
-176713127129024690L,
6734624279336671146L,
5127866734316017180L,
7111761230887705595L,
3457811808274317235L,
3362961434604932375L,
-1877869936854991246L,
7171428594877765665L,
-8252167178400462374L,
-6306888185035821047L,
-6684702191247683887L,
-7754928454824190529L,
-1902605599135704386L,
-4037319846689421239L,
8493746058123583457L,
-8156648963857047193L,
2051510355149839497L,
-1256416624177218909L,
-3344927996254072010L,
-1838853051925943568L,
316927471680974556L,
-1502257066700798003L,
-5836095610125837606L,
-1594125583615895424L,
1442211486559637962L,
-144295071206619569L,
5159850900959273410L,
4589139881166423678L,
-7038726987463097509L,
2886082400772974595L,
2780759114707171916L,
5694649587906297495L,
1260349041268169667L,
4921517488271434890L,
644696475796073018L,
6262811963753436289L,
-6128198676595868773L,
-3625352083004760261L,
-8751453332943236675L,
8749249479868749221L,
-2450808199545048250L,
-6517435817046180917L,
-3433321727429234998L,
-2591586258908763451L,
3847750870868804507L,
6603614438546398643L,
-7598682191291031287L,
8710261565627204971L,
4753389483755344355L,
-4645333069458786881L,
-6742695046613492214L,
643070478568866643L,
-7543096104151965610L,
7171495384655926161L,
595063872610714431L,
3292310150781130424L,
4326847806055440904L,
-4580020566072794152L,
3142286571820373678L,
5530356537440155930L,
546372639737516181L,
7401214477400367500L,
7406531960402873109L,
3287639667219172570L,
4977301681213633671L,
5253257820925174498L,
2906216636104297878L,
6142955758238347523L,
-3498651268741727235L,
-5875053958265588593L,
3896719087169993883L,
-910904726885775073L,
380107493197368177L,
-4993591912695447004L,
2970487257212582761L,
2551762717569548774L,
953061649962736812L,
8949739538606589463L,
-2962839167079475801L,
-1375673191272573835L,
3761793818361866390L,
-389577789190726878L,
5661262051502180269L,
-6558556411143987683L,
-702798336372315031L,
-336662820551371779L,
998576401126580155L,
-5945021269112582755L,
6108533925730179871L,
2207095297001999618L,
-9042779159998880435L,
-6177868444342118372L,
6775965402605895077L,
-3788428885163306576L,
7790055010527190387L,
3581587652196995358L,
-6176354155561607694L,
-5859381340906321207L,
395898765763528395L,
8132967590863909348L,
-3329092504090544483L,
-6785855381158040247L,
1497218517051796750L,
-5352392845588925911L,
-6271364901230559194L,
2314830370653350118L,
-7617588269001325450L,
1423166885758213795L,
8538612578307869519L,
-61918791718295474L,
-8177103503192338593L,
-4740086042584326695L,
3677931948215558698L,
6558856291580149558L,
2674975452453336335L,
5133796555646930522L,
5139252693299337100L,
7949476871295347205L,
4407815324662880678L,
-3758305875280581215L,
6066309507576587415L,
-7368508486398350973L,
-3181640264332856492L,
6905100869343314145L,
3677177673848733417L,
8862933624870506941L,
-8575223195813810568L,
9178470351355678144L,
4677809017145408358L,
-1194833416287894989L,
3436364743255571183L,
-5204770725795363579L,
560599448536335263L,
-3192077522964776200L,
-751575299648803575L,
6334581746534596579L,
-8358187891202563300L,
-1462480609823525055L,
5605961062646987941L,
4968399805931440889L,
7968693270782626653L,
-5868205923557518188L,
1830234928743560617L,
-8435261076693154407L,
2138416970728681332L,
8088740745199685138L,
806532400344230520L,
1800590379902909333L,
-8909128842071238901L,
-7357495566969170860L,
3679766664126940553L,
2060050474865839094L,
2363972840121763414L,
525695004292982714L,
-1224842191746529593L,
7011317848855545003L,
-6337167558180299938L,
-5184688833363785939L,
-8426673387248359061L,
-5035438815930785229L,
3521810320608058994L,
4803742557254962242L,
6623527039545786598L,
-1221475882122634738L,
-3344794405518401087L,
6510298498414053658L,
2844753907937720338L,
90502309714994895L,
-750403235344282494L,
-4825474181021465833L,
-3405519947983849510L,
3503875590944089793L,
7286294700691822468L,
7828126881500292486L,
8437899353709338096L,
136052254470293480L,
1113259077339995086L,
-8244887265606191121L,
8089569503800461649L,
-1429698194850157567L,
1575595674002364989L,
3576095286627428675L,
-7653655285807569222L,
-6053506977362539111L,
-3923855345805787169L,
-8001149080454232377L,
-4382867706931832271L,
4212860258835896297L,
4207674254247034014L,
5519424058779519159L,
-754483042161434654L,
1434113479814210082L,
-6416645032698336896L,
5624329676066514819L,
-8229557208322175959L,
3922640911653270376L,
7826932478782081910L,
-4862787164488635842L,
1449234668827944573L,
-1781657689570106327L,
5442827552725289699L,
3589862161007644641L,
4787115581650652778L,
-3512152721942525726L,
-6750103117958685206L,
5012970446659949261L,
6797752795961689017L,
5086454597639943700L,
-7616068364979994076L,
1492846825433110217L,
2967476304433704510L,
-8413824338284112078L,
-1319049442043273974L,
-1756090916806844109L,
-9061091728950139525L,
-6864767830358160810L,
4879532090226251157L,
5528644708740739488L
};
}
| 2,325 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/MemoryRequestServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
/**
* The MemoryRequestServer is a callback interface to provide a means for direct (off-heap), heap and ByteBuffer
* backed resources to request more memory.
*
* @author Lee Rhodes
*/
public interface MemoryRequestServer {

  /**
   * Request a new WritableMemory with the given capacity. The current WritableMemory is used to
   * determine the byte order of the returned WritableMemory and for other checks.
   *
   * @param currentWritableMemory the current WritableMemory of the client. It must be non-null.
   * @param capacityBytes the capacity being requested. It must be &ge; 0.
   * @return new WritableMemory with the given capacity.
   */
  WritableMemory request(WritableMemory currentWritableMemory, long capacityBytes);

  /**
   * Request that the given AutoCloseable resource be closed. This only applies to resources
   * allocated using WritableMemory.allocateDirect(...).
   * The implementation may ignore this request depending on the application design.
   *
   * @param memToClose the relevant WritableMemory to be considered for closing. It must be non-null.
   * @param newMemory the newly allocated WritableMemory. It must be non-null.
   * This is returned from the client to facilitate tracking for the convenience of the resource owner.
   */
  void requestClose(final WritableMemory memToClose, WritableMemory newMemory);
}
| 2,326 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/XxHash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
import static org.apache.datasketches.memory.internal.XxHash64.hash;
import static org.apache.datasketches.memory.internal.XxHash64.hashBooleans;
import static org.apache.datasketches.memory.internal.XxHash64.hashBytes;
import static org.apache.datasketches.memory.internal.XxHash64.hashChars;
import static org.apache.datasketches.memory.internal.XxHash64.hashDoubles;
import static org.apache.datasketches.memory.internal.XxHash64.hashFloats;
import static org.apache.datasketches.memory.internal.XxHash64.hashInts;
import static org.apache.datasketches.memory.internal.XxHash64.hashLongs;
import static org.apache.datasketches.memory.internal.XxHash64.hashShorts;
/**
* The XxHash is a fast, non-cryptographic, 64-bit hash function that has
* excellent avalanche and 2-way bit independence properties.
* This java version adapted the C++ version and the OpenHFT/Zero-Allocation-Hashing implementation
* referenced below as inspiration.
*
* <p>The C++ source repository:
* <a href="https://github.com/Cyan4973/xxHash">
* https://github.com/Cyan4973/xxHash</a>. It has a BSD 2-Clause License:
* <a href="http://www.opensource.org/licenses/bsd-license.php">
* http://www.opensource.org/licenses/bsd-license.php</a>. See LICENSE.
*
* <p>Portions of this code were adapted from
* <a href="https://github.com/OpenHFT/Zero-Allocation-Hashing/blob/master/src/main/java/net/openhft/hashing/XxHash.java">
* OpenHFT/Zero-Allocation-Hashing</a>, which has an Apache 2 license as does this site. See LICENSE.
*
* @author Lee Rhodes
*/
public final class XxHash {

  /** Utility class: not instantiable. */
  private XxHash() { /* singleton */ }

  /**
   * Computes the 64-bit XxHash of a range of the given boolean array.
   *
   * @param arr the source array
   * @param offsetBooleans index of the first element to hash
   * @param lengthBooleans number of elements to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashBooleanArr(final boolean[] arr, final long offsetBooleans,
      final long lengthBooleans, final long seed) {
    return hashBooleans(arr, offsetBooleans, lengthBooleans, seed);
  }

  /**
   * Computes the 64-bit XxHash of a range of the given byte array.
   *
   * @param arr the source array
   * @param offsetBytes index of the first element to hash
   * @param lengthBytes number of elements to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashByteArr(final byte[] arr, final long offsetBytes,
      final long lengthBytes, final long seed) {
    return hashBytes(arr, offsetBytes, lengthBytes, seed);
  }

  /**
   * Computes the 64-bit XxHash of a range of the given short array.
   *
   * @param arr the source array
   * @param offsetShorts index of the first element to hash
   * @param lengthShorts number of elements to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashShortArr(final short[] arr, final long offsetShorts,
      final long lengthShorts, final long seed) {
    return hashShorts(arr, offsetShorts, lengthShorts, seed);
  }

  /**
   * Computes the 64-bit XxHash of a range of the given char array.
   *
   * @param arr the source array
   * @param offsetChars index of the first element to hash
   * @param lengthChars number of elements to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashCharArr(final char[] arr, final long offsetChars,
      final long lengthChars, final long seed) {
    return hashChars(arr, offsetChars, lengthChars, seed);
  }

  /**
   * Computes the 64-bit XxHash of a range of the given int array.
   *
   * @param arr the source array
   * @param offsetInts index of the first element to hash
   * @param lengthInts number of elements to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashIntArr(final int[] arr, final long offsetInts,
      final long lengthInts, final long seed) {
    return hashInts(arr, offsetInts, lengthInts, seed);
  }

  /**
   * Computes the 64-bit XxHash of a range of the given long array.
   *
   * @param arr the source array
   * @param offsetLongs index of the first element to hash
   * @param lengthLongs number of elements to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashLongArr(final long[] arr, final long offsetLongs,
      final long lengthLongs, final long seed) {
    return hashLongs(arr, offsetLongs, lengthLongs, seed);
  }

  /**
   * Returns a 64-bit hash of a single long value.
   * Optimized for speed when only a single long must be hashed.
   *
   * @param in the long value to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashLong(final long in, final long seed) {
    return hash(in, seed);
  }

  /**
   * Computes the 64-bit XxHash of a range of the given float array.
   *
   * @param arr the source array
   * @param offsetFloats index of the first element to hash
   * @param lengthFloats number of elements to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashFloatArr(final float[] arr, final long offsetFloats,
      final long lengthFloats, final long seed) {
    return hashFloats(arr, offsetFloats, lengthFloats, seed);
  }

  /**
   * Computes the 64-bit XxHash of a range of the given double array.
   *
   * @param arr the source array
   * @param offsetDoubles index of the first element to hash
   * @param lengthDoubles number of elements to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashDoubleArr(final double[] arr, final long offsetDoubles,
      final long lengthDoubles, final long seed) {
    return hashDoubles(arr, offsetDoubles, lengthDoubles, seed);
  }

  /**
   * Computes the 64-bit XxHash of a range of characters of the given String.
   *
   * <p>The fully qualified call below is intentional: a static import of
   * {@code XxHash64.hashString} would be shadowed by this method's own name.</p>
   *
   * @param str the source String
   * @param offsetChars index of the first character to hash
   * @param lengthChars number of characters to hash
   * @param seed the hash seed
   * @return the resulting 64-bit hash
   */
  public static long hashString(final String str, final long offsetChars,
      final long lengthChars, final long seed) {
    return org.apache.datasketches.memory.internal.XxHash64.hashString(str, offsetChars, lengthChars, seed);
  }
}
| 2,327 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/MemoryBoundsException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
/**
* Specific RuntimeException for bounds violations.
*
* @author Lee Rhodes
*/
public class MemoryBoundsException extends MemoryException {

  // Required for Serializable subclasses; bump only on incompatible changes.
  private static final long serialVersionUID = 1L;

  /**
   * Constructs a MemoryBoundsException indicating that an operation violated
   * its access bounds.
   *
   * @param details a description of the bounds violation.
   */
  public MemoryBoundsException(final String details) {
    super(details);
  }
}
| 2,328 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/WritableMemory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
import static org.apache.datasketches.memory.internal.Util.negativeCheck;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Objects;
import org.apache.datasketches.memory.internal.BaseWritableMemoryImpl;
import org.apache.datasketches.memory.internal.Prim;
import org.apache.datasketches.memory.internal.ResourceImpl;
/**
* Defines the writable API for offset access to a resource.
*
* <p>The classes in this package are not thread-safe.</p>
*
* @author Lee Rhodes
*/
public interface WritableMemory extends Memory {
//BYTE BUFFER
/**
* Accesses the given <i>ByteBuffer</i> for write operations. The returned <i>WritableMemory</i> object has
* the same byte order, as the given <i>ByteBuffer</i>.
* @param byteBuffer the given <i>ByteBuffer</i>. It must be non-null, with capacity ≥ 0, and writable.
* @return a new <i>WritableMemory</i> for write operations on the given <i>ByteBuffer</i>.
*/
static WritableMemory writableWrap(ByteBuffer byteBuffer) {
return writableWrap(byteBuffer, byteBuffer.order(), defaultMemReqSvr);
}
/**
* Accesses the given <i>ByteBuffer</i> for write operations. The returned <i>WritableMemory</i> object has
* the given byte order, ignoring the byte order of the given <i>ByteBuffer</i> for future writes and following reads.
* However, this does not change the byte order of data already in the <i>ByteBuffer</i>.
* @param byteBuffer the given <i>ByteBuffer</i>. It must be non-null, with capacity ≥ 0, and writable.
* @param byteOrder the byte order to be used. It must be non-null.
* @return a new <i>WritableMemory</i> for write operations on the given <i>ByteBuffer</i>.
*/
static WritableMemory writableWrap(ByteBuffer byteBuffer, ByteOrder byteOrder) {
return writableWrap(byteBuffer, byteOrder, defaultMemReqSvr);
}
/**
* Accesses the given <i>ByteBuffer</i> for write operations. The returned <i>WritableMemory</i> object has
* the given byte order, ignoring the byte order of the given <i>ByteBuffer</i> for future reads and writes.
* However, this does not change the byte order of data already in the <i>ByteBuffer</i>.
* @param byteBuffer the given <i>ByteBuffer</i>. It must be non-null, with capacity ≥ 0, and writable.
* @param byteOrder the byte order to be used. It must be non-null.
* @param memReqSvr A user-specified <i>MemoryRequestServer</i>, which may be null.
* This is a callback mechanism for a user client to request a larger <i>WritableMemory</i>.
* @return a new <i>WritableMemory</i> for write operations on the given <i>ByteBuffer</i>.
*/
static WritableMemory writableWrap(ByteBuffer byteBuffer, ByteOrder byteOrder, MemoryRequestServer memReqSvr) {
Objects.requireNonNull(byteBuffer, "byteBuffer must be non-null");
Objects.requireNonNull(byteOrder, "byteOrder must be non-null");
negativeCheck(byteBuffer.capacity(), "byteBuffer");
if (byteBuffer.isReadOnly()) { throw new ReadOnlyException("byteBuffer must be writable."); }
return BaseWritableMemoryImpl.wrapByteBuffer(byteBuffer, false, byteOrder, memReqSvr);
}
//MAP
/**
* Maps the entire given file into native-ordered WritableMemory for write operations
* Calling this method is equivalent to calling
* {@link #writableMap(File, long, long, ByteOrder) writableMap(file, 0, file.length(), ByteOrder.nativeOrder())}.
* @param file the given file to map. It must be non-null, with length ≥ 0, and writable.
* @return WritableMemory for managing the mapped Memory.
*/
static WritableMemory writableMap(File file) {
return writableMap(file, 0, file.length(), ByteOrder.nativeOrder());
}
/**
* Maps the specified portion of the given file into Memory for write operations.
*
* <p><b>Note:</b> Always qualify this method with the class name, e.g.,
* <i>WritableMemory.map(...)</i>.
* @param file the given file to map. It must be non-null, writable and length ≥ 0.
* @param fileOffsetBytes the position in the given file in bytes. It must not be negative.
* @param capacityBytes the size of the mapped Memory. It must not be negative.
* @param byteOrder the byte order to be used for the given file. It must be non-null.
* @return WritableMemory for managing the mapped Memory.
*/
static WritableMemory writableMap(File file, long fileOffsetBytes, long capacityBytes, ByteOrder byteOrder) {
Objects.requireNonNull(file, "file must be non-null.");
Objects.requireNonNull(byteOrder, "byteOrder must be non-null.");
if (!file.canWrite()) { throw new ReadOnlyException("file must be writable."); }
negativeCheck(file.length(), "file.length()");
negativeCheck(fileOffsetBytes, "fileOffsetBytes");
negativeCheck(capacityBytes, "capacityBytes");
return BaseWritableMemoryImpl.wrapMap(file, fileOffsetBytes, capacityBytes, false, byteOrder);
}
//ALLOCATE DIRECT
/**
* Allocates and provides access to capacityBytes directly in native (off-heap) memory.
* Native byte order is assumed.
* The allocated memory will be 8-byte aligned, but may not be page aligned.
*
* <p><b>NOTE:</b> Native/Direct memory acquired may have garbage in it.
* It is the responsibility of the using application to clear this memory, if required,
* and to call <i>close()</i> when done.</p>
*
* @param capacityBytes the size of the desired memory in bytes. It must be ≥ 0.
* @return WritableMemory for this off-heap resource.
*/
static WritableMemory allocateDirect(long capacityBytes) {
return allocateDirect(capacityBytes, ByteOrder.nativeOrder(), defaultMemReqSvr);
}
/**
* Allocates and provides access to capacityBytes directly in native (off-heap) memory.
* The allocated memory will be 8-byte aligned, but may not be page aligned.
*
* <p><b>NOTE:</b> Native/Direct memory acquired may have garbage in it.
* It is the responsibility of the using application to clear this memory, if required,
* and to call <i>close()</i> when done.</p>
*
* @param capacityBytes the size of the desired memory in bytes. It must be ≥ 0.
* @param byteOrder the given byte order. It must be non-null.
* @param memReqSvr A user-specified MemoryRequestServer, which may be null.
* This is a callback mechanism for a user client of direct memory to request more memory.
* @return WritableMemory for this off-heap resource.
*/
static WritableMemory allocateDirect(long capacityBytes, ByteOrder byteOrder, MemoryRequestServer memReqSvr) {
Objects.requireNonNull(byteOrder, "byteOrder must be non-null");
negativeCheck(capacityBytes, "capacityBytes");
return BaseWritableMemoryImpl.wrapDirect(capacityBytes, byteOrder, memReqSvr);
}
//REGIONS
/**
* A writable region is a writable view of this object.
* This returns a new <i>WritableMemory</i> representing the defined writable region with the
* given offsetBytes and capacityBytes.
* <ul>
* <li>Returned object's origin = this objects' origin + <i>offsetBytes</i></li>
* <li>Returned object's capacity = <i>capacityBytes</i></li>
* </ul>
*
* @param regionOffsetBytes the starting offset with respect to this object. It must be ≥ 0.
* @param capacityBytes the capacity of the returned object in bytes. It must be ≥ 0.
* @return a new <i>WritableMemory</i> representing the defined writable region.
*/
default WritableMemory writableRegion(long regionOffsetBytes, long capacityBytes) {
return writableRegion(regionOffsetBytes, capacityBytes, getByteOrder());
}
/**
* A writable region is a writable view of this object.
* This returns a new <i>WritableMemory</i> representing the defined writable region with the
* given offsetBytes, capacityBytes and byte order.
* <ul>
* <li>Returned object's origin = this objects' origin + <i>offsetBytes</i></li>
* <li>Returned object's capacity = <i>capacityBytes</i></li>
* <li>Returned object's byte order = <i>byteOrder</i></li>
* </ul>
*
* @param offsetBytes the starting offset with respect to this object. It must be ≥ 0.
* @param capacityBytes the capacity of the returned object in bytes. It must be ≥ 0.
* @param byteOrder the given byte order. It must be non-null.
* @return a new <i>WritableMemory</i> representing the defined writable region.
*/
WritableMemory writableRegion(long offsetBytes, long capacityBytes, ByteOrder byteOrder);
//AS WRITABLE BUFFER
/**
 * Returns a new <i>WritableBuffer</i> with a writable view of this object.
 * The returned buffer uses this object's current byte order.
 * <ul>
 * <li>Returned object's origin = this object's origin</li>
 * <li>Returned object's <i>start</i> = 0</li>
 * <li>Returned object's <i>position</i> = 0</li>
 * <li>Returned object's <i>end</i> = this object's capacity</li>
 * <li>Returned object's <i>capacity</i> = this object's capacity</li>
 * <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable</li>
 * </ul>
 * @return a new <i>WritableBuffer</i> with a view of this WritableMemory
 */
default WritableBuffer asWritableBuffer() {
return asWritableBuffer(getByteOrder());
}
/**
 * Returns a new <i>WritableBuffer</i> with a writable view of this object
 * with the given byte order.
 * <ul>
 * <li>Returned object's origin = this object's origin</li>
 * <li>Returned object's <i>start</i> = 0</li>
 * <li>Returned object's <i>position</i> = 0</li>
 * <li>Returned object's <i>end</i> = this object's capacity</li>
 * <li>Returned object's <i>capacity</i> = this object's capacity</li>
 * <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable</li>
 * </ul>
 * @param byteOrder the given byte order
 * @return a new <i>WritableBuffer</i> with a view of this WritableMemory
 */
WritableBuffer asWritableBuffer(ByteOrder byteOrder);
//ALLOCATE HEAP VIA AUTOMATIC BYTE ARRAY
/**
 * Creates on-heap WritableMemory with the given capacity and the native byte order.
 * The default <i>MemoryRequestServer</i> is used.
 * @param capacityBytes the given capacity in bytes. It must be ≥ 0.
 * @return a new WritableMemory for write operations on a new byte array.
 */
static WritableMemory allocate(int capacityBytes) {
return allocate(capacityBytes, ByteOrder.nativeOrder(), defaultMemReqSvr);
}
/**
 * Creates on-heap WritableMemory with the given capacity and the given byte order.
 * The default <i>MemoryRequestServer</i> is used.
 * @param capacityBytes the given capacity in bytes. It must be ≥ 0.
 * @param byteOrder the given byte order to allocate new Memory object with. It must be non-null.
 * @return a new WritableMemory for write operations on a new byte array.
 */
static WritableMemory allocate(int capacityBytes, ByteOrder byteOrder) {
return allocate(capacityBytes, byteOrder, defaultMemReqSvr);
}
/**
 * Creates on-heap WritableMemory with the given capacity and the given byte order.
 * @param capacityBytes the given capacity in bytes. It must be ≥ 0.
 * @param byteOrder the given byte order to allocate new Memory object with. It must be non-null.
 * @param memReqSvr A user-specified <i>MemoryRequestServer</i>, which may be null.
 * This is a callback mechanism for a user client to request a larger <i>WritableMemory</i>.
 * @return a new WritableMemory for write operations on a new byte array.
 */
static WritableMemory allocate(int capacityBytes, ByteOrder byteOrder, MemoryRequestServer memReqSvr) {
  // Validate before allocating: a negative capacity must fail the documented negativeCheck,
  // not surface as a NegativeArraySizeException from the array allocation.
  negativeCheck(capacityBytes, "capacityBytes");
  final byte[] arr = new byte[capacityBytes];
  return writableWrap(arr, 0, capacityBytes, byteOrder, memReqSvr);
}
//ACCESS PRIMITIVE HEAP ARRAYS for WRITE
/**
 * Wraps the given primitive array for write operations assuming native byte order.
 *
 * <p><b>Note:</b> Always qualify this method with the class name, e.g.,
 * <i>WritableMemory.writableWrap(...)</i>.
 * @param array the given primitive array. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(byte[] array) {
return writableWrap(array, 0, array.length, ByteOrder.nativeOrder(), defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations with the given byte order.
 *
 * <p><b>Note:</b> Always qualify this method with the class name, e.g.,
 * <i>WritableMemory.writableWrap(...)</i>.
 * @param array the given primitive array. It must be non-null.
 * @param byteOrder the byte order to be used. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(byte[] array, ByteOrder byteOrder) {
return writableWrap(array, 0, array.length, byteOrder, defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations with the given byte order.
 *
 * <p><b>Note:</b> Always qualify this method with the class name, e.g.,
 * <i>WritableMemory.writableWrap(...)</i>.
 * @param array the given primitive array. It must be non-null.
 * @param offsetBytes the byte offset into the given array. It must be ≥ 0.
 * @param lengthBytes the number of bytes to include from the given array. It must be ≥ 0.
 * @param byteOrder the byte order to be used. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(byte[] array, int offsetBytes, int lengthBytes, ByteOrder byteOrder) {
return writableWrap(array, offsetBytes, lengthBytes, byteOrder, defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations with the given byte order. If the given
 * lengthBytes is zero, backing storage, byte order and read-only status of the returned
 * WritableMemory object are unspecified.
 *
 * <p><b>Note:</b> Always qualify this method with the class name, e.g.,
 * <i>WritableMemory.writableWrap(...)</i>.
 * @param array the given primitive array. It must be non-null.
 * @param offsetBytes the byte offset into the given array. It must be ≥ 0.
 * @param lengthBytes the number of bytes to include from the given array. It must be ≥ 0.
 * @param byteOrder the byte order to be used. It must be non-null.
 * @param memReqSvr A user-specified <i>MemoryRequestServer</i>, which may be null.
 * This is a callback mechanism for a user client to request a larger <i>WritableMemory</i>.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(byte[] array, int offsetBytes, int lengthBytes, ByteOrder byteOrder,
MemoryRequestServer memReqSvr) {
//Validation order is part of observable behavior (which exception fires first); do not reorder.
Objects.requireNonNull(array, "array must be non-null");
Objects.requireNonNull(byteOrder, "byteOrder must be non-null");
negativeCheck(offsetBytes, "offsetBytes");
negativeCheck(lengthBytes, "lengthBytes");
//verifies that [offsetBytes, offsetBytes + lengthBytes) lies within array
ResourceImpl.checkBounds(offsetBytes, lengthBytes, array.length);
return BaseWritableMemoryImpl.wrapHeapArray(array, offsetBytes, lengthBytes, false, byteOrder, memReqSvr);
}
/**
 * Wraps the given primitive array for write operations assuming native byte order.
 * @param array the given primitive array. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(boolean[] array) {
  Objects.requireNonNull(array, "array must be non-null");
  // Widen before shifting, for consistency with the other primitive-array wrappers.
  final long lengthBytes = ((long) array.length) << Prim.BOOLEAN.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, false, ByteOrder.nativeOrder(),
      defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations assuming native byte order.
 * @param array the given primitive array. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(char[] array) {
  Objects.requireNonNull(array, "array must be non-null");
  // Widen to long BEFORE the shift: an int shift overflows for arrays larger than 2^30 chars.
  final long lengthBytes = ((long) array.length) << Prim.CHAR.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, false, ByteOrder.nativeOrder(),
      defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations assuming native byte order.
 * @param array the given primitive array. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(short[] array) {
  // Message references the actual parameter name (was "arr").
  Objects.requireNonNull(array, "array must be non-null");
  // Widen to long BEFORE the shift: an int shift overflows for arrays larger than 2^30 shorts.
  final long lengthBytes = ((long) array.length) << Prim.SHORT.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, false, ByteOrder.nativeOrder(),
      defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations assuming native byte order.
 * @param array the given primitive array. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(int[] array) {
  // Message references the actual parameter name (was "arr").
  Objects.requireNonNull(array, "array must be non-null");
  // Widen to long BEFORE the shift: an int shift overflows for arrays larger than 2^29 ints.
  final long lengthBytes = ((long) array.length) << Prim.INT.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, false, ByteOrder.nativeOrder(),
      defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations assuming native byte order.
 * @param array the given primitive array. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(long[] array) {
  // Message references the actual parameter name (was "arr").
  Objects.requireNonNull(array, "array must be non-null");
  // Widen to long BEFORE the shift: an int shift overflows for arrays larger than 2^28 longs.
  final long lengthBytes = ((long) array.length) << Prim.LONG.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, false, ByteOrder.nativeOrder(),
      defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations assuming native byte order.
 * @param array the given primitive array. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(float[] array) {
  // Message references the actual parameter name (was "arr").
  Objects.requireNonNull(array, "array must be non-null");
  // Widen to long BEFORE the shift: an int shift overflows for arrays larger than 2^29 floats.
  final long lengthBytes = ((long) array.length) << Prim.FLOAT.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, false, ByteOrder.nativeOrder(),
      defaultMemReqSvr);
}
/**
 * Wraps the given primitive array for write operations assuming native byte order.
 * @param array the given primitive array. It must be non-null.
 * @return a new WritableMemory for write operations on the given primitive array.
 */
static WritableMemory writableWrap(double[] array) {
  // Message references the actual parameter name (was "arr").
  Objects.requireNonNull(array, "array must be non-null");
  // Widen to long BEFORE the shift: an int shift overflows for arrays larger than 2^28 doubles.
  final long lengthBytes = ((long) array.length) << Prim.DOUBLE.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, false, ByteOrder.nativeOrder(),
      defaultMemReqSvr);
}
//END OF CONSTRUCTOR-TYPE METHODS
//PRIMITIVE putX() and putXArray()
/**
 * Puts the boolean value at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param value the value to put
 */
void putBoolean(long offsetBytes, boolean value);
/**
 * Puts the boolean array at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param srcArray The source array.
 * @param srcOffsetBooleans offset in array units within srcArray
 * @param lengthBooleans number of array units to transfer
 */
void putBooleanArray(long offsetBytes, boolean[] srcArray, int srcOffsetBooleans, int lengthBooleans);
/**
 * Puts the byte value at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param value the value to put
 */
void putByte(long offsetBytes, byte value);
/**
 * Puts the byte array at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param srcArray The source array.
 * @param srcOffsetBytes offset in array units within srcArray
 * @param lengthBytes number of array units to transfer
 */
void putByteArray(long offsetBytes, byte[] srcArray, int srcOffsetBytes, int lengthBytes);
/**
 * Puts the char value at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param value the value to put
 */
void putChar(long offsetBytes, char value);
/**
 * Puts the char array at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param srcArray The source array.
 * @param srcOffsetChars offset in array units within srcArray
 * @param lengthChars number of array units to transfer
 */
void putCharArray(long offsetBytes, char[] srcArray, int srcOffsetChars, int lengthChars);
/**
 * Encodes characters from the given CharSequence into UTF-8 bytes and puts them into this
 * <i>WritableMemory</i> beginning at the given offsetBytes.
 * This is specifically designed to reduce the production of intermediate objects (garbage),
 * thus significantly reducing pressure on the JVM Garbage Collector.
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param src The source CharSequence to be encoded and put into this WritableMemory. It is
 * the responsibility of the caller to provide sufficient capacity in this
 * <i>WritableMemory</i> for the encoded Utf8 bytes. Characters outside the ASCII range can
 * require 2, 3 or 4 bytes per character to encode.
 * @return the number of bytes encoded
 */
long putCharsToUtf8(long offsetBytes, CharSequence src);
/**
 * Puts the double value at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param value the value to put
 */
void putDouble(long offsetBytes, double value);
/**
 * Puts the double array at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param srcArray The source array.
 * @param srcOffsetDoubles offset in array units within srcArray
 * @param lengthDoubles number of array units to transfer
 */
void putDoubleArray(long offsetBytes, double[] srcArray, int srcOffsetDoubles, int lengthDoubles);
/**
 * Puts the float value at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param value the value to put
 */
void putFloat(long offsetBytes, float value);
/**
 * Puts the float array at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param srcArray The source array.
 * @param srcOffsetFloats offset in array units within srcArray
 * @param lengthFloats number of array units to transfer
 */
void putFloatArray(long offsetBytes, float[] srcArray, int srcOffsetFloats, int lengthFloats);
/**
 * Puts the int value at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param value the value to put
 */
void putInt(long offsetBytes, int value);
/**
 * Puts the int array at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param srcArray The source array.
 * @param srcOffsetInts offset in array units within srcArray
 * @param lengthInts number of array units to transfer
 */
void putIntArray(long offsetBytes, int[] srcArray, int srcOffsetInts, int lengthInts);
/**
 * Puts the long value at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param value the value to put
 */
void putLong(long offsetBytes, long value);
/**
 * Puts the long array at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param srcArray The source array.
 * @param srcOffsetLongs offset in array units within srcArray
 * @param lengthLongs number of array units to transfer
 */
void putLongArray(long offsetBytes, long[] srcArray, int srcOffsetLongs, int lengthLongs);
/**
 * Puts the short value at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param value the value to put
 */
void putShort(long offsetBytes, short value);
/**
 * Puts the short array at the given offset
 * @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
 * @param srcArray The source array.
 * @param srcOffsetShorts offset in array units within srcArray
 * @param lengthShorts number of array units to transfer
 */
void putShortArray(long offsetBytes, short[] srcArray, int srcOffsetShorts, int lengthShorts);
//OTHER WRITE METHODS
/**
 * Clears all bytes of this Memory to zero
 */
void clear();
/**
 * Clears a portion of this Memory to zero.
 * @param offsetBytes offset bytes relative to this Memory start
 * @param lengthBytes the length in bytes
 */
void clear(long offsetBytes, long lengthBytes);
/**
 * Clears the bits defined by the bitMask
 * @param offsetBytes offset bytes relative to this Memory start.
 * @param bitMask the bits set to one in this mask will be cleared in the target byte
 */
void clearBits(long offsetBytes, byte bitMask);
/**
 * Fills all bytes of this Memory region to the given byte value.
 * @param value the given byte value
 */
void fill(byte value);
/**
 * Fills a portion of this Memory region to the given byte value.
 * @param offsetBytes offset bytes relative to this Memory start
 * @param lengthBytes the length in bytes
 * @param value the given byte value
 */
void fill(long offsetBytes, long lengthBytes, byte value);
/**
 * Sets the bits defined by the bitMask
 * @param offsetBytes offset bytes relative to this Memory start
 * @param bitMask the bits set to one in this mask will be set in the target byte
 */
void setBits(long offsetBytes, byte bitMask);
}
| 2,329 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/ReadOnlyException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
/**
 * The exception thrown when attempting to write into a read-only Resource.
 *
 * @author Praveenkumar Venkatesan
 */
public class ReadOnlyException extends MemoryException {
//required by Serializable; change only on an incompatible serialized-form change
private static final long serialVersionUID = 1L;
/**
 * Constructs a ReadOnlyException with the given error message.
 * @param message the error message
 */
public ReadOnlyException(final String message) {
super(message);
}
}
| 2,330 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/WritableBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Objects;
import org.apache.datasketches.memory.internal.BaseWritableBufferImpl;
import org.apache.datasketches.memory.internal.Util;
/**
* Defines the writable API for relative positional access to a resource
*
* <p>The classes in this package are not thread-safe.</p>
*
* @author Lee Rhodes
*/
public interface WritableBuffer extends Buffer {
//BYTE BUFFER
/**
 * Accesses the given <i>ByteBuffer</i> for write operations. The returned <i>WritableBuffer</i> object has
 * the same byte order, as the given <i>ByteBuffer</i>.
 * @param byteBuf the given ByteBuffer. It must be non-null and with capacity ≥ 0.
 * @return a new <i>WritableBuffer</i> for write operations on the given <i>ByteBuffer</i>.
 */
static WritableBuffer writableWrap(ByteBuffer byteBuf) {
  // Check for null BEFORE dereferencing byteBuf.order(); otherwise a null argument surfaces
  // as a bare NullPointerException instead of the documented descriptive message.
  Objects.requireNonNull(byteBuf, "ByteBuffer 'byteBuf' must not be null");
  return writableWrap(byteBuf, byteBuf.order(), defaultMemReqSvr);
}
/**
 * Accesses the given <i>ByteBuffer</i> for write operations. The returned <i>WritableBuffer</i> object has
 * the given byte order, ignoring the byte order of the given <i>ByteBuffer</i> for future writes and following reads.
 * However, this does not change the byte order of data already in the <i>ByteBuffer</i>.
 * @param byteBuf the given ByteBuffer. It must be non-null, writable, and with capacity ≥ 0.
 * @param byteOrder the byte order to be used. It must be non-null.
 * @param memReqSvr A user-specified <i>MemoryRequestServer</i>, which must not be null.
 * This is a callback mechanism for a user client to request a larger <i>WritableBuffer</i>.
 * @return a new <i>WritableBuffer</i> for write operations on the given <i>ByteBuffer</i>.
 * @throws ReadOnlyException if the given ByteBuffer is read-only.
 */
static WritableBuffer writableWrap(ByteBuffer byteBuf, ByteOrder byteOrder, MemoryRequestServer memReqSvr) {
//NOTE(review): memReqSvr is documented as non-null but is not checked here — confirm intent.
Objects.requireNonNull(byteBuf, "ByteBuffer 'byteBuf' must not be null");
Objects.requireNonNull(byteOrder, "ByteOrder 'byteOrder' must not be null");
Util.negativeCheck(byteBuf.capacity(), "byteBuf.capacity");
//a writable view over a read-only ByteBuffer is disallowed
if (byteBuf.isReadOnly()) {
throw new ReadOnlyException("Cannot create a WritableBuffer from a ReadOnly ByteBuffer.");
}
return BaseWritableBufferImpl.wrapByteBuffer(byteBuf, false, byteOrder, memReqSvr);
}
// NO MAP
// NO ALLOCATE DIRECT
//DUPLICATES
/**
* Returns a duplicate writable view of this Buffer with the same but independent values of
* <i>start</i>, <i>position</i> and <i>end</i>.
* <ul>
* <li>Returned object's origin = this object's origin</li>
* <li>Returned object's <i>start</i> = this object's <i>start</i></li>
* <li>Returned object's <i>position</i> = this object's <i>position</i></li>
* <li>Returned object's <i>end</i> = this object's <i>end</i></li>
* <li>Returned object's <i>capacity</i> = this object's <i>capacityBytes</i></li>
* <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable and
* independent of this object's <i>start</i>, <i>position</i> and <i>end</i></li>
* </ul>
* @return a duplicate writable view of this Buffer with the same but independent values of
* <i>start</i>, <i>position</i> and <i>end</i>.
*/
WritableBuffer writableDuplicate();
/**
* Returns a duplicate writable view of this Buffer with the same but independent values of
* <i>start</i>, <i>position</i> and <i>end</i>, but with the specified byteOrder.
* <ul>
* <li>Returned object's origin = this object's origin</li>
* <li>Returned object's <i>start</i> = this object's <i>start</i></li>
* <li>Returned object's <i>position</i> = this object's <i>position</i></li>
* <li>Returned object's <i>end</i> = this object's <i>end</i></li>
* <li>Returned object's <i>capacity</i> = this object's <i>capacityBytes</i></li>
* <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable and
* independent of this object's <i>start</i>, <i>position</i> and <i>end</i></li>
* </ul>
* @param byteOrder the given <i>ByteOrder</i>.
* @return a duplicate writable view of this Buffer with the same but independent values of
* <i>start</i>, <i>position</i> and <i>end</i>.
*/
WritableBuffer writableDuplicate(ByteOrder byteOrder);
//REGIONS
/**
* A writable region is a writable view of this object.
* <ul>
* <li>Returned object's origin = this object's <i>position</i></li>
* <li>Returned object's <i>start</i> = 0</li>
* <li>Returned object's <i>position</i> = 0</li>
* <li>Returned object's <i>end</i> = this object's (<i>end</i> - <i>position</i>)</li>
* <li>Returned object's <i>capacity</i> = this object's (<i>end</i> - <i>position</i>)</li>
* <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable and
* independent of this object's <i>start</i>, <i>position</i> and <i>end</i></li>
* </ul>
* @return a new <i>WritableBuffer</i> representing the defined writable region.
*/
WritableBuffer writableRegion();
/**
* A writable region is a writable view of this object.
* <ul>
* <li>Returned object's origin = this object's origin + <i>offsetBytes</i></li>
* <li>Returned object's <i>start</i> = 0</li>
* <li>Returned object's <i>position</i> = 0</li>
* <li>Returned object's <i>end</i> = <i>capacityBytes</i></li>
* <li>Returned object's <i>capacity</i> = <i>capacityBytes</i></li>
* <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable and
* independent of this object's <i>start</i>, <i>position</i> and <i>end</i></li>
* <li>Returned object's byte order = <i>byteOrder</i></li>
* </ul>
*
* <p><b>Note: </b><i>asWritableMemory()</i> and <i>asMemory()</i>
* will return the originating <i>Memory</i> byte order.</p>
* @param offsetBytes the starting offset with respect to the origin of this <i>WritableBuffer</i>
* @param capacityBytes the <i>capacity</i> of the returned region in bytes
* @param byteOrder the given byte order
* @return a new <i>WritableBuffer</i> representing the defined writable region
* with the given offsetBytes, capacityBytes and byte order.
*/
WritableBuffer writableRegion(long offsetBytes, long capacityBytes,
ByteOrder byteOrder);
//AS WRITABLE MEMORY
/**
 * Convert this WritableBuffer to a WritableMemory.
 * If this object's capacity is zero, the returned object is effectively immutable and
 * the backing storage and byte order are unspecified.
 *
 * <p>NOTE(review): this default delegates with {@code ByteOrder.nativeOrder()} rather than this
 * buffer's own byte order, unlike analogous defaults elsewhere in this API that use
 * {@code getByteOrder()} — confirm this asymmetry is intentional.</p>
 * @return WritableMemory
 */
default WritableMemory asWritableMemory() {
return asWritableMemory(ByteOrder.nativeOrder());
}
/**
* Convert this WritableBuffer to a WritableMemory with the given byte order.
* If this object's capacity is zero, the returned object is effectively immutable and
* the backing storage and byte order are unspecified.
* @param byteOrder the byte order to be used.
* @return WritableMemory
*/
WritableMemory asWritableMemory(ByteOrder byteOrder);
//NO ALLOCATE HEAP VIA AUTOMATIC BYTE ARRAY
//NO ACCESS PRIMITIVE HEAP ARRAYS for WRITE
//PRIMITIVE putX() and putXArray()
/**
* Puts the boolean value at the current position.
* Increments the position by 1.
* @param value the value to put
*/
void putBoolean(boolean value);
/**
* Puts the boolean value at the given offset.
* This does not change the position.
* @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start.
* @param value the value to put
*/
void putBoolean(long offsetBytes, boolean value);
/**
* Puts the boolean array at the current position.
* Increments the position by <i>lengthBooleans - srcOffsetBooleans</i>.
* @param srcArray The source array.
* @param srcOffsetBooleans offset in array units
* @param lengthBooleans number of array units to transfer
*/
void putBooleanArray(boolean[] srcArray, int srcOffsetBooleans,
int lengthBooleans);
/**
* Puts the byte value at the current position.
* Increments the position by <i>Byte.BYTES</i>.
* @param value the value to put
*/
void putByte(byte value);
/**
* Puts the byte value at the given offset.
* This does not change the position.
* @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
* @param value the value to put
*/
void putByte(long offsetBytes, byte value);
/**
* Puts the byte array at the current position.
* Increments the position by <i>Byte.BYTES * (lengthBytes - srcOffsetBytes)</i>.
* @param srcArray The source array.
* @param srcOffsetBytes offset in array units
* @param lengthBytes number of array units to transfer
*/
void putByteArray(byte[] srcArray, int srcOffsetBytes, int lengthBytes);
/**
* Puts the char value at the current position.
* Increments the position by <i>Character.BYTES</i>.
* @param value the value to put
*/
void putChar(char value);
/**
* Puts the char value at the given offset.
* This does not change the position.
* @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
* @param value the value to put
*/
void putChar(long offsetBytes, char value);
/**
* Puts the char array at the current position.
* Increments the position by <i>Character.BYTES * (lengthChars - srcOffsetChars)</i>.
* @param srcArray The source array.
* @param srcOffsetChars offset in array units
* @param lengthChars number of array units to transfer
*/
void putCharArray(char[] srcArray, int srcOffsetChars, int lengthChars);
/**
* Puts the double value at the current position.
* Increments the position by <i>Double.BYTES</i>.
* @param value the value to put
*/
void putDouble(double value);
/**
* Puts the double value at the given offset.
* This does not change the position.
* @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
* @param value the value to put
*/
void putDouble(long offsetBytes, double value);
/**
* Puts the double array at the current position.
* Increments the position by <i>Double.BYTES * (lengthDoubles - srcOffsetDoubles)</i>.
* @param srcArray The source array.
* @param srcOffsetDoubles offset in array units
* @param lengthDoubles number of array units to transfer
*/
void putDoubleArray(double[] srcArray, int srcOffsetDoubles, int lengthDoubles);
/**
* Puts the float value at the current position.
* Increments the position by <i>Float.BYTES</i>.
* @param value the value to put
*/
void putFloat(float value);
/**
* Puts the float value at the given offset.
* This does not change the position.
* @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
* @param value the value to put
*/
void putFloat(long offsetBytes, float value);
/**
* Puts the float array at the current position.
* Increments the position by <i>Float.BYTES * (lengthFloats - srcOffsetFloats)</i>.
* @param srcArray The source array.
* @param srcOffsetFloats offset in array units
* @param lengthFloats number of array units to transfer
*/
void putFloatArray(float[] srcArray, int srcOffsetFloats, int lengthFloats);
/**
* Puts the int value at the current position.
* Increments the position by <i>Integer.BYTES</i>.
* @param value the value to put
*/
void putInt(int value);
/**
* Puts the int value at the given offset.
* This does not change the position.
* @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
* @param value the value to put
*/
void putInt(long offsetBytes, int value);
/**
* Puts the int array at the current position.
* Increments the position by <i>Integer.BYTES * (lengthInts - srcOffsetInts)</i>.
* @param srcArray The source array.
* @param srcOffsetInts offset in array units
* @param lengthInts number of array units to transfer
*/
void putIntArray(int[] srcArray, int srcOffsetInts, int lengthInts);
/**
* Puts the long value at the current position.
* Increments the position by <i>Long.BYTES</i>.
* @param value the value to put
*/
void putLong(long value);
/**
* Puts the long value at the given offset.
* This does not change the position.
* @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
* @param value the value to put
*/
void putLong(long offsetBytes, long value);
/**
* Puts the long array at the current position.
* Increments the position by <i>Long.BYTES * (lengthLongs - srcOffsetLongs)</i>.
* @param srcArray The source array.
* @param srcOffsetLongs offset in array units
* @param lengthLongs number of array units to transfer
*/
void putLongArray(long[] srcArray, int srcOffsetLongs, int lengthLongs);
/**
* Puts the short value at the current position.
* Increments the position by <i>Short.BYTES</i>.
* @param value the value to put
*/
void putShort(short value);
/**
* Puts the short value at the given offset.
* This does not change the position.
* @param offsetBytes offset bytes relative to this <i>WritableMemory</i> start
* @param value the value to put
*/
void putShort(long offsetBytes, short value);
/**
* Puts the short array at the current position.
* Increments the position by <i>Short.BYTES * (lengthShorts - srcOffsetShorts)</i>.
* @param srcArray The source array.
* @param srcOffsetShorts offset in array units
* @param lengthShorts number of array units to transfer
*/
void putShortArray(short[] srcArray, int srcOffsetShorts, int lengthShorts);
// NO ATOMIC METHODS
//OTHER WRITE METHODS
/**
* Clears all bytes of this Buffer from position to end to zero. The position will be set to end.
*/
void clear();
//NO clearBits(...)
/**
* Fills this Buffer from position to end with the given byte value.
* The position will be set to <i>end</i>.
* @param value the byte value with which to fill
*/
void fill(byte value);
//NO fill(offsetBytes, lengthBytes, value)
//NO setBits(...)
}
| 2,331 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/BaseBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
/**
* Defines the relative positional API.
*
* <p>The classes in this package are not thread-safe.</p>
*
* @author Lee Rhodes
*/
public interface BaseBuffer extends Resource {

  /**
   * Increments the current position by the given increment.
   * Asserts that the resource is valid and that the positional invariants are not violated,
   * otherwise, if asserts are enabled, throws an {@link AssertionError}.
   * @param increment the given increment
   * @return BaseBuffer
   */
  BaseBuffer incrementPosition(long increment);

  /**
   * Increments the current position by the given increment.
   * Checks that the resource is valid and that the positional invariants are not violated,
   * otherwise throws an {@link IllegalArgumentException}.
   * @param increment the given increment
   * @return BaseBuffer
   */
  BaseBuffer incrementAndCheckPosition(long increment); //redundant "final" modifier removed

  /**
   * Gets the end position.
   * @return the end position
   */
  long getEnd();

  /**
   * Gets the current position.
   * @return the current position
   */
  long getPosition();

  /**
   * Gets the start position.
   * @return the start position
   */
  long getStart();

  /**
   * The number of elements remaining between the current position and the end position.
   * @return {@code (end - position)}
   */
  long getRemaining();

  /**
   * Returns true if there are elements remaining between the current position and the end position.
   * @return {@code (end - position) > 0}
   */
  boolean hasRemaining();

  /**
   * Resets the current position to the start position.
   * This does not modify any data.
   * @return BaseBuffer
   */
  BaseBuffer resetPosition();

  /**
   * Sets the current position.
   * @param position the given current position.
   * @return BaseBuffer
   * @throws BufferPositionInvariantsException if positional invariants have been violated.
   */
  BaseBuffer setPosition(long position);

  /**
   * Sets start position, current position, and end position.
   * @param start the start position in the buffer
   * @param position the current position between the start and end
   * @param end the end position in the buffer
   * @return BaseBuffer
   * @throws BufferPositionInvariantsException if positional invariants have been violated.
   */
  BaseBuffer setStartPositionEnd(long start, long position, long end);
}
| 2,332 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/Resource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
* Methods common to all memory access resources, including attributes like byte order and capacity.
*
* <p>The classes in this package are not thread-safe.</p>
*
* @author Lee Rhodes
*/
public interface Resource extends AutoCloseable {
/** The default MemoryRequestServer. By policy there is none (null). */
static MemoryRequestServer defaultMemReqSvr = null; //policy choice
/**
* Closes this resource if this can be closed via <em>AutoCloseable</em>.
* If this operation completes without exceptions, this resource will be marked as <em>not alive</em>,
* and subsequent operations on this resource will fail with {@link IllegalStateException}.
*
* @apiNote This operation is not idempotent; that is, closing an already closed resource <em>always</em>
* results in an exception being thrown. This reflects a deliberate design choice: resource state transitions
* should be manifest in the client code; a failure in any of these transitions reveals a bug in the underlying
* application logic.
*
* @throws IllegalStateException if this Resource is not <em>valid</em>.
* @throws IllegalStateException if this method is not accessed from the owning thread.
* @throws UnsupportedOperationException if this resource is not {@link AutoCloseable}.
*/
@Override
void close();
/**
* Returns true if the given object (<em>that</em>) is an instance of this class and has contents equal to
* this object.
* @param that the given Resource object
* @return true if the given object has equal contents to this object.
* @see #equalTo(long, Resource, long, long)
*/
default boolean equalTo(Resource that) {
//equal capacity is a precondition for byte-wise equality of the full range
if (that == null || this.getCapacity() != that.getCapacity()) { return false; }
else { return equalTo(0, that, 0, that.getCapacity()); }
}
/**
* Returns true if the given Resource has equal contents to
* this object in the given range of bytes. This will also check two distinct ranges within the
* same object for equals.
* @param thisOffsetBytes the starting offset in bytes for this object.
* @param that the given Resource
* @param thatOffsetBytes the starting offset in bytes for the given Resource object
* @param lengthBytes the size of the range in bytes
* @return true if the given Resource object has equal contents to this object in the given range of bytes.
* @throws IllegalStateException if either resource is not <em>valid</em>.
* @throws MemoryBoundsException if there is a bounds violation.
*/
boolean equalTo(long thisOffsetBytes, Resource that, long thatOffsetBytes, long lengthBytes);
/**
* Forces any changes made to the contents of this memory-mapped Resource to be written to the storage
* device described by the configured file descriptor.
*
* <p>If the file descriptor associated with this memory-mapped Resource resides on a local storage device then when
* this method returns, it is guaranteed that all changes made to this mapped Resource since it was created, or since
* this method was last invoked, will have been written to that device.</p>
*
* <p>If the file descriptor associated with this memory-mapped Resource does not reside on a local device then no
* such guarantee is made.</p>
*
* <p>If this memory-mapped Resource was not mapped in read/write mode
* ({@link java.nio.channels.FileChannel.MapMode#READ_WRITE}) then invoking this method may have no effect.
* In particular, this method has no effect for files mapped in read-only or private
* mapping modes. This method may or may not have an effect for implementation-specific mapping modes.</p>
*
* @throws IllegalStateException if this Resource is not <em>valid</em>.
* @throws IllegalStateException if this method is not accessed from the owning thread.
* @throws UnsupportedOperationException if this Resource is not memory-mapped, e.g. if {@code isMapped() == false}.
* @throws ReadOnlyException if this Resource is read-only.
* @throws RuntimeException if there is some other error writing the contents of this
* memory-mapped Resource to the associated storage device.
*/
void force();
/**
* Gets the current ByteOrder.
* This may be different from the ByteOrder of the backing resource and {@link ByteOrder#nativeOrder()}.
* @return the current ByteOrder.
*/
ByteOrder getByteOrder();
/**
* Gets the capacity of this object in bytes.
* @return the capacity of this object in bytes
*/
long getCapacity();
/**
* Gets the MemoryRequestServer object, if set, for the below resources to request additional memory.
*
* <p>WritableMemory enables this for ByteBuffer, Heap and Direct Memory backed resources.</p>
*
* <p>WritableBuffer enables this for ByteBuffer backed resources. However, the object returned is in the form of
* a WritableMemory. To convert to WritableBuffer use asWritableBuffer(). To enable for Heap and Direct Buffer
* resources, use the WritableMemory to configure and then call asWritableBuffer().</p>
*
* <p>Map backed resources will always return null.</p>
*
* <p>The user must customize the actions of the MemoryRequestServer by
* implementing the MemoryRequestServer interface.</p>
*
* <p>For WritableMemory, to enable at runtime set your custom MemoryRequestServer using one of these methods:</p>
* <ul><li>{@link WritableMemory#allocateDirect(long, ByteOrder, MemoryRequestServer)}</li>
* <li>{@link WritableMemory#allocate(int, ByteOrder, MemoryRequestServer)}</li>
* <li>{@link WritableMemory#writableWrap(ByteBuffer, ByteOrder, MemoryRequestServer)}</li>
* </ul>
*
* <p>For WritableBuffer, to enable at runtime set your custom MemoryRequestServer using the following method:</p>
* <ul>
* <li>{@link WritableBuffer#writableWrap(ByteBuffer, ByteOrder, MemoryRequestServer)}</li>
* </ul>
*
* <p>Simple implementation examples include the DefaultMemoryRequestServer in the main source tree, as well as
* the ExampleMemoryRequestServerTest and the use with ByteBuffer documented in the DruidIssue11544Test
* in the test source tree.</p>
*
* @return the MemoryRequestServer object or null.
*/
MemoryRequestServer getMemoryRequestServer();
/**
* Returns the offset of address zero of this object relative to the base address of the
* backing resource. This does not include the object header for heap arrays nor the initial
* offset of a memory-mapped file.
* @return the offset of address zero of this object relative to the base address of the
* backing resource.
*/
long getTotalOffset();
/**
* Returns true if this Memory is backed by a ByteBuffer.
* @return true if this Memory is backed by a ByteBuffer.
*/
boolean isByteBufferResource();
/**
* Returns true if the Native ByteOrder is the same as the ByteOrder of the
* current Buffer or Memory and the same ByteOrder as the given byteOrder.
* @param byteOrder the given ByteOrder
* @return true if the Native ByteOrder is the same as the ByteOrder of the
* current Buffer or Memory and the same ByteOrder as the given byteOrder.
*/
boolean isByteOrderCompatible(ByteOrder byteOrder);
/**
* If true, the backing resource is direct (off-heap) memory.
* This is the case for allocated direct memory, memory-mapped files,
* or from a wrapped ByteBuffer that was allocated direct.
* If false, the backing resource is the Java heap.
* @return true if the backing resource is direct (off-heap) memory.
*/
boolean isDirectResource();
/**
* Returns true if this instance is a duplicate of a Buffer instance.
* @return true if this instance is a duplicate of a Buffer instance.
*/
boolean isDuplicateBufferView();
/**
* Returns true if this object is backed by an on-heap primitive array or an on-heap ByteBuffer.
* @return true if this object is backed by an on-heap primitive array or an on-heap ByteBuffer.
*/
boolean isHeapResource();
/**
* Tells whether or not the contents of this memory-mapped Resource is resident in physical memory.
*
* <p>A return value of {@code true} implies that it is highly likely that all of the data in this memory-mapped
* Resource is resident in physical memory and may therefore be accessed without incurring any virtual-memory page
* faults or I/O operations.</p>
*
* <p>A return value of {@code false} does not necessarily imply that all of the data in this memory-mapped Resource
* is not resident in physical memory.</p>
*
* <p>The returned value is a hint, rather than a guarantee, because the underlying operating system may have paged
* out some of this Resource's data by the time that an invocation of this method returns.</p>
*
* @return true if it is likely that all of the data in this memory-mapped Resource is resident in physical memory
*
* @throws IllegalStateException if this Resource is not <em>valid</em>.
* @throws IllegalStateException if this method is not accessed from the owning thread.
* @throws UnsupportedOperationException if this Resource is not memory-mapped, e.g. if {@code isMapped() == false}.
*/
boolean isLoaded();
/**
* If true, this is a <i>Memory</i> or <i>WritableMemory</i> instance, which provides the Memory API.
* The Memory API is the principal API for this Memory Component.
* It provides a rich variety of direct manipulations of four types of resources:
* On-heap memory, direct (off-heap) memory, memory-mapped files, and ByteBuffers.
* If false, this is a <i>Buffer</i> or <i>WritableBuffer</i> instance, which provides the Buffer API.
*
* <p>The Buffer API is largely parallel to the Memory API except that it adds a positional API
* similar to that in <i>ByteBuffer</i>. The positional API is a convenience when iterating over structured
* arrays, or buffering input or output streams (thus the name).</p>
*
* @return true if this is a <i>Memory</i> or <i>WritableMemory</i> instance, which provides the Memory API,
* otherwise this is a <i>Buffer</i> or <i>WritableBuffer</i> instance, which provides the Buffer API.
*/
boolean isMemoryApi();
/**
* Returns true if the backing resource is a memory-mapped file.
* @return true if the backing resource is a memory-mapped file.
*/
boolean isMemoryMappedResource();
/**
* If true, all put and get operations will assume the non-native ByteOrder.
* Otherwise, all put and get operations will assume the native ByteOrder.
* @return true, if all put and get operations will assume the non-native ByteOrder.
*/
boolean isNonNativeOrder();
/**
* Returns true if this object or the backing resource is read-only.
* @return true if this object or the backing resource is read-only.
*/
boolean isReadOnly();
/**
* Returns true if this instance is a region view of another Memory or Buffer.
* @return true if this instance is a region view of another Memory or Buffer
*/
boolean isRegionView();
/**
* Returns true if the backing resource of <i>this</i> is identical with the backing resource
* of <i>that</i>. The capacities must be the same. If <i>this</i> is a region,
* the region offset must also be the same.
* @param that A different non-null Resource
* @return true if the backing resource of <i>this</i> is the same as the backing resource
* of <i>that</i>.
*/
boolean isSameResource(Resource that);
/**
* Returns true if this object is valid and has not been closed.
* This is relevant only for direct (off-heap) memory and memory-mapped Files.
* @return true if this object is valid and has not been closed.
*/
boolean isValid();
/**
* Loads the contents of this memory-mapped Resource into physical memory.
*
* <p>This method makes a best effort to ensure that, when it returns, this contents of the memory-mapped Resource is
* resident in physical memory. Invoking this method may cause some number of page faults and
* I/O operations to occur.</p>
*
* @throws IllegalStateException if this Resource is not <em>valid</em>.
* @throws IllegalStateException if this method is not accessed from the owning thread.
* @throws UnsupportedOperationException if this Resource is not memory-mapped, e.g. if {@code isMapped() == false}.
*/
void load();
/**
* Sets the Default MemoryRequestServer.
* @param memReqSvr the given MemoryRequestServer.
*/
void setMemoryRequestServer(MemoryRequestServer memReqSvr);
/**
* Returns a description of this object with an optional formatted hex string of the data
* for the specified a range. Used primarily for testing.
* @param header a descriptive header
* @param offsetBytes offset bytes relative to this object start
* @param lengthBytes number of bytes to convert to a hex string
* (A {@code withData} flag to include an output listing of the byte data in the given range
* is not exposed by this method's signature.)
* @return a formatted hex string in a human readable array
*/
String toHexString(String header, long offsetBytes, int lengthBytes);
/**
* Returns the 64-bit hash of the sequence of bytes in this object specified by
* <i>offsetBytes</i>, <i>lengthBytes</i> and a <i>seed</i>. Note that the sequence of bytes is
* always processed in the same order independent of endianness.
*
* @param offsetBytes the given offset in bytes to the first byte of the byte sequence.
* @param lengthBytes the given length in bytes of the byte sequence.
* @param seed the given long seed.
* @return the 64-bit hash of the sequence of bytes in this object specified by
* <i>offsetBytes</i> and <i>lengthBytes</i>.
*/
long xxHash64(long offsetBytes, long lengthBytes, long seed);
/**
* Returns a 64-bit hash from a single long. This method has been optimized for speed when only
* a single hash of a long is required.
* @param in A long.
* @param seed A long valued seed.
* @return the hash.
*/
long xxHash64(long in, long seed);
}
| 2,333 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/DefaultMemoryRequestServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
/**
* This is a simple implementation of the MemoryRequestServer that creates space on the Java heap
* for the requesting application. This capability is only available for direct, off-heap
* allocated memory.
*
* <p>Using this default implementation could be something like the following:
*
* <blockquote><pre>
* class OffHeap {
* WritableMemory mem;
* MemoryRequestServer memReqSvr = null;
*
* void add(Object something) {
*
* if (outOfSpace) { // determine if out-of-space
* long spaceNeeded = ...
*
* //Acquire the MemoryRequestServer from the direct Memory the first time.
* //Once acquired, this can be reused if more memory is needed later.
* //This is required for the default implementation because it returns memory on heap
* // and on-heap memory does not carry a reference to the MemoryRequestServer.
* memReqSvr = (memReqSvr == null) ? mem.getMemoryRequestServer() : memReqSvr;
*
* //Request bigger memory
* WritableMemory newMem = memReqSvr.request(mem, spaceNeeded);
*
* //Copy your data from the current memory to the new one and resize
* moveAndResize(mem, newMem);
*
* //You are done with the old memory, so request close.
* //Note that it is up to the owner of the WritableHandle whether or not to
* // actually close the resource.
* memReqSvr.requestClose(mem, newMem);
*
* mem = newMem; //update your reference to memory
* }
*
* //continue with the add process
* }
* }
* </pre></blockquote>
*
*
* @author Lee Rhodes
*/
public final class DefaultMemoryRequestServer implements MemoryRequestServer {

  /**
   * {@inheritDoc}
   *
   * <p>This default implementation satisfies every request with a new allocation on the
   * Java heap, sized as requested and using the byte order of the current memory.
   */
  @Override
  public WritableMemory request(final WritableMemory currentWritableMemory, final long capacityBytes) {
    return WritableMemory.allocate((int) capacityBytes, currentWritableMemory.getByteOrder());
  }

  /**
   * {@inheritDoc}
   *
   * <p>Deliberately a no-op in this default implementation because closing is application
   * specific. Override this method to close explicitly if desired; otherwise the
   * AutoCloseable mechanism will eventually close the resource.
   */
  @Override
  public void requestClose(final WritableMemory memToRelease, final WritableMemory newMemory) {
    //intentionally empty
  }
}
| 2,334 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/BufferPositionInvariantsException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
/**
 * Thrown when a positional operation violates the positional invariants of a Buffer
 * (the required relationship among its start, position, and end values).
 */
public class BufferPositionInvariantsException extends MemoryException {
private static final long serialVersionUID = 1L;
/**
 * Constructs this exception when the associated positional operation has violated
 * the positional invariants equation.
 *
 * @param details the details of the violation.
 */
public BufferPositionInvariantsException(final String details) {
super(details);
}
}
| 2,335 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/Memory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
import static org.apache.datasketches.memory.internal.Util.negativeCheck;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.WritableByteChannel;
import java.util.Objects;
import org.apache.datasketches.memory.internal.BaseWritableMemoryImpl;
import org.apache.datasketches.memory.internal.Prim;
import org.apache.datasketches.memory.internal.ResourceImpl;
/**
* Defines the read-only API for offset access to a resource.
*
* <p>The classes in this package are not thread-safe.</p>
*
* @author Lee Rhodes
*/
public interface Memory extends Resource {
//BYTE BUFFER
/**
 * Accesses the given <i>ByteBuffer</i> for read-only operations. The returned <i>Memory</i> object
 * adopts the byte order of the given <i>ByteBuffer</i>.
 * @param byteBuffer the given <i>ByteBuffer</i>. It must be non-null and with capacity ≥ 0.
 * @return a new <i>Memory</i> for read-only operations on the given <i>ByteBuffer</i>.
 */
static Memory wrap(ByteBuffer byteBuffer) {
  final ByteOrder order = byteBuffer.order();
  return wrap(byteBuffer, order);
}
/**
 * Accesses the given <i>ByteBuffer</i> for read-only operations, using the given byte order for
 * all future reads and writes, ignoring the byte order of the given <i>ByteBuffer</i>.
 * @param byteBuffer the given <i>ByteBuffer</i>. It must be non-null and with capacity ≥ 0.
 * @param byteOrder the byte order to be used. It must be non-null.
 * @return a new <i>Memory</i> for read-only operations on the given <i>ByteBuffer</i>.
 */
static Memory wrap(ByteBuffer byteBuffer, ByteOrder byteOrder) {
  //the order of these checks defines the exception precedence; do not reorder
  Objects.requireNonNull(byteBuffer, "byteBuffer must not be null");
  Objects.requireNonNull(byteOrder, "byteOrder must not be null");
  negativeCheck(byteBuffer.capacity(), "byteBuffer");
  final Memory wrapped = BaseWritableMemoryImpl.wrapByteBuffer(byteBuffer, true, byteOrder, null);
  return wrapped;
}
//MAP
/**
 * Maps the entire given file into native-ordered <i>Memory</i> for read operations.
 * Equivalent to calling
 * {@link #map(File, long, long, ByteOrder) map(file, 0, file.length(), ByteOrder.nativeOrder())}.
 * @param file the given file to map. It must be non-null, length ≥ 0, and readable.
 * @return <i>Memory</i> for managing the mapped memory.
 */
static Memory map(File file) {
  final long wholeFileBytes = file.length();
  return map(file, 0, wholeFileBytes, ByteOrder.nativeOrder());
}
/**
 * Maps the specified portion of the given file into <i>Memory</i> for read operations.
 * @param file the given file to map. It must be non-null and readable.
 * @param fileOffsetBytes the position in the given file in bytes. It must not be negative.
 * @param capacityBytes the size of the mapped memory. It must not be negative.
 * @param byteOrder the byte order to be used for the mapped memory. It must be non-null.
 * @return <i>Memory</i> for managing the mapped memory.
 */
static Memory map(File file, long fileOffsetBytes, long capacityBytes, ByteOrder byteOrder) {
  //argument validation order preserved: null checks, readability, then range checks
  Objects.requireNonNull(file, "file must be non-null.");
  Objects.requireNonNull(byteOrder, "byteOrder must be non-null.");
  if (!file.canRead()) { throw new ReadOnlyException("file must be readable."); }
  negativeCheck(fileOffsetBytes, "fileOffsetBytes");
  negativeCheck(capacityBytes, "capacityBytes");
  final Memory mapped = BaseWritableMemoryImpl.wrapMap(file, fileOffsetBytes, capacityBytes, true, byteOrder);
  return mapped;
}
//REGIONS
/**
 * A region is a read-only view of this object.
 * <ul>
 * <li>Returned object's origin = this object's origin + offsetBytes</li>
 * <li>Returned object's capacity = capacityBytes</li>
 * </ul>
 * <p>NOTE(review): this two-argument overload defaults the region's byte order to
 * {@link ByteOrder#nativeOrder()}, not to this object's current byte order — confirm intended.</p>
 * @param offsetBytes the starting offset with respect to the origin of this <i>Memory</i>. It must be ≥ 0.
 * @param capacityBytes the capacity of the region in bytes. It must be ≥ 0.
 * @return a new <i>Memory</i> representing the defined region based on the given
 * offsetBytes and capacityBytes.
 */
default Memory region(long offsetBytes, long capacityBytes) {
return region(offsetBytes, capacityBytes, ByteOrder.nativeOrder());
}
/**
 * A region is a read-only view of this object.
 * <ul>
 * <li>Returned object's origin = this object's origin + <i>offsetBytes</i></li>
 * <li>Returned object's capacity = <i>capacityBytes</i></li>
 * <li>Returned object's byte order = <i>byteOrder</i></li>
 * </ul>
 * @param offsetBytes the starting offset with respect to the origin of this <i>Memory</i>. It must be ≥ 0.
 * @param capacityBytes the capacity of the region in bytes. It must be ≥ 0.
 * @param byteOrder the given byte order. It must be non-null.
 * @return a new <i>Memory</i> representing the defined region based on the given
 * offsetBytes, capacityBytes and byteOrder.
 */
Memory region(long offsetBytes, long capacityBytes, ByteOrder byteOrder);
//AS BUFFER
/**
 * Returns a new <i>Buffer</i> view of this object.
 * <ul>
 * <li>Returned object's origin = this object's origin</li>
 * <li>Returned object's <i>start</i> = 0</li>
 * <li>Returned object's <i>position</i> = 0</li>
 * <li>Returned object's <i>end</i> = this object's capacity</li>
 * <li>Returned object's <i>capacity</i> = this object's capacity</li>
 * <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable</li>
 * </ul>
 * <p>NOTE(review): this no-argument overload defaults the Buffer's byte order to
 * {@link ByteOrder#nativeOrder()}, not to this object's current byte order — confirm intended.</p>
 * @return a new <i>Buffer</i>
 */
default Buffer asBuffer() {
return asBuffer(ByteOrder.nativeOrder());
}
/**
 * Returns a new <i>Buffer</i> view of this object, with the given byte order.
 * <ul>
 * <li>Returned object's origin = this object's origin</li>
 * <li>Returned object's <i>start</i> = 0</li>
 * <li>Returned object's <i>position</i> = 0</li>
 * <li>Returned object's <i>end</i> = this object's capacity</li>
 * <li>Returned object's <i>capacity</i> = this object's capacity</li>
 * <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable</li>
 * </ul>
 * @param byteOrder the given byte order
 * @return a new <i>Buffer</i> with the given byteOrder.
 */
Buffer asBuffer(ByteOrder byteOrder);
//ACCESS PRIMITIVE HEAP ARRAYS for readOnly
/**
 * Wraps the entire given byte array for read operations assuming native byte order.
 * @param array the given primitive array.
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(byte[] array) {
  Objects.requireNonNull(array, "array must be non-null");
  final int wholeArrayBytes = array.length;
  return wrap(array, 0, wholeArrayBytes, ByteOrder.nativeOrder());
}
/**
 * Wraps the entire given byte array for read operations with the given byte order.
 * @param array the given primitive array.
 * @param byteOrder the byte order to be used
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(byte[] array, ByteOrder byteOrder) {
  final int wholeArrayBytes = array.length;
  return wrap(array, 0, wholeArrayBytes, byteOrder);
}
/**
 * Wraps the given range of the given byte array for read operations with the given byte order.
 * @param array the given primitive array.
 * @param offsetBytes the byte offset into the given array
 * @param lengthBytes the number of bytes to include from the given array
 * @param byteOrder the byte order to be used
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(byte[] array, int offsetBytes, int lengthBytes, ByteOrder byteOrder) {
  Objects.requireNonNull(array, "array must be non-null");
  Objects.requireNonNull(byteOrder, "byteOrder must be non-null");
  negativeCheck(offsetBytes, "offsetBytes");
  negativeCheck(lengthBytes, "lengthBytes");
  ResourceImpl.checkBounds(offsetBytes, lengthBytes, array.length);
  //BUG FIX: the original validated offsetBytes and byteOrder but then ignored both,
  //passing 0 and ByteOrder.nativeOrder() to wrapHeapArray. Pass the actual arguments.
  return BaseWritableMemoryImpl.wrapHeapArray(array, offsetBytes, lengthBytes, true, byteOrder, null);
}
/**
 * Wraps the given boolean array for read operations assuming native byte order.
 * @param array the given primitive array.
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(boolean[] array) {
Objects.requireNonNull(array, "array must be non-null");
//length in bytes is array.length scaled by the element-size shift (presumably 0 for boolean — confirm Prim.BOOLEAN)
final long lengthBytes = array.length << Prim.BOOLEAN.shift();
return BaseWritableMemoryImpl.wrapHeapArray(array, 0, lengthBytes, true, ByteOrder.nativeOrder(), null);
}
/**
 * Wraps the given char array for read operations assuming native byte order.
 * @param array the given primitive array.
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(char[] array) {
  Objects.requireNonNull(array, "array must be non-null");
  //widen to long before shifting: an int shift could overflow for arrays larger than 2^30 elements
  final long lengthBytes = ((long) array.length) << Prim.CHAR.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, true, ByteOrder.nativeOrder(), null);
}
/**
 * Wraps the given short array for read operations assuming native byte order.
 * @param array the given primitive array.
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(short[] array) {
  //message fixed for consistency with the parameter name and sibling overloads ("arr" -> "array")
  Objects.requireNonNull(array, "array must be non-null");
  //widen to long before shifting: an int shift could overflow for arrays larger than 2^30 elements
  final long lengthBytes = ((long) array.length) << Prim.SHORT.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, true, ByteOrder.nativeOrder(), null);
}
/**
 * Wraps the given primitive array for read operations assuming native byte order.
 * @param array the given primitive array.
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(int[] array) {
  //Message normalized to "array" for consistency with the other wrap() overloads.
  Objects.requireNonNull(array, "array must be non-null");
  //Widen to long BEFORE shifting to avoid 32-bit int overflow for very large arrays.
  final long lengthBytes = (long) array.length << Prim.INT.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, true, ByteOrder.nativeOrder(), null);
}
/**
 * Wraps the given primitive array for read operations assuming native byte order.
 * @param array the given primitive array.
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(long[] array) {
  //Message normalized to "array" for consistency with the other wrap() overloads.
  Objects.requireNonNull(array, "array must be non-null");
  //Widen to long BEFORE shifting: with shift 3 this overflows 32-bit int arithmetic
  //for arrays larger than 2^28 longs.
  final long lengthBytes = (long) array.length << Prim.LONG.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, true, ByteOrder.nativeOrder(), null);
}
/**
 * Wraps the given primitive array for read operations assuming native byte order.
 * @param array the given primitive array.
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(float[] array) {
  //Message normalized to "array" for consistency with the other wrap() overloads.
  Objects.requireNonNull(array, "array must be non-null");
  //Widen to long BEFORE shifting to avoid 32-bit int overflow for very large arrays.
  final long lengthBytes = (long) array.length << Prim.FLOAT.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, true, ByteOrder.nativeOrder(), null);
}
/**
 * Wraps the given primitive array for read operations assuming native byte order.
 * @param array the given primitive array.
 * @return a new <i>Memory</i> for read operations
 */
static Memory wrap(double[] array) {
  //Message normalized to "array" for consistency with the other wrap() overloads.
  Objects.requireNonNull(array, "array must be non-null");
  //Widen to long BEFORE shifting: with shift 3 this overflows 32-bit int arithmetic
  //for arrays larger than 2^28 doubles.
  final long lengthBytes = (long) array.length << Prim.DOUBLE.shift();
  return BaseWritableMemoryImpl.wrapHeapArray(array, 0L, lengthBytes, true, ByteOrder.nativeOrder(), null);
}
//PRIMITIVE getX() and getXArray()
//All of these methods use absolute byte offsets; Memory maintains no read position.
//For relative positional access see the Buffer interface.
/**
 * Gets the boolean value at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @return the boolean at the given offset
 */
boolean getBoolean(long offsetBytes);
/**
 * Gets the boolean array at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @param dstArray The preallocated destination array.
 * @param dstOffsetBooleans offset in array units
 * @param lengthBooleans number of array units to transfer
 */
void getBooleanArray(long offsetBytes, boolean[] dstArray, int dstOffsetBooleans, int lengthBooleans);
/**
 * Gets the byte value at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @return the byte at the given offset
 */
byte getByte(long offsetBytes);
/**
 * Gets the byte array at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @param dstArray The preallocated destination array.
 * @param dstOffsetBytes offset in array units
 * @param lengthBytes number of array units to transfer
 */
void getByteArray(long offsetBytes, byte[] dstArray, int dstOffsetBytes, int lengthBytes);
/**
 * Gets the char value at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @return the char at the given offset
 */
char getChar(long offsetBytes);
/**
 * Gets the char array at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @param dstArray The preallocated destination array.
 * @param dstOffsetChars offset in array units
 * @param lengthChars number of array units to transfer
 */
void getCharArray(long offsetBytes, char[] dstArray, int dstOffsetChars, int lengthChars);
/**
 * Gets UTF-8 encoded bytes from this Memory, starting at offsetBytes to a length of
 * utf8LengthBytes, decodes them into characters and appends them to the given Appendable.
 * This is specifically designed to reduce the production of intermediate objects (garbage),
 * thus significantly reducing pressure on the JVM Garbage Collector.
 * @param offsetBytes offset bytes relative to the Memory start
 * @param utf8LengthBytes the number of encoded UTF-8 bytes to decode. It is assumed that the
 * caller has the correct number of utf8 bytes required to decode the number of characters
 * to be appended to dst. Characters outside the ASCII range can require 2, 3 or 4 bytes per
 * character to decode.
 * @param dst the destination Appendable to append the decoded characters to.
 * @return the number of characters decoded
 * @throws IOException if dst.append() throws IOException
 * @throws Utf8CodingException in case of malformed or illegal UTF-8 input
 */
int getCharsFromUtf8(long offsetBytes, int utf8LengthBytes, Appendable dst)
    throws IOException, Utf8CodingException;
/**
 * Gets UTF-8 encoded bytes from this Memory, starting at offsetBytes to a length of
 * utf8LengthBytes, decodes them into characters and appends them to the given StringBuilder.
 * This method does *not* reset the length of the destination StringBuilder before appending
 * characters to it.
 * This is specifically designed to reduce the production of intermediate objects (garbage),
 * thus significantly reducing pressure on the JVM Garbage Collector.
 * @param offsetBytes offset bytes relative to the Memory start
 * @param utf8LengthBytes the number of encoded UTF-8 bytes to decode. It is assumed that the
 * caller has the correct number of utf8 bytes required to decode the number of characters
 * to be appended to dst. Characters outside the ASCII range can require 2, 3 or 4 bytes per
 * character to decode.
 * @param dst the destination StringBuilder to append decoded characters to.
 * @return the number of characters decoded.
 * @throws Utf8CodingException in case of malformed or illegal UTF-8 input
 */
int getCharsFromUtf8(long offsetBytes, int utf8LengthBytes, StringBuilder dst)
    throws Utf8CodingException;
/**
 * Gets the double value at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @return the double at the given offset
 */
double getDouble(long offsetBytes);
/**
 * Gets the double array at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @param dstArray The preallocated destination array.
 * @param dstOffsetDoubles offset in array units
 * @param lengthDoubles number of array units to transfer
 */
void getDoubleArray(long offsetBytes, double[] dstArray, int dstOffsetDoubles, int lengthDoubles);
/**
 * Gets the float value at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @return the float at the given offset
 */
float getFloat(long offsetBytes);
/**
 * Gets the float array at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @param dstArray The preallocated destination array.
 * @param dstOffsetFloats offset in array units
 * @param lengthFloats number of array units to transfer
 */
void getFloatArray(long offsetBytes, float[] dstArray, int dstOffsetFloats, int lengthFloats);
/**
 * Gets the int value at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @return the int at the given offset
 */
int getInt(long offsetBytes);
/**
 * Gets the int array at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @param dstArray The preallocated destination array.
 * @param dstOffsetInts offset in array units
 * @param lengthInts number of array units to transfer
 */
void getIntArray(long offsetBytes, int[] dstArray, int dstOffsetInts, int lengthInts);
/**
 * Gets the long value at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @return the long at the given offset
 */
long getLong(long offsetBytes);
/**
 * Gets the long array at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @param dstArray The preallocated destination array.
 * @param dstOffsetLongs offset in array units
 * @param lengthLongs number of array units to transfer
 */
void getLongArray(long offsetBytes, long[] dstArray, int dstOffsetLongs, int lengthLongs);
/**
 * Gets the short value at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @return the short at the given offset
 */
short getShort(long offsetBytes);
/**
 * Gets the short array at the given offset
 * @param offsetBytes offset bytes relative to this Memory start
 * @param dstArray The preallocated destination array.
 * @param dstOffsetShorts offset in array units
 * @param lengthShorts number of array units to transfer
 */
void getShortArray(long offsetBytes, short[] dstArray, int dstOffsetShorts, int lengthShorts);
//SPECIAL PRIMITIVE READ METHODS: compareTo, copyTo, writeTo
//NOTE(review): whether bytes compare as signed or unsigned in compareTo is not visible
//here — confirm against the implementation before relying on the ordering.
/**
 * Compares the bytes of this Memory to <i>that</i> Memory.
 * Returns <i>(this &lt; that) ? (some negative value) : (this &gt; that) ? (some positive value)
 * : 0;</i>.
 * If all bytes are equal up to the shorter of the two lengths, the shorter length is considered
 * to be less than the other.
 * @param thisOffsetBytes the starting offset for <i>this Memory</i>
 * @param thisLengthBytes the length of the region to compare from <i>this Memory</i>
 * @param that the other Memory to compare with
 * @param thatOffsetBytes the starting offset for <i>that Memory</i>
 * @param thatLengthBytes the length of the region to compare from <i>that Memory</i>
 * @return <i>(this &lt; that) ? (some negative value) : (this &gt; that) ? (some positive value)
 * : 0;</i>
 */
int compareTo(long thisOffsetBytes, long thisLengthBytes, Memory that,
    long thatOffsetBytes, long thatLengthBytes);
/**
 * Copies bytes from a source range of this Memory to a destination range of the given Memory
 * with the same semantics when copying between overlapping ranges of bytes as method
 * {@link java.lang.System#arraycopy(Object, int, Object, int, int)} has. However, if the source
 * and the destination ranges are exactly the same, this method throws {@link
 * IllegalArgumentException}, because it should never be needed in real-world scenarios and
 * therefore indicates a bug.
 * @param srcOffsetBytes the source offset for this Memory
 * @param destination the destination Memory, which may not be Read-Only.
 * @param dstOffsetBytes the destination offset
 * @param lengthBytes the number of bytes to copy
 */
void copyTo(long srcOffsetBytes, WritableMemory destination, long dstOffsetBytes, long lengthBytes);
/**
 * Writes bytes from a source range of this Memory to the given {@code WritableByteChannel}.
 * @param offsetBytes the source offset for this Memory
 * @param lengthBytes the number of bytes to copy
 * @param out the destination WritableByteChannel
 * @throws IOException may occur while writing to the WritableByteChannel
 */
void writeTo(long offsetBytes, long lengthBytes, WritableByteChannel out)
    throws IOException;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
import static org.apache.datasketches.memory.internal.Util.negativeCheck;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Objects;
import org.apache.datasketches.memory.internal.BaseWritableBufferImpl;
/**
 * Defines the read-only API for relative positional access to a resource.
 *
 * <p>The classes in this package are not thread-safe.</p>
 *
 * @author Lee Rhodes
 */
public interface Buffer extends BaseBuffer {

  //BYTE BUFFER
  /**
   * Accesses the given ByteBuffer for read-only operations. The returned Buffer object has the
   * same byte order, as the given ByteBuffer.
   * @param byteBuffer the given ByteBuffer, must not be null.
   * @return a new Buffer for read-only operations on the given ByteBuffer.
   */
  static Buffer wrap(ByteBuffer byteBuffer) {
    //Null-check BEFORE calling byteBuffer.order(): previously a null argument produced a
    //bare NullPointerException from the dereference and the delegate's friendly message
    //could never fire.
    Objects.requireNonNull(byteBuffer, "byteBuffer must not be null");
    return wrap(byteBuffer, byteBuffer.order());
  }

  /**
   * Accesses the given ByteBuffer for read-only operations. The returned Buffer object has
   * the given byte order, ignoring the byte order of the given ByteBuffer.
   * @param byteBuffer the given ByteBuffer, must not be null
   * @param byteOrder the byte order to be used, which may be independent of the byte order
   * state of the given ByteBuffer
   * @return a new Buffer for read-only operations on the given ByteBuffer.
   */
  static Buffer wrap(ByteBuffer byteBuffer, ByteOrder byteOrder) {
    Objects.requireNonNull(byteBuffer, "byteBuffer must not be null");
    Objects.requireNonNull(byteOrder, "byteOrder must not be null");
    negativeCheck(byteBuffer.capacity(), "byteBuffer");
    return BaseWritableBufferImpl.wrapByteBuffer(byteBuffer, true, byteOrder, null);
  }

  //DUPLICATES
  /**
   * Returns a read-only duplicate view of this Buffer with the same but independent values of
   * <i>start</i>, <i>position</i> and <i>end</i>.
   * <ul>
   * <li>Returned object's origin = this object's origin</li>
   * <li>Returned object's <i>start</i> = this object's <i>start</i></li>
   * <li>Returned object's <i>position</i> = this object's <i>position</i></li>
   * <li>Returned object's <i>end</i> = this object's <i>end</i></li>
   * <li>Returned object's <i>capacity</i> = this object's <i>capacityBytes</i></li>
   * <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable and
   * independent of this object's <i>start</i>, <i>position</i> and <i>end</i></li>
   * </ul>
   * @return a read-only duplicate view of this Buffer with the same but independent values of
   * <i>start</i>, <i>position</i> and <i>end</i>.
   */
  Buffer duplicate();

  /**
   * Returns a read-only duplicate view of this Buffer with the same but independent values of
   * <i>start</i>, <i>position</i> and <i>end</i>, but with the specified byteOrder.
   * <ul>
   * <li>Returned object's origin = this object's origin</li>
   * <li>Returned object's <i>start</i> = this object's <i>start</i></li>
   * <li>Returned object's <i>position</i> = this object's <i>position</i></li>
   * <li>Returned object's <i>end</i> = this object's <i>end</i></li>
   * <li>Returned object's <i>capacity</i> = this object's <i>capacityBytes</i></li>
   * <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable and
   * independent of this object's <i>start</i>, <i>position</i> and <i>end</i></li>
   * </ul>
   * @param byteOrder the given <i>ByteOrder</i>.
   * @return a read-only duplicate view of this Buffer with the same but independent values of
   * <i>start</i>, <i>position</i> and <i>end</i>.
   */
  Buffer duplicate(ByteOrder byteOrder);

  //NO MAP
  //REGIONS
  /**
   * A region is a read-only view of this object.
   * <ul>
   * <li>Returned object's origin = this object's <i>position</i></li>
   * <li>Returned object's <i>start</i> = 0</li>
   * <li>Returned object's <i>position</i> = 0</li>
   * <li>Returned object's <i>end</i> = this object's (<i>end</i> - <i>position</i>)</li>
   * <li>Returned object's <i>capacity</i> = this object's (<i>end</i> - <i>position</i>)</li>
   * <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable and
   * independent of this object's <i>start</i>, <i>position</i> and <i>end</i></li>
   * </ul>
   * @return a new <i>Buffer</i> representing the defined region based on the current
   * <i>position</i> and <i>end</i>.
   */
  Buffer region();

  /**
   * A region is a read-only view of this object.
   * <ul>
   * <li>Returned object's origin = this objects' origin + <i>offsetBytes</i></li>
   * <li>Returned object's <i>start</i> = 0</li>
   * <li>Returned object's <i>position</i> = 0</li>
   * <li>Returned object's <i>end</i> = <i>capacityBytes</i></li>
   * <li>Returned object's <i>capacity</i> = <i>capacityBytes</i></li>
   * <li>Returned object's <i>start</i>, <i>position</i> and <i>end</i> are mutable and
   * independent of this object's <i>start</i>, <i>position</i> and <i>end</i></li>
   * <li>Returned object's byte order = <i>byteOrder</i></li>
   * </ul>
   *
   * @param offsetBytes the starting offset with respect to the origin of this <i>Buffer</i>
   * @param capacityBytes the <i>capacity</i> of the returned region in bytes
   * @param byteOrder the given byte order
   * @return a new <i>Buffer</i> representing the defined region
   * based on the current <i>position</i>, <i>end</i> and byteOrder.
   */
  Buffer region(long offsetBytes, long capacityBytes, ByteOrder byteOrder);

  //AS MEMORY
  /**
   * Convert this Buffer to a Memory. The current <i>start</i>, <i>position</i> and <i>end</i>
   * are ignored.
   * @return Memory
   */
  default Memory asMemory() {
    return asMemory(getByteOrder());
  }

  /**
   * Convert this Buffer to a Memory with the given byte order.
   * The current <i>start</i>, <i>position</i> and <i>end</i> are ignored.
   * @param byteOrder the given byte order.
   * @return Memory
   */
  Memory asMemory(ByteOrder byteOrder);

  //NO ACCESS PRIMITIVE HEAP ARRAYS for readOnly
  //PRIMITIVE getX() and getXArray()
  /**
   * Gets the boolean value at the current position.
   * Increments the position by 1.
   * @return the boolean at the current position
   */
  boolean getBoolean();

  /**
   * Gets the boolean value at the given offset.
   * This does not change the position.
   * @param offsetBytes offset bytes relative to this Memory start
   * @return the boolean at the given offset
   */
  boolean getBoolean(long offsetBytes);

  /**
   * Gets the boolean array at the current position.
   * Increments the position by <i>lengthBooleans - dstOffsetBooleans</i>.
   * @param dstArray The preallocated destination array.
   * @param dstOffsetBooleans offset in array units
   * @param lengthBooleans number of array units to transfer
   */
  void getBooleanArray(boolean[] dstArray, int dstOffsetBooleans, int lengthBooleans);

  /**
   * Gets the byte value at the current position.
   * Increments the position by <i>Byte.BYTES</i>.
   * @return the byte at the current position
   */
  byte getByte();

  /**
   * Gets the byte value at the given offset.
   * This does not change the position.
   * @param offsetBytes offset bytes relative to this Memory start
   * @return the byte at the given offset
   */
  byte getByte(long offsetBytes);

  /**
   * Gets the byte array at the current position.
   * Increments the position by <i>Byte.BYTES * (lengthBytes - dstOffsetBytes)</i>.
   * @param dstArray The preallocated destination array.
   * @param dstOffsetBytes offset in array units
   * @param lengthBytes number of array units to transfer
   */
  void getByteArray(byte[] dstArray, int dstOffsetBytes, int lengthBytes);

  /**
   * Gets the char value at the current position.
   * Increments the position by <i>Character.BYTES</i>.
   * @return the char at the current position
   */
  char getChar();

  /**
   * Gets the char value at the given offset.
   * This does not change the position.
   * @param offsetBytes offset bytes relative to this Memory start
   * @return the char at the given offset
   */
  char getChar(long offsetBytes);

  /**
   * Gets the char array at the current position.
   * Increments the position by <i>Character.BYTES * (lengthChars - dstOffsetChars)</i>.
   * @param dstArray The preallocated destination array.
   * @param dstOffsetChars offset in array units
   * @param lengthChars number of array units to transfer
   */
  void getCharArray(char[] dstArray, int dstOffsetChars, int lengthChars);

  /**
   * Gets the double value at the current position.
   * Increments the position by <i>Double.BYTES</i>.
   * @return the double at the current position
   */
  double getDouble();

  /**
   * Gets the double value at the given offset.
   * This does not change the position.
   * @param offsetBytes offset bytes relative to this Memory start
   * @return the double at the given offset
   */
  double getDouble(long offsetBytes);

  /**
   * Gets the double array at the current position.
   * Increments the position by <i>Double.BYTES * (lengthDoubles - dstOffsetDoubles)</i>.
   * @param dstArray The preallocated destination array.
   * @param dstOffsetDoubles offset in array units
   * @param lengthDoubles number of array units to transfer
   */
  void getDoubleArray(double[] dstArray, int dstOffsetDoubles, int lengthDoubles);

  /**
   * Gets the float value at the current position.
   * Increments the position by <i>Float.BYTES</i>.
   * @return the float at the current position
   */
  float getFloat();

  /**
   * Gets the float value at the given offset.
   * This does not change the position.
   * @param offsetBytes offset bytes relative to this Memory start
   * @return the float at the given offset
   */
  float getFloat(long offsetBytes);

  /**
   * Gets the float array at the current position.
   * Increments the position by <i>Float.BYTES * (lengthFloats - dstOffsetFloats)</i>.
   * @param dstArray The preallocated destination array.
   * @param dstOffsetFloats offset in array units
   * @param lengthFloats number of array units to transfer
   */
  void getFloatArray(float[] dstArray, int dstOffsetFloats, int lengthFloats);

  /**
   * Gets the int value at the current position.
   * Increments the position by <i>Integer.BYTES</i>.
   * @return the int at the current position
   */
  int getInt();

  /**
   * Gets the int value at the given offset.
   * This does not change the position.
   * @param offsetBytes offset bytes relative to this Memory start
   * @return the int at the given offset
   */
  int getInt(long offsetBytes);

  /**
   * Gets the int array at the current position.
   * Increments the position by <i>Integer.BYTES * (lengthInts - dstOffsetInts)</i>.
   * @param dstArray The preallocated destination array.
   * @param dstOffsetInts offset in array units
   * @param lengthInts number of array units to transfer
   */
  void getIntArray(int[] dstArray, int dstOffsetInts, int lengthInts);

  /**
   * Gets the long value at the current position.
   * Increments the position by <i>Long.BYTES</i>.
   * @return the long at the current position
   */
  long getLong();

  /**
   * Gets the long value at the given offset.
   * This does not change the position.
   * @param offsetBytes offset bytes relative to this Memory start
   * @return the long at the given offset
   */
  long getLong(long offsetBytes);

  /**
   * Gets the long array at the current position.
   * Increments the position by <i>Long.BYTES * (lengthLongs - dstOffsetLongs)</i>.
   * @param dstArray The preallocated destination array.
   * @param dstOffsetLongs offset in array units
   * @param lengthLongs number of array units to transfer
   */
  void getLongArray(long[] dstArray, int dstOffsetLongs, int lengthLongs);

  /**
   * Gets the short value at the current position.
   * Increments the position by <i>Short.BYTES</i>.
   * @return the short at the current position
   */
  short getShort();

  /**
   * Gets the short value at the given offset.
   * This does not change the position.
   * @param offsetBytes offset bytes relative to this Memory start
   * @return the short at the given offset
   */
  short getShort(long offsetBytes);

  /**
   * Gets the short array at the current position.
   * Increments the position by <i>Short.BYTES * (lengthShorts - dstOffsetShorts)</i>.
   * @param dstArray The preallocated destination array.
   * @param dstOffsetShorts offset in array units
   * @param lengthShorts number of array units to transfer
   */
  void getShortArray(short[] dstArray, int dstOffsetShorts, int lengthShorts);

  //SPECIAL PRIMITIVE READ METHODS: compareTo
  /**
   * Compares the bytes of this Buffer to <i>that</i> Buffer.
   * This uses absolute offsets not the start, position and end.
   * Returns <i>(this &lt; that) ? (some negative value) : (this &gt; that) ? (some positive value)
   * : 0;</i>.
   * If all bytes are equal up to the shorter of the two lengths, the shorter length is
   * considered to be less than the other.
   * @param thisOffsetBytes the starting offset for <i>this Buffer</i>
   * @param thisLengthBytes the length of the region to compare from <i>this Buffer</i>
   * @param that the other Buffer to compare with
   * @param thatOffsetBytes the starting offset for <i>that Buffer</i>
   * @param thatLengthBytes the length of the region to compare from <i>that Buffer</i>
   * @return <i>(this &lt; that) ? (some negative value) : (this &gt; that) ? (some positive value)
   * : 0;</i>
   */
  int compareTo(long thisOffsetBytes, long thisLengthBytes, Buffer that,
      long thatOffsetBytes, long thatLengthBytes);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
/**
 * This exception will be thrown for errors encountered during either the encoding of characters
 * to Utf8 bytes, or the decoding of Utf8 bytes to characters.
 *
 * @author Lee Rhodes
 */
public final class Utf8CodingException extends RuntimeException {
  private static final long serialVersionUID = 1L;

  /**
   * A coding exception occurred processing UTF_8
   * @param message the error message
   */
  public Utf8CodingException(final String message) {
    super(message);
  }

  //DECODE
  /**
   * Exception for a short UTF_8 Decode Byte Sequence
   * @param leadByte the given lead byte
   * @param address the given address
   * @param limit the given limit
   * @param required what is required
   * @return the exception
   */
  public static Utf8CodingException shortUtf8DecodeByteSequence(final byte leadByte, final long address,
      final long limit, final int required) {
    final String s = "Too few Utf8 decode bytes remaining given the leading byte. "
        + shortSeq(leadByte, address, limit, required);
    return new Utf8CodingException(s);
  }

  /**
   * Exception for an illegal UTF_8 Decode Byte Sequence
   * @param bytes the illegal byte sequence
   * @return the exception.
   */
  public static Utf8CodingException illegalUtf8DecodeByteSequence(final byte[] bytes) {
    final String s = "Invalid UTF-8 decode byte sequence: " + badBytes(bytes);
    return new Utf8CodingException(s);
  }

  //ENCODE
  /**
   * Exception for out-of-memory
   * @return the exception
   */
  public static Utf8CodingException outOfMemory() {
    final String s = "Out-of-memory with characters remaining to be encoded";
    return new Utf8CodingException(s);
  }

  /**
   * Exception for an unpaired surrogate
   * @param c The last char to encode is an unpaired surrogate
   * @return the exception plus the unpaired surrogate character
   */
  public static Utf8CodingException unpairedSurrogate(final char c) {
    final String s = "Last char to encode is an unpaired surrogate: 0X"
        + Integer.toHexString(c & 0XFFFF);
    return new Utf8CodingException(s);
  }

  /**
   * Exception for a short UTF_8 encode byte length
   * @param remaining The surrogate pair that is short
   * @return the exception plus the surrogate pair that is short
   */
  public static Utf8CodingException shortUtf8EncodeByteLength(final int remaining) {
    final String s = "Too few MemoryImpl bytes to encode a surrogate pair: " + remaining;
    return new Utf8CodingException(s);
  }

  /**
   * Exception for an illegal surrogate pair
   * @param c1 the first character of the pair
   * @param c2 the second character of the pair
   * @return the exception plus the illegal pair
   */
  public static Utf8CodingException illegalSurrogatePair(final char c1, final char c2) {
    final String s = "Illegal Surrogate Pair: Char 1: " + Integer.toHexString(c1 & 0XFFFF)
        + ", Char 2: " + Integer.toHexString(c2 & 0XFFFF);
    return new Utf8CodingException(s);
  }

  //Formats the context of a truncated byte sequence for the exception message.
  private static String shortSeq(final byte leadByte, final long address, final long limit,
      final int required) {
    final String s = "Lead byte: " + Integer.toHexString(leadByte & 0xFF)
        + ", offset: 0X" + Long.toHexString(address)
        + ", limit: 0X" + Long.toHexString(limit)
        + ", required: " + required;
    return s;
  }

  //Formats the offending bytes as a comma-separated hex list, e.g. "0Xf0, 0X28".
  private static String badBytes(final byte[] bytes) {
    final int len = bytes.length;
    //Guard the empty case: the original unconditionally read bytes[len - 1] and threw
    //ArrayIndexOutOfBoundsException for a zero-length array.
    if (len == 0) { return ""; }
    final StringBuilder sb = new StringBuilder();
    int i = 0;
    for (; i < (len - 1); i++) {
      //Chain appends instead of concatenating inside append(), which defeated the builder.
      sb.append("0X").append(Integer.toHexString(bytes[i] & 0XFF)).append(", ");
    }
    sb.append("0X").append(Integer.toHexString(bytes[i] & 0XFF));
    return sb.toString();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* <p>This package provides high performance primitive and primitive array access to direct (native),
* off-heap memory and memory-mapped file resources, consistent views into
* {@link java.nio.ByteBuffer}, and on-heap primitive arrays. It can be used as a more
* comprehensive and flexible replacement for {@link java.nio.ByteBuffer}.
* </p>
*
* <p>In addition, this package provides:</p>
*
* <ul><li>Two different access APIs: read-only {@link org.apache.datasketches.memory.Memory} and
* {@link org.apache.datasketches.memory.WritableMemory} for absolute offset access,
* and read-only {@link org.apache.datasketches.memory.Buffer} and
* {@link org.apache.datasketches.memory.WritableBuffer}
* for relative positional access (similar to ByteBuffer).</li>
*
* <li>Clean separation of Read-only API from Writable API, which makes writable versus read-only
* resources detectable at compile time.</li>
*
* <li>The conversion from Writable to read-only is just a cast, so no unnecessary objects are
* created. For example:
* <blockquote><pre>
* WritableMemory wMem = ...
* Memory mem = wMem;
* </pre></blockquote>
* </li>
*
* <li> {@link java.lang.AutoCloseable} for the external resources that require it,
* which enables compile-time checks for non-closed resources.</li>
*
* <li>Immediate invalidation of all downstream references of an AutoCloseable
* resource when that resource is closed, either manually or by the JVM.
* This virtually eliminates the possibility of accidentally writing into the memory space
* previously owned by a closed resource.</li>
*
* <li>Improved performance over the prior Memory implementation.</li>
*
* <li>Cleaner internal architecture, which will make it easier to extend in the future.</li>
*
* <li>No external dependencies, which makes it simple to install in virtually any Java environment.
* </li>
* </ul>
*
* <p>More specifically, this package provides access to four different types of resources using
* two different access APIs. These resources can be viewed as contiguous blobs of bytes that provide at least
* byte-level read and write access. The four resources are:</p>
*
* <ul><li>Direct (a.k.a. Native) off-heap memory allocated by the user.</li>
* <li>Memory-mapped files, both writable and read-only.</li>
* <li>{@code ByteBuffers}, both heap-based and direct, writable and read-only.</li>
* <li>Heap-based primitive arrays, which can be accessed as writable or read-only.</li>
* </ul>
*
* <p>The two different access APIs are:</p>
* <ul><li><i>Memory, WritableMemory</i>: Absolute offset addressing into a resource.</li>
* <li><i>Buffer, WritableBuffer</i>: Position relative addressing into a resource.</li>
* </ul>
*
* <p>In addition, all combinations of access APIs and backing resources can be accessed via
* multibyte primitive methods (e.g.
* <i>getLong(...), getLongArray(...), putLong(...), putLongArray(...)</i>) as either
* {@link java.nio.ByteOrder#BIG_ENDIAN} or {@link java.nio.ByteOrder#LITTLE_ENDIAN}.</p>
*
* <p>The resources don't know or care about the access APIs, and the access
* APIs don't really know or care what resource they are accessing.</p>
*
* <p>A Direct or memory-mapped file resource can also be explicitly closed by the user</p>
* <blockquote><pre>
* //Using try-with-resources block:
* try (WritableMemory wmem = WritableMemory.map(File file)) {
* doWork(wMem) // read and write to memory mapped file.
* }
*
* //Using explicit close():
* WritableMemory wmem = WritableMemory.map(File file);
* doWork(wMem) // read and write to memory mapped file.
* wmem.close();
* </pre></blockquote>
*
* <p>Whatever thread of your process is responsible for allocating a direct or memory-mapped resource
* must be responsible for closing it or making sure it gets closed. This is also true for the special
* memory-mapping methods load(), isLoaded() and force().</p>
*
*<p>Moving back and forth between <i>Memory</i> and <i>Buffer</i>:</p>
*<blockquote><pre>
* Memory mem = ...
* Buffer buf = mem.asBuffer();
* ...
* Memory mem2 = buf.asMemory();
* ...
* </pre></blockquote>
*
* <p>Hierarchical memory regions can be easily created:</p>
* <blockquote><pre>
* WritableMemory wMem = ...
* WritableMemory wReg = wMem.writableRegion(offset, length); //OR
* Memory region = wMem.region(offset, length);
* </pre></blockquote>
*
* <p>All methods are checked for bounds violations.</p>
*
* <p>The classes in this package are not thread-safe.</p>
*
* @author Lee Rhodes
*/
package org.apache.datasketches.memory;
| 2,339 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/MemoryException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory;
/**
 * The base RuntimeException used by the datasketches-memory component to signal
 * memory-specific failures.
 *
 * @author Lee Rhodes
 */
public class MemoryException extends RuntimeException {
  private static final long serialVersionUID = 1L;

  /**
   * Creates a new MemoryException with the given detail message and no cause.
   * A cause may be attached later via {@link Throwable#initCause(Throwable)}.
   *
   * @param message the detail message, retrievable later via {@link Throwable#getMessage()}.
   */
  public MemoryException(final String message) {
    super(message);
  }
}
| 2,340 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/DirectNonNativeWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
 * Implementation of {@link WritableMemory} for direct (off-heap) memory with
 * non-native byte order.
 *
 * <p>Instances are thread-confined: the constructing thread becomes the owner and
 * only the owner may {@link #close()} this resource.</p>
 *
 * @author Roman Leventov
 * @author Lee Rhodes
 */
final class DirectNonNativeWritableMemoryImpl extends NonNativeWritableMemoryImpl {
  private final AllocateDirect direct; //holds the native address and the valid flag

  DirectNonNativeWritableMemoryImpl(
      final AllocateDirect direct,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes,
      final MemoryRequestServer memReqSvr) {
    super();
    this.direct = direct;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    //force the type bits for this leaf; initially cannot be ReadOnly
    this.typeId = removeNnBuf(typeId) | DIRECT | MEMORY | NONNATIVE;
    this.cumOffsetBytes = cumOffsetBytes;
    this.memReqSvr = memReqSvr; //in ResourceImpl
    if ((this.owner != null) && (this.owner != Thread.currentThread())) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = Thread.currentThread(); //confine this resource to the constructing thread
  }

  /**
   * Creates a region view over a sub-range of this resource.
   *
   * @param regionOffsetBytes offset of the region relative to this resource's offset
   * @param capacityBytes capacity of the region in bytes
   * @param readOnly if true, the region is read-only
   * @param byteOrder the byte order of the region; must not be null
   * @return a new region leaf with the requested byte order
   */
  @Override
  BaseWritableMemoryImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long newOffsetBytes = offsetBytes + regionOffsetBytes;
    final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    int typeIdOut = removeNnBuf(typeId) | MEMORY | REGION | (readOnly ? READONLY : 0);
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new DirectWritableMemoryImpl(
          direct, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new DirectNonNativeWritableMemoryImpl(
          direct, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
    }
  }

  /**
   * Creates a Buffer (positional API) view of this resource.
   *
   * @param readOnly if true, the buffer is read-only
   * @param byteOrder the byte order of the buffer; must not be null
   * @return a new buffer leaf with the requested byte order
   */
  @Override
  BaseWritableBufferImpl toWritableBuffer(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | BUFFER | (readOnly ? READONLY : 0);
    //Use Util.isNativeByteOrder(..) for consistency with toWritableRegion(..) above;
    //it also rejects a null byteOrder instead of silently treating it as non-native.
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new DirectWritableBufferImpl(
          direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new DirectNonNativeWritableBufferImpl(
          direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    }
  }

  /**
   * Frees the underlying off-heap allocation. May only be called by the owning thread.
   */
  @Override
  public void close() {
    checkValid();
    checkThread(owner);
    direct.close();
  }

  @Override
  Object getUnsafeObject() {
    return null; //off-heap: no backing on-heap object
  }

  @Override
  public boolean isValid() {
    return direct.getValid().get();
  }
}
| 2,341 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/ResourceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import static org.apache.datasketches.memory.internal.Util.characterPad;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryBoundsException;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.ReadOnlyException;
import org.apache.datasketches.memory.Resource;
/**
 * Implements the root Resource methods.
 *
 * @author Lee Rhodes
 */
@SuppressWarnings("restriction")
public abstract class ResourceImpl implements Resource {
  static final String JDK; //must be at least "1.8"
  static final int JDK_MAJOR; //8, 11, 17, etc

  //Used to convert "type" to bytes: bytes = longs << LONG_SHIFT
  static final int BOOLEAN_SHIFT = 0;
  static final int BYTE_SHIFT = 0;
  static final long SHORT_SHIFT = 1;
  static final long CHAR_SHIFT = 1;
  static final long INT_SHIFT = 2;
  static final long LONG_SHIFT = 3;
  static final long FLOAT_SHIFT = 2;
  static final long DOUBLE_SHIFT = 3;

  //class type IDs. Do not change the bit orders
  //The lowest 3 bits are set dynamically
  // 0000 0XXX Group 1
  static final int WRITABLE = 0;
  static final int READONLY = 1;
  static final int REGION = 1 << 1;
  static final int DUPLICATE = 1 << 2; //for Buffer only

  // 000X X000 Group 2
  static final int HEAP = 0;
  static final int DIRECT = 1 << 3;
  static final int MAP = 1 << 4; //Map is always Direct also

  // 00X0 0000 Group 3
  static final int NATIVE = 0;
  static final int NONNATIVE = 1 << 5;

  // 0X00 0000 Group 4
  static final int MEMORY = 0;
  static final int BUFFER = 1 << 6;

  // X000 0000 Group 5
  static final int BYTEBUF = 1 << 7;

  /**
   * The java line separator character as a String.
   */
  public static final String LS = System.getProperty("line.separator");

  public static final ByteOrder NATIVE_BYTE_ORDER = ByteOrder.nativeOrder();
  public static final ByteOrder NON_NATIVE_BYTE_ORDER =
      (NATIVE_BYTE_ORDER == ByteOrder.LITTLE_ENDIAN) ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN;

  static final String NOT_MAPPED_FILE_RESOURCE = "This is not a memory-mapped file resource";
  static final String THREAD_EXCEPTION_TEXT = "Attempted access outside owning thread";

  static {
    final String jdkVer = System.getProperty("java.version");
    final int[] p = parseJavaVersion(jdkVer);
    JDK = p[0] + "." + p[1];
    JDK_MAJOR = (p[0] == 1) ? p[1] : p[0];
  }

  //set by the leaf nodes
  long capacityBytes;
  long cumOffsetBytes;
  long offsetBytes;
  int typeId;
  MemoryRequestServer memReqSvr = null; //set by the user via the leaf nodes
  Thread owner = null;

  /**
   * The root of the Memory inheritance hierarchy
   */
  ResourceImpl() { }

  /**
   * Check the requested offset and length against the allocated size.
   * The invariants equation is: {@code 0 <= reqOff <= reqLen <= reqOff + reqLen <= allocSize}.
   * If this equation is violated a {@link MemoryBoundsException} will be thrown.
   * @param reqOff the requested offset
   * @param reqLen the requested length
   * @param allocSize the allocated size.
   * @throws MemoryBoundsException if the given arguments constitute a violation
   * of the invariants equation expressed above.
   */
  public static void checkBounds(final long reqOff, final long reqLen, final long allocSize) {
    //Any negative intermediate value makes the OR of all terms negative.
    if ((reqOff | reqLen | (reqOff + reqLen) | (allocSize - (reqOff + reqLen))) < 0) {
      throw new MemoryBoundsException(
          "reqOffset: " + reqOff + ", reqLength: " + reqLen
              + ", (reqOff + reqLen): " + (reqOff + reqLen) + ", allocSize: " + allocSize);
    }
  }

  static void checkJavaVersion(final String jdkVer, final int p0, final int p1 ) {
    final boolean ok = ((p0 == 1) && (p1 == 8)) || (p0 == 8) || (p0 == 11) || (p0 == 17);
    if (!ok) { throw new IllegalArgumentException(
        "Unsupported JDK Major Version. It must be one of 1.8, 8, 11, 17: " + jdkVer);
    }
  }

  void checkNotReadOnly() {
    if (isReadOnly()) {
      throw new ReadOnlyException("Cannot write to a read-only Resource.");
    }
  }

  /**
   * This checks that the current thread is the same as the given owner thread.
   * @param owner the given owner thread.
   * @throws IllegalStateException if it is not.
   */
  static final void checkThread(final Thread owner) {
    if (owner != Thread.currentThread()) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
  }

  /**
   * @throws IllegalStateException if this Resource is AutoCloseable, and already closed, i.e., not <em>valid</em>.
   */
  void checkValid() {
    if (!isValid()) {
      throw new IllegalStateException("this Resource is AutoCloseable, and already closed, i.e., not <em>valid</em>.");
    }
  }

  /**
   * Checks that this resource is still valid and throws an IllegalStateException if it is not.
   * Checks that the specified range of bytes is within bounds of this resource, throws
   * {@link MemoryBoundsException} if it's not: i.e. if offsetBytes &lt; 0, or length &lt; 0,
   * or offsetBytes + length &gt; {@link #getCapacity()}.
   * @param offsetBytes the given offset in bytes of this object
   * @param lengthBytes the given length in bytes of this object
   * @throws IllegalStateException if this resource is AutoCloseable and is no longer valid, i.e.,
   * it has already been closed.
   * @throws MemoryBoundsException if the given range violates the memory bounds of this resource.
   */
  public final void checkValidAndBounds(final long offsetBytes, final long lengthBytes) {
    checkValid();
    checkBounds(offsetBytes, lengthBytes, getCapacity());
  }

  /**
   * Checks that this resource is still valid and throws an IllegalStateException if it is not.
   * Checks that the specified range of bytes is within bounds of this resource, throws
   * {@link MemoryBoundsException} if it's not: i.e. if offsetBytes &lt; 0, or length &lt; 0,
   * or offsetBytes + length &gt; {@link #getCapacity()}.
   * Checks that this resource is writable, and throws a ReadOnlyException if it is not.
   * @param offsetBytes the given offset in bytes of this object
   * @param lengthBytes the given length in bytes of this object
   * @throws IllegalStateException if this resource is AutoCloseable and is no longer valid, i.e.,
   * it has already been closed.
   * @throws MemoryBoundsException if the given range violates the memory bounds of this resource.
   * @throws ReadOnlyException if this resource is read-only.
   */
  final void checkValidAndBoundsForWrite(final long offsetBytes, final long lengthBytes) {
    checkValid();
    checkBounds(offsetBytes, lengthBytes, getCapacity());
    if (isReadOnly()) {
      throw new ReadOnlyException("Memory is read-only.");
    }
  }

  @Override
  public void close() {
    /* Overridden by the actual AutoCloseable leaf sub-classes. */
    throw new UnsupportedOperationException("This resource is not AutoCloseable.");
  }

  @Override
  public final boolean equalTo(final long thisOffsetBytes, final Resource that,
      final long thatOffsetBytes, final long lengthBytes) {
    if (that == null) { return false; }
    return CompareAndCopy.equals(this, thisOffsetBytes, (ResourceImpl) that, thatOffsetBytes, lengthBytes);
  }

  @Override
  public void force() { //overridden by Map Leaves
    throw new UnsupportedOperationException(NOT_MAPPED_FILE_RESOURCE);
  }

  //Overridden by ByteBuffer Leaves. Used internally and for tests.
  ByteBuffer getByteBuffer() {
    return null;
  }

  @Override
  public final ByteOrder getByteOrder() {
    return isNativeOrder(getTypeId()) ? NATIVE_BYTE_ORDER : NON_NATIVE_BYTE_ORDER;
  }

  @Override
  public long getCapacity() {
    checkValid();
    return capacityBytes;
  }

  /**
   * Gets the cumulative offset in bytes of this object from the backing resource including the given
   * localOffsetBytes. This offset may also include other offset components such as the native off-heap
   * memory address, DirectByteBuffer split offsets, region offsets, and object arrayBaseOffsets.
   *
   * @param addOffsetBytes offset to be added to the cumulative offset.
   * @return the cumulative offset in bytes of this object from the backing resource including the
   * given offsetBytes.
   */
  long getCumulativeOffset(final long addOffsetBytes) {
    return cumOffsetBytes + addOffsetBytes;
  }

  @Override
  public MemoryRequestServer getMemoryRequestServer() { return memReqSvr; }

  @Override
  public long getTotalOffset() {
    return offsetBytes;
  }

  //Overridden by all leaves
  int getTypeId() {
    return typeId;
  }

  //Overridden by Heap and ByteBuffer leaves. Made public as getArray() in WritableMemoryImpl and
  // WritableBufferImpl
  Object getUnsafeObject() {
    return null;
  }

  @Override
  public boolean isByteBufferResource() {
    return (getTypeId() & BYTEBUF) > 0;
  }

  @Override
  public final boolean isByteOrderCompatible(final ByteOrder byteOrder) {
    final ByteOrder typeBO = getByteOrder();
    return typeBO == ByteOrder.nativeOrder() && typeBO == byteOrder;
  }

  final boolean isBufferApi(final int typeId) {
    return (typeId & BUFFER) > 0;
  }

  @Override
  public final boolean isDirectResource() {
    return getUnsafeObject() == null; //off-heap resources have no backing on-heap object
  }

  @Override
  public boolean isDuplicateBufferView() {
    return (getTypeId() & DUPLICATE) > 0;
  }

  @Override
  public final boolean isHeapResource() {
    checkValid();
    return getUnsafeObject() != null;
  }

  @Override
  public boolean isLoaded() { //overridden by Map Leaves
    throw new IllegalStateException(NOT_MAPPED_FILE_RESOURCE);
  }

  @Override
  public boolean isMemoryMappedResource() {
    return (getTypeId() & MAP) > 0;
  }

  @Override
  public boolean isMemoryApi() {
    return (getTypeId() & BUFFER) == 0;
  }

  final boolean isNativeOrder(final int typeId) { //not used
    return (typeId & NONNATIVE) == 0;
  }

  @Override
  public boolean isNonNativeOrder() {
    return (getTypeId() & NONNATIVE) > 0;
  }

  @Override
  public final boolean isReadOnly() {
    checkValid();
    return (getTypeId() & READONLY) > 0;
  }

  @Override
  public boolean isRegionView() {
    return (getTypeId() & REGION) > 0;
  }

  @Override
  public boolean isSameResource(final Resource that) {
    checkValid();
    if (that == null) { return false; }
    final ResourceImpl that1 = (ResourceImpl) that;
    that1.checkValid();
    if (this == that1) { return true; }
    //Same backing resource iff same base offset, capacity, backing object and ByteBuffer.
    return getCumulativeOffset(0) == that1.getCumulativeOffset(0)
        && getCapacity() == that1.getCapacity()
        && getUnsafeObject() == that1.getUnsafeObject()
        && getByteBuffer() == that1.getByteBuffer();
  }

  //Overridden by Direct and Map leaves
  @Override
  public boolean isValid() {
    return true;
  }

  @Override
  public void load() { //overridden by Map leaves
    throw new IllegalStateException(NOT_MAPPED_FILE_RESOURCE);
  }

  private static String pad(final String s, final int fieldLen) {
    return characterPad(s, fieldLen, ' ' , true);
  }

  /**
   * Returns first two number groups of the java version string.
   * @param jdkVer the java version string from System.getProperty("java.version").
   * @return first two number groups of the java version string.
   * @throws IllegalArgumentException for an improper Java version string.
   */
  static int[] parseJavaVersion(final String jdkVer) {
    final int p0, p1;
    try {
      //Fix: the regex must be a character class. The previous form "^0-9\\." was an anchored
      //literal and never matched, so suffixes like "+35" or "_201" leaked into parseInt.
      String[] parts = jdkVer.trim().split("[^0-9\\.]"); //grab only number groups and "."
      parts = parts[0].split("\\."); //split out the number groups
      p0 = Integer.parseInt(parts[0]); //the first number group
      p1 = (parts.length > 1) ? Integer.parseInt(parts[1]) : 0; //2nd number group, or 0
    } catch (final NumberFormatException | ArrayIndexOutOfBoundsException e) {
      throw new IllegalArgumentException("Improper Java -version string: " + jdkVer + "\n" + e);
    }
    checkJavaVersion(jdkVer, p0, p1);
    return new int[] {p0, p1};
  }

  //REACHABILITY FENCE
  static void reachabilityFence(final Object obj) { }

  final static int removeNnBuf(final int typeId) { return typeId & ~NONNATIVE & ~BUFFER; }

  final static int setReadOnlyBit(final int typeId, final boolean readOnly) {
    return readOnly ? typeId | READONLY : typeId & ~READONLY;
  }

  @Override
  public void setMemoryRequestServer(final MemoryRequestServer memReqSvr) { this.memReqSvr = memReqSvr; }

  /**
   * Returns a formatted hex string of an area of this object.
   * Used primarily for testing.
   * @param state the ResourceImpl
   * @param preamble a descriptive header
   * @param offsetBytes offset bytes relative to the MemoryImpl start
   * @param lengthBytes number of bytes to convert to a hex string
   * @return a formatted hex string in a human readable array
   */
  static final String toHex(final ResourceImpl state, final String preamble, final long offsetBytes,
      final int lengthBytes) {
    final long capacity = state.getCapacity();
    ResourceImpl.checkBounds(offsetBytes, lengthBytes, capacity);
    final StringBuilder sb = new StringBuilder();
    final Object uObj = state.getUnsafeObject();
    final String uObjStr;
    final long uObjHeader;
    if (uObj == null) {
      uObjStr = "null";
      uObjHeader = 0;
    } else {
      uObjStr = uObj.getClass().getSimpleName() + ", " + (uObj.hashCode() & 0XFFFFFFFFL);
      uObjHeader = UnsafeUtil.getArrayBaseOffset(uObj.getClass());
    }
    final ByteBuffer bb = state.getByteBuffer();
    final String bbStr = bb == null ? "null"
        : bb.getClass().getSimpleName() + ", " + (bb.hashCode() & 0XFFFFFFFFL);
    final MemoryRequestServer memReqSvr = state.getMemoryRequestServer();
    final String memReqStr = memReqSvr != null
        ? memReqSvr.getClass().getSimpleName() + ", " + (memReqSvr.hashCode() & 0XFFFFFFFFL)
        : "null";
    final long cumBaseOffset = state.getCumulativeOffset(0);
    sb.append(preamble).append(LS);
    sb.append("UnsafeObj, hashCode : ").append(uObjStr).append(LS);
    sb.append("UnsafeObjHeader     : ").append(uObjHeader).append(LS);
    sb.append("ByteBuf, hashCode   : ").append(bbStr).append(LS);
    sb.append("RegionOffset        : ").append(state.getTotalOffset()).append(LS);
    sb.append("Capacity            : ").append(capacity).append(LS);
    sb.append("CumBaseOffset       : ").append(cumBaseOffset).append(LS);
    sb.append("MemReq, hashCode    : ").append(memReqStr).append(LS);
    sb.append("Valid               : ").append(state.isValid()).append(LS);
    sb.append("Read Only           : ").append(state.isReadOnly()).append(LS);
    sb.append("Type Byte Order     : ").append(state.getByteOrder().toString()).append(LS);
    sb.append("Native Byte Order   : ").append(ByteOrder.nativeOrder().toString()).append(LS);
    sb.append("JDK Runtime Version : ").append(UnsafeUtil.JDK).append(LS);
    //Data detail
    sb.append("Data, littleEndian  :  0  1  2  3  4  5  6  7");
    for (long i = 0; i < lengthBytes; i++) {
      final int b = unsafe.getByte(uObj, cumBaseOffset + offsetBytes + i) & 0XFF;
      if (i % 8 == 0) { //row header
        sb.append(String.format("%n%20s: ", offsetBytes + i));
      }
      sb.append(String.format("%02x ", b));
    }
    sb.append(LS);
    return sb.toString();
  }

  @Override
  public final String toHexString(final String header, final long offsetBytes,
      final int lengthBytes) {
    checkValid();
    final String klass = this.getClass().getSimpleName();
    final String s1 = String.format("(..., %d, %d)", offsetBytes, lengthBytes);
    final long hcode = hashCode() & 0XFFFFFFFFL;
    final String call = ".toHexString" + s1 + ", hashCode: " + hcode;
    final StringBuilder sb = new StringBuilder();
    sb.append("### ").append(klass).append(" SUMMARY ###").append(LS);
    sb.append("Header Comment      : ").append(header).append(LS);
    sb.append("Call Parameters     : ").append(call);
    return toHex(this, sb.toString(), offsetBytes, lengthBytes);
  }

  /**
   * Decodes the resource type. This is primarily for debugging.
   * @param typeId the given typeId
   * @return a human readable string.
   */
  static final String typeDecode(final int typeId) {
    final StringBuilder sb = new StringBuilder();
    final int group1 = typeId & 0x7;
    switch (group1) { // 0000 0XXX
      case 0 : sb.append(pad("Writable + ",32)); break;
      case 1 : sb.append(pad("ReadOnly + ",32)); break;
      case 2 : sb.append(pad("Writable + Region + ",32)); break;
      case 3 : sb.append(pad("ReadOnly + Region + ",32)); break;
      case 4 : sb.append(pad("Writable + Duplicate + ",32)); break;
      case 5 : sb.append(pad("ReadOnly + Duplicate + ",32)); break;
      case 6 : sb.append(pad("Writable + Region + Duplicate + ",32)); break;
      case 7 : sb.append(pad("ReadOnly + Region + Duplicate + ",32)); break;
      default: break;
    }
    final int group2 = (typeId >>> 3) & 0x3;
    switch (group2) { // 000X X000
      case 0 : sb.append(pad("Heap + ",15)); break;
      case 1 : sb.append(pad("Direct + ",15)); break;
      case 2 : sb.append(pad("Map + Direct + ",15)); break;
      case 3 : sb.append(pad("Map + Direct + ",15)); break;
      default: break;
    }
    final int group3 = (typeId >>> 5) & 0x1;
    switch (group3) { // 00X0 0000
      case 0 : sb.append(pad("NativeOrder + ",17)); break;
      case 1 : sb.append(pad("NonNativeOrder + ",17)); break;
      default: break;
    }
    final int group4 = (typeId >>> 6) & 0x1;
    switch (group4) { // 0X00 0000
      case 0 : sb.append(pad("Memory + ",9)); break;
      case 1 : sb.append(pad("Buffer + ",9)); break;
      default: break;
    }
    final int group5 = (typeId >>> 7) & 0x1;
    switch (group5) { // X000 0000
      case 0 : sb.append(pad("",10)); break;
      case 1 : sb.append(pad("ByteBuffer",10)); break;
      default: break;
    }
    return sb.toString();
  }

  @Override
  public final long xxHash64(final long offsetBytes, final long lengthBytes, final long seed) {
    checkValid();
    return XxHash64.hash(getUnsafeObject(), getCumulativeOffset(0) + offsetBytes, lengthBytes, seed);
  }

  @Override
  public final long xxHash64(final long in, final long seed) {
    return XxHash64.hash(in, seed);
  }
}
| 2,342 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/NativeWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_CHAR_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_INT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_LONG_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_SHORT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import org.apache.datasketches.memory.WritableMemory;
/**
* Implementation of {@link WritableMemory} for native endian byte order.
* @author Roman Leventov
* @author Lee Rhodes
*/
@SuppressWarnings("restriction")
abstract class NativeWritableMemoryImpl extends BaseWritableMemoryImpl {
//Pass-through constructor
NativeWritableMemoryImpl() { }
///PRIMITIVE getX() and getXArray()
@Override
public char getChar(final long offsetBytes) {
return getNativeOrderedChar(offsetBytes);
}
@Override
public void getCharArray(final long offsetBytes, final char[] dstArray, final int dstOffsetChars,
final int lengthChars) {
final long copyBytes = ((long) lengthChars) << CHAR_SHIFT;
checkValidAndBounds(offsetBytes, copyBytes);
ResourceImpl.checkBounds(dstOffsetChars, lengthChars, dstArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
dstArray,
ARRAY_CHAR_BASE_OFFSET + (((long) dstOffsetChars) << CHAR_SHIFT),
copyBytes);
}
@Override
public double getDouble(final long offsetBytes) {
checkValidAndBounds(offsetBytes, ARRAY_DOUBLE_INDEX_SCALE);
return unsafe.getDouble(getUnsafeObject(), getCumulativeOffset(offsetBytes));
}
@Override
public void getDoubleArray(final long offsetBytes, final double[] dstArray,
final int dstOffsetDoubles, final int lengthDoubles) {
final long copyBytes = ((long) lengthDoubles) << DOUBLE_SHIFT;
checkValidAndBounds(offsetBytes, copyBytes);
ResourceImpl.checkBounds(dstOffsetDoubles, lengthDoubles, dstArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
dstArray,
ARRAY_DOUBLE_BASE_OFFSET + (((long) dstOffsetDoubles) << DOUBLE_SHIFT),
copyBytes);
}
@Override
public float getFloat(final long offsetBytes) {
checkValidAndBounds(offsetBytes, ARRAY_FLOAT_INDEX_SCALE);
return unsafe.getFloat(getUnsafeObject(), getCumulativeOffset(offsetBytes));
}
@Override
public void getFloatArray(final long offsetBytes, final float[] dstArray,
final int dstOffsetFloats, final int lengthFloats) {
final long copyBytes = ((long) lengthFloats) << FLOAT_SHIFT;
checkValidAndBounds(offsetBytes, copyBytes);
ResourceImpl.checkBounds(dstOffsetFloats, lengthFloats, dstArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
dstArray,
ARRAY_FLOAT_BASE_OFFSET + (((long) dstOffsetFloats) << FLOAT_SHIFT),
copyBytes);
}
@Override
public int getInt(final long offsetBytes) {
return getNativeOrderedInt(offsetBytes);
}
@Override
public void getIntArray(final long offsetBytes, final int[] dstArray, final int dstOffsetInts,
final int lengthInts) {
final long copyBytes = ((long) lengthInts) << INT_SHIFT;
checkValidAndBounds(offsetBytes, copyBytes);
ResourceImpl.checkBounds(dstOffsetInts, lengthInts, dstArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
dstArray,
ARRAY_INT_BASE_OFFSET + (((long) dstOffsetInts) << INT_SHIFT),
copyBytes);
}
@Override
public long getLong(final long offsetBytes) {
return getNativeOrderedLong(offsetBytes);
}
@Override
public void getLongArray(final long offsetBytes, final long[] dstArray,
final int dstOffsetLongs, final int lengthLongs) {
final long copyBytes = ((long) lengthLongs) << LONG_SHIFT;
checkValidAndBounds(offsetBytes, copyBytes);
ResourceImpl.checkBounds(dstOffsetLongs, lengthLongs, dstArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
dstArray,
ARRAY_LONG_BASE_OFFSET + (((long) dstOffsetLongs) << LONG_SHIFT),
copyBytes);
}
@Override
public short getShort(final long offsetBytes) {
return getNativeOrderedShort(offsetBytes);
}
@Override
public void getShortArray(final long offsetBytes, final short[] dstArray,
final int dstOffsetShorts, final int lengthShorts) {
final long copyBytes = ((long) lengthShorts) << SHORT_SHIFT;
checkValidAndBounds(offsetBytes, copyBytes);
ResourceImpl.checkBounds(dstOffsetShorts, lengthShorts, dstArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
dstArray,
ARRAY_SHORT_BASE_OFFSET + (((long) dstOffsetShorts) << SHORT_SHIFT),
copyBytes);
}
//PRIMITIVE putX() and putXArray() implementations
@Override
public void putChar(final long offsetBytes, final char value) {
putNativeOrderedChar(offsetBytes, value);
}
@Override
public void putCharArray(final long offsetBytes, final char[] srcArray,
final int srcOffsetChars, final int lengthChars) {
final long copyBytes = ((long) lengthChars) << CHAR_SHIFT;
checkValidAndBoundsForWrite(offsetBytes, copyBytes);
ResourceImpl.checkBounds(srcOffsetChars, lengthChars, srcArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
srcArray,
ARRAY_CHAR_BASE_OFFSET + (((long) srcOffsetChars) << CHAR_SHIFT),
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
copyBytes
);
}
@Override
public void putDouble(final long offsetBytes, final double value) {
checkValidAndBoundsForWrite(offsetBytes, ARRAY_DOUBLE_INDEX_SCALE);
unsafe.putDouble(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
@Override
public void putDoubleArray(final long offsetBytes, final double[] srcArray,
final int srcOffsetDoubles, final int lengthDoubles) {
final long copyBytes = ((long) lengthDoubles) << DOUBLE_SHIFT;
checkValidAndBoundsForWrite(offsetBytes, copyBytes);
ResourceImpl.checkBounds(srcOffsetDoubles, lengthDoubles, srcArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
srcArray,
ARRAY_DOUBLE_BASE_OFFSET + (((long) srcOffsetDoubles) << DOUBLE_SHIFT),
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
copyBytes
);
}
@Override
public void putFloat(final long offsetBytes, final float value) {
checkValidAndBoundsForWrite(offsetBytes, ARRAY_FLOAT_INDEX_SCALE);
unsafe.putFloat(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
@Override
public void putFloatArray(final long offsetBytes, final float[] srcArray,
final int srcOffsetFloats, final int lengthFloats) {
final long copyBytes = ((long) lengthFloats) << FLOAT_SHIFT;
checkValidAndBoundsForWrite(offsetBytes, copyBytes);
ResourceImpl.checkBounds(srcOffsetFloats, lengthFloats, srcArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
srcArray,
ARRAY_FLOAT_BASE_OFFSET + (((long) srcOffsetFloats) << FLOAT_SHIFT),
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
copyBytes
);
}
@Override
public void putInt(final long offsetBytes, final int value) {
putNativeOrderedInt(offsetBytes, value);
}
@Override
public void putIntArray(final long offsetBytes, final int[] srcArray, final int srcOffsetInts,
final int lengthInts) {
final long copyBytes = ((long) lengthInts) << INT_SHIFT;
checkValidAndBoundsForWrite(offsetBytes, copyBytes);
ResourceImpl.checkBounds(srcOffsetInts, lengthInts, srcArray.length);
CompareAndCopy.copyMemoryCheckingDifferentObject(
srcArray,
ARRAY_INT_BASE_OFFSET + (((long) srcOffsetInts) << INT_SHIFT),
getUnsafeObject(),
getCumulativeOffset(offsetBytes),
copyBytes
);
}
@Override
public void putLong(final long offsetBytes, final long value) {
  // This leaf writes longs in native byte order; delegates to the shared native-ordered put.
  putNativeOrderedLong(offsetBytes, value);
}
@Override
public void putLongArray(final long offsetBytes, final long[] srcArray, final int srcOffsetLongs,
    final int lengthLongs) {
  // Copies lengthLongs longs from srcArray[srcOffsetLongs..] into this memory at offsetBytes.
  final long numBytes = ((long) lengthLongs) << LONG_SHIFT;
  // Destination validity/bounds are checked first, then the source array bounds.
  checkValidAndBoundsForWrite(offsetBytes, numBytes);
  ResourceImpl.checkBounds(srcOffsetLongs, lengthLongs, srcArray.length);
  final long srcUnsafeOffset =
      ARRAY_LONG_BASE_OFFSET + (((long) srcOffsetLongs) << LONG_SHIFT);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray, srcUnsafeOffset, getUnsafeObject(), getCumulativeOffset(offsetBytes), numBytes);
}
@Override
public void putShort(final long offsetBytes, final short value) {
  // This leaf writes shorts in native byte order; delegates to the shared native-ordered put.
  putNativeOrderedShort(offsetBytes, value);
}
@Override
public void putShortArray(final long offsetBytes, final short[] srcArray,
    final int srcOffsetShorts, final int lengthShorts) {
  // Copies lengthShorts shorts from srcArray[srcOffsetShorts..] into this memory at offsetBytes.
  final long numBytes = ((long) lengthShorts) << SHORT_SHIFT;
  // Destination validity/bounds are checked first, then the source array bounds.
  checkValidAndBoundsForWrite(offsetBytes, numBytes);
  ResourceImpl.checkBounds(srcOffsetShorts, lengthShorts, srcArray.length);
  final long srcUnsafeOffset =
      ARRAY_SHORT_BASE_OFFSET + (((long) srcOffsetShorts) << SHORT_SHIFT);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray, srcUnsafeOffset, getUnsafeObject(), getCumulativeOffset(offsetBytes), numBytes);
}
}
| 2,343 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/Util.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.ByteOrder;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Objects;
import java.util.Random;
import org.apache.datasketches.memory.Memory;
/**
* @author Lee Rhodes
*/
public final class Util {
  private Util() { }

  /** The platform line separator. */
  public static final String LS = System.getProperty("line.separator");

  /**
   * Don't use sun.misc.Unsafe#copyMemory to copy blocks of memory larger than this
   * threshold, because internally it doesn't have safepoint polls, that may cause long
   * "Time To Safe Point" pauses in the application. This has been fixed in JDK 9 (see
   * https://bugs.openjdk.java.net/browse/JDK-8149596 and
   * https://bugs.openjdk.java.net/browse/JDK-8141491), but not in JDK 8, so the Memory library
   * should keep having this boilerplate as long as it supports Java 8.
   *
   * <p>A reference to this can be found in java.nio.Bits.</p>
   */
  public static final int UNSAFE_COPY_THRESHOLD_BYTES = 1024 * 1024;

  //Byte Order related
  /** The byte order of the underlying platform. */
  public static final ByteOrder NATIVE_BYTE_ORDER = ByteOrder.nativeOrder();

  /** The byte order opposite to the platform's native byte order. */
  public static final ByteOrder NON_NATIVE_BYTE_ORDER = NATIVE_BYTE_ORDER == ByteOrder.LITTLE_ENDIAN
      ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN;

  /**
   * Returns the byte order opposite to the given order.
   * Note: any order that is not the native order (including null) maps to the native order.
   * @param order the given byte order
   * @return the other byte order
   */
  public static ByteOrder otherByteOrder(final ByteOrder order) {
    return (order == NATIVE_BYTE_ORDER) ? NON_NATIVE_BYTE_ORDER : NATIVE_BYTE_ORDER;
  }

  /**
   * Returns true if the given byteOrder is the same as the native byte order.
   * @param byteOrder the given byte order; must not be null
   * @return true if the given byteOrder is the same as the native byte order.
   * @throws IllegalArgumentException if byteOrder is null
   */
  public static boolean isNativeByteOrder(final ByteOrder byteOrder) {
    if (byteOrder == null) {
      throw new IllegalArgumentException("ByteOrder parameter cannot be null.");
    }
    return ByteOrder.nativeOrder() == byteOrder;
  }

  /**
   * Searches a range of the specified array of longs for the specified value using the binary
   * search algorithm. The range must be sorted prior to making this call.
   * If it is not sorted, the results are undefined. If the range contains
   * multiple elements with the specified value, there is no guarantee which one will be found.
   * @param mem the Memory to be searched
   * @param fromLongIndex the index of the first element (inclusive) to be searched
   * @param toLongIndex the index of the last element (exclusive) to be searched
   * @param key the value to be searched for
   * @return index of the search key, if it is contained in the array within the specified range;
   * otherwise, (-(insertion point) - 1). The insertion point is defined as the point at which
   * the key would be inserted into the array: the index of the first element in the range greater
   * than the key, or toIndex if all elements in the range are less than the specified key.
   * Note that this guarantees that the return value will be &ge; 0 if and only if the key is found.
   */
  public static long binarySearchLongs(final Memory mem, final long fromLongIndex,
      final long toLongIndex, final long key) {
    ResourceImpl.checkBounds(fromLongIndex << 3, (toLongIndex - fromLongIndex) << 3, mem.getCapacity());
    long low = fromLongIndex;
    long high = toLongIndex - 1L;
    while (low <= high) {
      final long mid = (low + high) >>> 1; //unsigned shift avoids overflow of (low + high)
      final long midVal = mem.getLong(mid << 3);
      if (midVal < key) { low = mid + 1; }
      else if (midVal > key) { high = mid - 1; }
      else { return mid; } // key found
    }
    return -(low + 1); // key not found.
  }

  /**
   * Prepend the given string with zeros. If the given string is equal or greater than the given
   * field length, it will be returned without modification.
   * @param s the given string
   * @param fieldLength desired total field length including the given string
   * @return the given string prepended with zeros.
   */
  public static final String zeroPad(final String s, final int fieldLength) {
    return characterPad(s, fieldLength, '0', false);
  }

  /**
   * Prepend or postpend the given string with the given character to fill the given field length.
   * If the given string is equal or greater than the given field length, it will be returned
   * without modification.
   * @param s the given string
   * @param fieldLength the desired field length
   * @param padChar the desired pad character
   * @param postpend if true append the padChar characters to the end of the string.
   * @return prepended or postpended given string with the given character to fill the given field
   * length.
   */
  public static final String characterPad(final String s, final int fieldLength,
      final char padChar, final boolean postpend) {
    final int sLen = s.length();
    if (sLen >= fieldLength) { return s; }
    final StringBuilder sb = new StringBuilder(fieldLength);
    if (postpend) {
      sb.append(s);
      for (int i = sLen; i < fieldLength; i++) { sb.append(padChar); }
    } else { //prepend
      for (int i = sLen; i < fieldLength; i++) { sb.append(padChar); }
      sb.append(s);
    }
    return sb.toString();
  }

  /**
   * Return true if all the masked bits of value are zero
   * @param value the value to be tested
   * @param bitMask defines the bits of interest
   * @return true if all the masked bits of value are zero
   */
  public static final boolean isAllBitsClear(final long value, final long bitMask) {
    return (~value & bitMask) == bitMask;
  }

  /**
   * Return true if all the masked bits of value are one
   * @param value the value to be tested
   * @param bitMask defines the bits of interest
   * @return true if all the masked bits of value are one
   */
  public static final boolean isAllBitsSet(final long value, final long bitMask) {
    return (value & bitMask) == bitMask;
  }

  /**
   * Return true if any the masked bits of value are zero
   * @param value the value to be tested
   * @param bitMask defines the bits of interest
   * @return true if any the masked bits of value are zero
   */
  public static final boolean isAnyBitsClear(final long value, final long bitMask) {
    return (~value & bitMask) != 0;
  }

  /**
   * Return true if any the masked bits of value are one
   * @param value the value to be tested
   * @param bitMask defines the bits of interest
   * @return true if any the masked bits of value are one
   */
  public static final boolean isAnyBitsSet(final long value, final long bitMask) {
    return (value & bitMask) != 0;
  }

  /**
   * Creates random valid Character Code Points (as integers). By definition, valid CodePoints
   * are integers in the range 0 to Character.MAX_CODE_POINT, and exclude the surrogate values.
   * This is used in unit testing and characterization testing of the UTF8 class. Because the
   * characterization tools are in a separate package, this must remain public.
   *
   * @author Lee Rhodes
   */
  public static class RandomCodePoints {
    private final Random rand;
    private static final int ALL_CP = Character.MAX_CODE_POINT + 1;
    private static final int MIN_SUR = Character.MIN_SURROGATE;
    private static final int MAX_SUR = Character.MAX_SURROGATE;

    /**
     * @param deterministic if true, configure java.util.Random with a fixed seed.
     */
    public RandomCodePoints(final boolean deterministic) {
      rand = deterministic ? new Random(0) : new Random();
    }

    /**
     * Fills the given array with random valid Code Points from 0, inclusive, to
     * <i>Character.MAX_CODE_POINT</i>, inclusive.
     * The surrogate range, which is from <i>Character.MIN_SURROGATE</i>, inclusive, to
     * <i>Character.MAX_SURROGATE</i>, inclusive, is always <u>excluded</u>.
     * @param cpArr the array to fill
     */
    public final void fillCodePointArray(final int[] cpArr) {
      fillCodePointArray(cpArr, 0, ALL_CP);
    }

    /**
     * Fills the given array with random valid Code Points from <i>startCP</i>, inclusive, to
     * <i>endCP</i>, exclusive.
     * The surrogate range, which is from <i>Character.MIN_SURROGATE</i>, inclusive, to
     * <i>Character.MAX_SURROGATE</i>, inclusive, is always <u>excluded</u>.
     * @param cpArr the array to fill
     * @param startCP the starting Code Point, included.
     * @param endCP the ending Code Point, excluded. This value cannot exceed 0x110000.
     */
    public final void fillCodePointArray(final int[] cpArr, final int startCP, final int endCP) {
      final int arrLen = cpArr.length;
      //Bug fix: was Math.min(0, startCP), which for startCP > 0 made numCP too large and
      //allowed generated code points >= endCP (and beyond MAX_CODE_POINT).
      final int numCP = Math.min(endCP, 0X110000) - Math.max(0, startCP);
      int idx = 0;
      while (idx < arrLen) {
        final int cp = startCP + rand.nextInt(numCP);
        if ((cp >= MIN_SUR) && (cp <= MAX_SUR)) {
          continue; //reject surrogates and draw again
        }
        cpArr[idx++] = cp;
      }
    }

    /**
     * Return a single valid random Code Point from 0, inclusive, to
     * <i>Character.MAX_CODE_POINT</i>, inclusive.
     * The surrogate range, which is from <i>Character.MIN_SURROGATE</i>, inclusive, to
     * <i>Character.MAX_SURROGATE</i>, inclusive, is always <u>excluded</u>.
     * @return a single valid random CodePoint.
     */
    public final int getCodePoint() {
      return getCodePoint(0, ALL_CP);
    }

    /**
     * Return a single valid random Code Point from <i>startCP</i>, inclusive, to
     * <i>endCP</i>, exclusive.
     * The surrogate range, which is from <i>Character.MIN_SURROGATE</i>, inclusive, to
     * <i>Character.MAX_SURROGATE</i>, inclusive, is always <u>excluded</u>.
     * @param startCP the starting Code Point, included.
     * @param endCP the ending Code Point, excluded. This value cannot exceed 0x110000.
     * @return a single valid random CodePoint.
     */
    public final int getCodePoint(final int startCP, final int endCP) {
      //Bug fix: was Math.min(0, startCP); see fillCodePointArray above.
      final int numCP = Math.min(endCP, 0X110000) - Math.max(0, startCP);
      while (true) {
        final int cp = startCP + rand.nextInt(numCP);
        if ((cp < MIN_SUR) || (cp > MAX_SUR)) {
          return cp;
        }
      }
    }
  } //End class RandomCodePoints

  /**
   * Throws if the given value is negative or zero.
   * @param value the value to check
   * @param arg the name of the argument, used in the exception message
   */
  public static final void zeroCheck(final long value, final String arg) {
    if (value <= 0) {
      throw new IllegalArgumentException("The argument '" + arg + "' may not be negative or zero.");
    }
  }

  /**
   * Throws if the given value is negative.
   * @param value the value to check
   * @param arg the name of the argument, used in the exception message
   */
  public static final void negativeCheck(final long value, final String arg) {
    if (value < 0) {
      throw new IllegalArgumentException("The argument '" + arg + "' may not be negative.");
    }
  }

  /**
   * Throws if the given object is null.
   * @param obj the object to check
   * @param arg the name of the argument, used in the exception message
   */
  public static final void nullCheck(final Object obj, final String arg) {
    if (obj == null) {
      throw new IllegalArgumentException("The argument '" + arg + "' may not be null.");
    }
  }

  //Resources NOTE: these 3 methods are duplicated in Java/ datasketches/Util

  /**
   * Gets the absolute path of the given resource file's shortName.
   *
   * <p>Note that the ClassLoader.getResource(shortName) returns a URL,
   * which can have special characters, e.g., "%20" for spaces. This method
   * obtains the URL, converts it to a URI, then does a uri.getPath(), which
   * decodes any special characters in the URI path. This is required to make
   * obtaining resources operating-system independent.</p>
   *
   * @param shortFileName the last name in the pathname's name sequence.
   * @return the absolute path of the given resource file's shortName.
   */
  public static String getResourcePath(final String shortFileName) {
    Objects.requireNonNull(shortFileName, "input parameter " + shortFileName + " cannot be null.");
    try {
      final URL url = Util.class.getClassLoader().getResource(shortFileName);
      Objects.requireNonNull(url, "resource " + shortFileName + " could not be acquired.");
      final URI uri = url.toURI();
      //decodes any special characters
      final String path = uri.isAbsolute() ? Paths.get(uri).toAbsolutePath().toString() : uri.getPath();
      return path;
    } catch (final URISyntaxException e) {
      throw new IllegalArgumentException("Cannot find resource: " + shortFileName + LS + e);
    }
  }

  /**
   * Gets the file defined by the given resource file's shortFileName.
   * @param shortFileName the last name in the pathname's name sequence.
   * @return the file defined by the given resource file's shortFileName.
   */
  public static File getResourceFile(final String shortFileName) {
    return new File(getResourcePath(shortFileName));
  }

  /**
   * Returns a byte array of the contents of the file defined by the given resource file's
   * shortFileName.
   * @param shortFileName the last name in the pathname's name sequence.
   * @return a byte array of the contents of the file defined by the given resource file's
   * shortFileName.
   */
  public static byte[] getResourceBytes(final String shortFileName) {
    try {
      return Files.readAllBytes(Paths.get(getResourcePath(shortFileName)));
    } catch (final IOException e) {
      throw new IllegalArgumentException("Cannot read resource: " + shortFileName + LS + e);
    }
  }
}
| 2,344 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/BaseBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import org.apache.datasketches.memory.BaseBuffer;
import org.apache.datasketches.memory.BufferPositionInvariantsException;
/**
* A new positional API. This is different from and simpler than Java Buffer positional approach.
* <ul><li>All based on longs instead of ints.</li>
* <li>Eliminated "mark". Rarely used and confusing with its silent side effects.</li>
* <li>The invariants are {@code 0 <= start <= position <= end <= capacity}.</li>
* <li>It always starts up as (0, 0, capacity, capacity).</li>
* <li>You set (start, position, end) in one call with
* {@link #setStartPositionEnd(long, long, long)}</li>
* <li>Position can be set directly or indirectly when using the positional get/put methods.
* <li>Added incrementPosition(long), which is much easier when you know the increment.</li>
* <li>This approach eliminated a number of methods and checks, and has no unseen side effects,
* e.g., mark being invalidated.</li>
* <li>Clearer method naming (IMHO).</li>
* </ul>
*
* @author Lee Rhodes
*/
public abstract class BaseBufferImpl extends ResourceImpl implements BaseBuffer {
  private long capacity;
  private long start = 0; //lowest legal position
  private long pos = 0;   //current position; invariant: start <= pos <= end
  private long end;       //highest legal position

  //Pass-through constructor: positional state starts as (start=0, pos=0, end=capacity).
  BaseBufferImpl(final long capacityBytes) {
    super();
    capacity = end = capacityBytes;
  }

  @Override
  public final BaseBufferImpl incrementPosition(final long increment) {
    // NOTE(review): this is currently identical to incrementAndCheckPosition(increment);
    // both validate the position invariants. Confirm whether an unchecked fast path was intended.
    incrementAndCheckPositionForRead(pos, increment);
    return this;
  }

  @Override
  public final BaseBufferImpl incrementAndCheckPosition(final long increment) {
    incrementAndCheckPositionForRead(pos, increment);
    return this;
  }

  @Override
  public final long getEnd() {
    return end;
  }

  @Override
  public final long getPosition() {
    return pos;
  }

  @Override
  public final long getStart() {
    return start;
  }

  @Override
  public final long getRemaining() {
    return end - pos;
  }

  @Override
  public final boolean hasRemaining() {
    return (end - pos) > 0;
  }

  @Override
  public final BaseBufferImpl resetPosition() {
    // Moves the position back to start without touching start or end.
    pos = start;
    return this;
  }

  @Override
  public final BaseBufferImpl setPosition(final long position) {
    // Delegates so the full invariant check (start <= position <= end <= capacity) is applied.
    return setStartPositionEnd(start, position, end);
  }

  @Override
  public final BaseBufferImpl setStartPositionEnd(final long start, final long position, final long end) {
    checkValid();
    checkInvariants(start, position, end, capacity);
    this.start = start;
    this.end = end;
    pos = position;
    return this;
  }

  //RESTRICTED
  //used for buffer arrays and apply at runtime
  //Validates the post-increment position against the invariants before committing it.
  final void incrementAndCheckPositionForRead(final long position, final long increment) {
    checkValid();
    final long newPos = position + increment;
    checkInvariants(start, newPos, end, capacity);
    pos = newPos;
  }

  //used for buffer arrays and apply at runtime
  //Same as the read variant but first rejects read-only resources.
  final void incrementAndCheckPositionForWrite(final long position, final long increment) {
    checkNotReadOnly();
    incrementAndCheckPositionForRead(position, increment);
  }

  /**
   * The invariants equation is: {@code 0 <= start <= position <= end <= capacity}.
   * If this equation is violated a <i>BufferPositionInvariantsException</i> will be thrown.
   * @param start the lowest start position
   * @param pos the current position
   * @param end the highest position
   * @param cap the capacity of the backing buffer.
   */
  static final void checkInvariants(final long start, final long pos, final long end, final long cap) {
    // ORs every value and every pairwise difference: if any of them is negative,
    // the sign bit of the OR is set, so a single < 0 test covers all invariants.
    if ((start | pos | end | cap | (pos - start) | (end - pos) | (cap - end) ) < 0L) {
      throw new BufferPositionInvariantsException(
          "Violation of Invariants: "
          + "start: " + start
          + " <= pos: " + pos
          + " <= end: " + end
          + " <= cap: " + cap
          + "; (pos - start): " + (pos - start)
          + ", (end - pos): " + (end - pos)
          + ", (cap - end): " + (cap - end)
      );
    }
  }
}
| 2,345 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/BaseWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BOOLEAN_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BOOLEAN_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BYTE_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BYTE_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_CHAR_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_INT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_LONG_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_SHORT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Objects;
import org.apache.datasketches.memory.Buffer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.ReadOnlyException;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
/**
* Common base of native-ordered and non-native-ordered {@link WritableBuffer} implementations.
* Contains methods which are agnostic to the byte order.
*/
@SuppressWarnings("restriction")
public abstract class BaseWritableBufferImpl extends BaseBufferImpl implements WritableBuffer {
//Pass-through constructor: capacity is forwarded to BaseBufferImpl, which
//initializes the positional state to (start=0, pos=0, end=capacity).
BaseWritableBufferImpl(final long capacityBytes) { super(capacityBytes); }
/**
 * The static constructor that chooses the correct ByteBuffer leaf node based on the byte order.
 * @param byteBuf the ByteBuffer being wrapped
 * @param localReadOnly the requested read-only state
 * @param byteOrder the requested byteOrder
 * @param memReqSvr the requested MemoryRequestServer, which may be null.
 * @return this class constructed via the leaf node.
 */
public static WritableBuffer wrapByteBuffer(
    final ByteBuffer byteBuf, final boolean localReadOnly, final ByteOrder byteOrder,
    final MemoryRequestServer memReqSvr) {
  final AccessByteBuffer abb = new AccessByteBuffer(byteBuf);
  // Read-only if either the ByteBuffer itself is read-only or the caller requests it.
  final int typeId = (abb.resourceReadOnly || localReadOnly) ? READONLY : 0;
  final long cumOffsetBytes = abb.initialCumOffset;
  // Native byte order selects the native leaf; anything else selects the non-native leaf.
  final BaseWritableBufferImpl bwbi = Util.isNativeByteOrder(byteOrder)
      ? new BBWritableBufferImpl(abb.unsafeObj, abb.nativeBaseOffset,
          abb.offsetBytes, abb.capacityBytes, typeId, cumOffsetBytes, memReqSvr, byteBuf)
      : new BBNonNativeWritableBufferImpl(abb.unsafeObj, abb.nativeBaseOffset,
          abb.offsetBytes, abb.capacityBytes, typeId, cumOffsetBytes, memReqSvr, byteBuf);
  // Mirror the ByteBuffer's current position and limit in the new Buffer.
  bwbi.setStartPositionEnd(0, byteBuf.position(), byteBuf.limit());
  return bwbi;
}
//REGIONS
@Override
public Buffer region() {
  // Read-only region spanning [position, end) of this Buffer, keeping the current byte order.
  return writableRegionImpl(getPosition(), getEnd() - getPosition(), true, getByteOrder());
}
@Override
public Buffer region(final long offsetBytes, final long capacityBytes, final ByteOrder byteOrder) {
  // Read-only region of the given span; starts out as (start=0, pos=0, end=capacity).
  final WritableBuffer buf = writableRegionImpl(offsetBytes, capacityBytes, true, byteOrder);
  buf.setStartPositionEnd(0, 0, capacityBytes);
  return buf;
}
@Override
public WritableBuffer writableRegion() {
  // Writable region spanning [position, end); writableRegionImpl rejects read-only Buffers.
  return writableRegionImpl(getPosition(), getEnd() - getPosition(), false, getByteOrder());
}
@Override
public WritableBuffer writableRegion(final long offsetBytes, final long capacityBytes, final ByteOrder byteOrder) {
  final WritableBuffer wbuf = writableRegionImpl(offsetBytes, capacityBytes, false, byteOrder);
  wbuf.setStartPositionEnd(0, 0, capacityBytes);
  return wbuf;
}
// Common implementation for all region methods. The resulting region is read-only if this
// Buffer is read-only OR localReadOnly is requested; a writable view of a read-only Buffer throws.
WritableBuffer writableRegionImpl(final long offsetBytes, final long capacityBytes,
    final boolean localReadOnly, final ByteOrder byteOrder) {
  if (isReadOnly() && !localReadOnly) {
    throw new ReadOnlyException("Writable region of a read-only Buffer is not allowed.");
  }
  checkValidAndBounds(offsetBytes, capacityBytes);
  final boolean readOnly = isReadOnly() || localReadOnly;
  final WritableBuffer wbuf = toWritableRegion(offsetBytes, capacityBytes, readOnly, byteOrder);
  wbuf.setStartPositionEnd(0, 0, capacityBytes);
  return wbuf;
}
// Leaf classes materialize the region with the requested byte order.
abstract WritableBuffer toWritableRegion(
    long offsetBytes, long capcityBytes, boolean readOnly, ByteOrder byteOrder);
//DUPLICATES
@Override
public Buffer duplicate() {
  // Read-only duplicate keeping the current byte order.
  return writableDuplicateImpl(true, getByteOrder());
}
@Override
public Buffer duplicate(final ByteOrder byteOrder) {
  return writableDuplicateImpl(true, byteOrder);
}
@Override
public WritableBuffer writableDuplicate() {
  return writableDuplicateImpl(false, getByteOrder());
}
@Override
public WritableBuffer writableDuplicate(final ByteOrder byteOrder) {
  return writableDuplicateImpl(false, byteOrder);
}
// Common implementation for all duplicate methods. The duplicate shares the backing resource
// and copies this Buffer's (start, position, end); writable duplicate of a read-only Buffer throws.
WritableBuffer writableDuplicateImpl(final boolean localReadOnly, final ByteOrder byteOrder) {
  if (isReadOnly() && !localReadOnly) {
    throw new ReadOnlyException("Writable duplicate of a read-only Buffer is not allowed.");
  }
  final boolean finalReadOnly = isReadOnly() || localReadOnly;
  final WritableBuffer wbuf = toDuplicate(finalReadOnly, byteOrder);
  wbuf.setStartPositionEnd(getStart(), getPosition(), getEnd());
  return wbuf;
}
// Leaf classes materialize the duplicate with the requested byte order.
abstract BaseWritableBufferImpl toDuplicate(boolean readOnly, ByteOrder byteOrder);
//AS MEMORY
@Override
public Memory asMemory(final ByteOrder byteOrder) {
  // Read-only Memory view of this Buffer.
  return asWritableMemory(true, byteOrder);
}
@Override
public WritableMemory asWritableMemory(final ByteOrder byteOrder) {
  return asWritableMemory(false, byteOrder);
}
// Common implementation: a writable Memory view of a read-only Buffer is rejected.
WritableMemory asWritableMemory(final boolean localReadOnly, final ByteOrder byteOrder) {
  Objects.requireNonNull(byteOrder, "byteOrder must be non-null");
  if (isReadOnly() && !localReadOnly) {
    throw new ReadOnlyException(
        "Converting a read-only Buffer to a writable Memory is not allowed.");
  }
  final boolean finalReadOnly = isReadOnly() || localReadOnly;
  final WritableMemory wmem = toWritableMemory(finalReadOnly, byteOrder);
  return wmem;
}
// Leaf classes materialize the Memory view with the requested byte order.
abstract WritableMemory toWritableMemory(boolean readOnly, ByteOrder byteOrder);
//PRIMITIVE getX() and getXArray()
@Override
public final boolean getBoolean() {
  // Positional read: checks bounds and advances the position by one byte.
  final long pos = getPosition();
  incrementAndCheckPositionForRead(pos, ARRAY_BOOLEAN_INDEX_SCALE);
  return unsafe.getBoolean(getUnsafeObject(), getCumulativeOffset(pos));
}
@Override
public final boolean getBoolean(final long offsetBytes) {
  // Absolute read: does not move the position.
  checkValidAndBounds(offsetBytes, ARRAY_BOOLEAN_INDEX_SCALE);
  return unsafe.getBoolean(getUnsafeObject(), getCumulativeOffset(offsetBytes));
}
@Override
public final void getBooleanArray(final boolean[] dstArray, final int dstOffsetBooleans,
    final int lengthBooleans) {
  final long pos = getPosition();
  final long copyBytes = lengthBooleans; //one byte per boolean
  incrementAndCheckPositionForRead(pos, copyBytes);
  ResourceImpl.checkBounds(dstOffsetBooleans, lengthBooleans, dstArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      getUnsafeObject(),
      getCumulativeOffset(pos),
      dstArray,
      ARRAY_BOOLEAN_BASE_OFFSET + dstOffsetBooleans,
      copyBytes);
}
@Override
public final byte getByte() {
  // Positional read: checks bounds and advances the position by one byte.
  final long pos = getPosition();
  incrementAndCheckPositionForRead(pos, ARRAY_BYTE_INDEX_SCALE);
  return unsafe.getByte(getUnsafeObject(), getCumulativeOffset(pos));
}
@Override
public final byte getByte(final long offsetBytes) {
  // Absolute read: does not move the position.
  checkValidAndBounds(offsetBytes, ARRAY_BYTE_INDEX_SCALE);
  return unsafe.getByte(getUnsafeObject(), getCumulativeOffset(offsetBytes));
}
@Override
public final void getByteArray(final byte[] dstArray, final int dstOffsetBytes,
    final int lengthBytes) {
  final long pos = getPosition();
  final long copyBytes = lengthBytes;
  incrementAndCheckPositionForRead(pos, copyBytes);
  ResourceImpl.checkBounds(dstOffsetBytes, lengthBytes, dstArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      getUnsafeObject(),
      getCumulativeOffset(pos),
      dstArray,
      ARRAY_BYTE_BASE_OFFSET + dstOffsetBytes,
      copyBytes);
}
//PRIMITIVE getX() Native Endian (used by both endians)
//Positional variants advance the position; the (offsetBytes) variants are absolute reads.
final char getNativeOrderedChar() {
  final long pos = getPosition();
  incrementAndCheckPositionForRead(pos, ARRAY_CHAR_INDEX_SCALE);
  return unsafe.getChar(getUnsafeObject(), getCumulativeOffset(pos));
}
final char getNativeOrderedChar(final long offsetBytes) {
  checkValidAndBounds(offsetBytes, ARRAY_CHAR_INDEX_SCALE);
  return unsafe.getChar(getUnsafeObject(), getCumulativeOffset(offsetBytes));
}
final int getNativeOrderedInt() {
  final long pos = getPosition();
  incrementAndCheckPositionForRead(pos, ARRAY_INT_INDEX_SCALE);
  return unsafe.getInt(getUnsafeObject(), getCumulativeOffset(pos));
}
final int getNativeOrderedInt(final long offsetBytes) {
  checkValidAndBounds(offsetBytes, ARRAY_INT_INDEX_SCALE);
  return unsafe.getInt(getUnsafeObject(), getCumulativeOffset(offsetBytes));
}
final long getNativeOrderedLong() {
  final long pos = getPosition();
  incrementAndCheckPositionForRead(pos, ARRAY_LONG_INDEX_SCALE);
  return unsafe.getLong(getUnsafeObject(), getCumulativeOffset(pos));
}
final long getNativeOrderedLong(final long offsetBytes) {
  checkValidAndBounds(offsetBytes, ARRAY_LONG_INDEX_SCALE);
  return unsafe.getLong(getUnsafeObject(), getCumulativeOffset(offsetBytes));
}
final short getNativeOrderedShort() {
  final long pos = getPosition();
  incrementAndCheckPositionForRead(pos, ARRAY_SHORT_INDEX_SCALE);
  return unsafe.getShort(getUnsafeObject(), getCumulativeOffset(pos));
}
final short getNativeOrderedShort(final long offsetBytes) {
  checkValidAndBounds(offsetBytes, ARRAY_SHORT_INDEX_SCALE);
  return unsafe.getShort(getUnsafeObject(), getCumulativeOffset(offsetBytes));
}
//OTHER PRIMITIVE READ METHODS: copyTo, compareTo
@Override
public final int compareTo(final long thisOffsetBytes, final long thisLengthBytes,
    final Buffer thatBuf, final long thatOffsetBytes, final long thatLengthBytes) {
  // Byte-wise comparison of the two given spans; positions of either Buffer are not used.
  return CompareAndCopy.compare((ResourceImpl)this, thisOffsetBytes, thisLengthBytes,
      (ResourceImpl)thatBuf, thatOffsetBytes, thatLengthBytes);
}
/*
 * Developer notes: There is no copyTo for Buffers because of the ambiguity of what to do with
 * the positional values. Switch to MemoryImpl view to do copyTo.
 */
//PRIMITIVE putX() and putXArray() implementations
@Override
public final void putBoolean(final boolean value) {
  // Positional write: rejects read-only, checks bounds, advances the position by one byte.
  final long pos = getPosition();
  incrementAndCheckPositionForWrite(pos, ARRAY_BOOLEAN_INDEX_SCALE);
  unsafe.putBoolean(getUnsafeObject(), getCumulativeOffset(pos), value);
}
@Override
public final void putBoolean(final long offsetBytes, final boolean value) {
  // Absolute write: does not move the position.
  checkValidAndBoundsForWrite(offsetBytes, ARRAY_BOOLEAN_INDEX_SCALE);
  unsafe.putBoolean(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
@Override
public final void putBooleanArray(final boolean[] srcArray, final int srcOffsetBooleans,
    final int lengthBooleans) {
  final long pos = getPosition();
  final long copyBytes = lengthBooleans; //one byte per boolean
  incrementAndCheckPositionForWrite(pos, copyBytes);
  ResourceImpl.checkBounds(srcOffsetBooleans, lengthBooleans, srcArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray,
      ARRAY_BOOLEAN_BASE_OFFSET + srcOffsetBooleans,
      getUnsafeObject(),
      getCumulativeOffset(pos),
      copyBytes);
}
@Override
public final void putByte(final byte value) {
  // Positional write: rejects read-only, checks bounds, advances the position by one byte.
  final long pos = getPosition();
  incrementAndCheckPositionForWrite(pos, ARRAY_BYTE_INDEX_SCALE);
  unsafe.putByte(getUnsafeObject(), getCumulativeOffset(pos), value);
}
@Override
public final void putByte(final long offsetBytes, final byte value) {
  // Absolute write: does not move the position.
  checkValidAndBoundsForWrite(offsetBytes, ARRAY_BYTE_INDEX_SCALE);
  unsafe.putByte(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
@Override
public final void putByteArray(final byte[] srcArray, final int srcOffsetBytes,
    final int lengthBytes) {
  final long pos = getPosition();
  final long copyBytes = lengthBytes;
  incrementAndCheckPositionForWrite(pos, copyBytes);
  ResourceImpl.checkBounds(srcOffsetBytes, lengthBytes, srcArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray,
      ARRAY_BYTE_BASE_OFFSET + srcOffsetBytes,
      getUnsafeObject(),
      getCumulativeOffset(pos),
      copyBytes);
}
//PRIMITIVE putX() Native Endian (used by both endians)
/**
 * Puts a char at the current position using native byte order, then advances
 * the position. Shared by both the native- and non-native-ordered subclasses.
 */
final void putNativeOrderedChar(final char value) {
  final long position = getPosition();
  //advances the position; also checks validity and write bounds
  incrementAndCheckPositionForWrite(position, ARRAY_CHAR_INDEX_SCALE);
  unsafe.putChar(getUnsafeObject(), getCumulativeOffset(position), value);
}
//absolute native-order char write: does not move the position
final void putNativeOrderedChar(final long offsetBytes, final char value) {
checkValidAndBoundsForWrite(offsetBytes, ARRAY_CHAR_INDEX_SCALE);
unsafe.putChar(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
/**
 * Puts an int at the current position using native byte order, then advances
 * the position. Shared by both the native- and non-native-ordered subclasses.
 */
final void putNativeOrderedInt(final int value) {
  final long position = getPosition();
  //advances the position; also checks validity and write bounds
  incrementAndCheckPositionForWrite(position, ARRAY_INT_INDEX_SCALE);
  unsafe.putInt(getUnsafeObject(), getCumulativeOffset(position), value);
}
//absolute native-order int write: does not move the position
final void putNativeOrderedInt(final long offsetBytes, final int value) {
checkValidAndBoundsForWrite(offsetBytes, ARRAY_INT_INDEX_SCALE);
unsafe.putInt(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
/**
 * Puts a long at the current position using native byte order, then advances
 * the position. Shared by both the native- and non-native-ordered subclasses.
 */
final void putNativeOrderedLong(final long value) {
  final long position = getPosition();
  //advances the position; also checks validity and write bounds
  incrementAndCheckPositionForWrite(position, ARRAY_LONG_INDEX_SCALE);
  unsafe.putLong(getUnsafeObject(), getCumulativeOffset(position), value);
}
//absolute native-order long write: does not move the position
final void putNativeOrderedLong(final long offsetBytes, final long value) {
checkValidAndBoundsForWrite(offsetBytes, ARRAY_LONG_INDEX_SCALE);
unsafe.putLong(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
/**
 * Puts a short at the current position using native byte order, then advances
 * the position. Shared by both the native- and non-native-ordered subclasses.
 */
final void putNativeOrderedShort(final short value) {
  final long position = getPosition();
  //advances the position; also checks validity and write bounds
  incrementAndCheckPositionForWrite(position, ARRAY_SHORT_INDEX_SCALE);
  unsafe.putShort(getUnsafeObject(), getCumulativeOffset(position), value);
}
//absolute native-order short write: does not move the position
final void putNativeOrderedShort(final long offsetBytes, final short value) {
checkValidAndBoundsForWrite(offsetBytes, ARRAY_SHORT_INDEX_SCALE);
unsafe.putShort(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
//OTHER WRITE METHODS
/**
 * Returns the primitive backing array, otherwise null.
 * Calls checkValid() before returning the backing object, so a closed/invalid
 * resource is rejected first.
 * @return the primitive backing array, otherwise null.
 */
final Object getArray() {
checkValid();
return getUnsafeObject();
}
@Override
public final void clear() {
//fills with zeros from the current position to the end; see fill(byte)
fill((byte)0);
}
/**
 * Fills this buffer with the given byte value from the current position to the
 * end (limit). The position itself is not advanced (only a local cursor moves).
 * @param value the byte value used for the fill
 */
@Override
public final void fill(final byte value) {
  checkNotReadOnly();
  long cursor = getPosition();
  long remaining = getEnd() - cursor;
  checkInvariants(getStart(), cursor + remaining, getEnd(), getCapacity());
  while (remaining > 0) {
    //write in bounded chunks; UNSAFE_COPY_THRESHOLD_BYTES presumably bounds the
    //time spent inside a single unsafe call -- see Util for the rationale
    final long chunkBytes = Math.min(remaining, Util.UNSAFE_COPY_THRESHOLD_BYTES);
    unsafe.setMemory(getUnsafeObject(), getCumulativeOffset(cursor), chunkBytes, value);
    cursor += chunkBytes;
    remaining -= chunkBytes;
  }
}
}
| 2,346 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/VirtualMachineMemory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
/**
* Extracts a version-dependent reference to the `sun.misc.VM` into a standalone
* class. The package name for VM has changed in later versions. The appropriate
* class will be loaded by the class loader depending on the Java version that is used.
* For more information, see: https://openjdk.java.net/jeps/238
*/
public final class VirtualMachineMemory {

  private static final Class<?> VM_CLASS;
  private static final Method VM_IS_DIRECT_MEMORY_PAGE_ALIGNED_METHOD;
  private static final boolean isPageAligned;

  static {
    try {
      VM_CLASS = Class.forName("sun.misc.VM");
      VM_IS_DIRECT_MEMORY_PAGE_ALIGNED_METHOD =
          VM_CLASS.getDeclaredMethod("isDirectMemoryPageAligned");
      VM_IS_DIRECT_MEMORY_PAGE_ALIGNED_METHOD.setAccessible(true);
      //static method, so a null receiver is passed to invoke(...)
      isPageAligned = (boolean) VM_IS_DIRECT_MEMORY_PAGE_ALIGNED_METHOD.invoke(null);
    } catch (final ClassNotFoundException | NoSuchMethodException | IllegalAccessException
        | IllegalArgumentException | InvocationTargetException | SecurityException e) {
      //Fix: chain e as the cause so the underlying stack trace is preserved.
      //The previous code reported only e.getClass() and dropped the cause.
      throw new RuntimeException("Could not acquire sun.misc.VM class: " + e.getClass(), e);
    }
  }

  /**
   * Returns true if the direct buffers should be page aligned.
   * @return flag that determines whether direct buffers should be page aligned.
   */
  public static boolean getIsPageAligned() {
    return isPageAligned;
  }
}
| 2,347 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/DirectWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableBuffer;
/**
* Implementation of {@link WritableBuffer} for direct memory, native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class DirectWritableBufferImpl extends NativeWritableBufferImpl {
//Keeps the off-heap allocation alive; supplies its validity flag and close().
private final AllocateDirect direct;
DirectWritableBufferImpl(
final AllocateDirect direct,
final long offsetBytes,
final long capacityBytes,
final int typeId,
final long cumOffsetBytes,
final MemoryRequestServer memReqSvr) {
super(capacityBytes);
this.direct = direct;
this.offsetBytes = offsetBytes;
this.capacityBytes = capacityBytes;
//removeNnBuf presumably clears the NONNATIVE/BUFFER bits of the incoming
//typeId before the DIRECT | BUFFER | NATIVE flags are forced -- confirm in ResourceImpl
this.typeId = removeNnBuf(typeId) | DIRECT | BUFFER | NATIVE; //initially cannot be ReadOnly
this.cumOffsetBytes = cumOffsetBytes;
this.memReqSvr = memReqSvr; //in ResourceImpl
//single-owner-thread policy: constructing from a non-owner thread is an error
if ((this.owner != null) && (this.owner != Thread.currentThread())) {
throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
}
this.owner = Thread.currentThread();
}
//Returns a writable region view of the same allocation, shifted by regionOffsetBytes.
//NOTE(review): this method tests byte order via Util.isNativeByteOrder(byteOrder),
//while toWritableMemory/toDuplicate use byteOrder == ByteOrder.nativeOrder();
//behavior may differ for a null byteOrder -- confirm intent.
@Override
BaseWritableBufferImpl toWritableRegion(
final long regionOffsetBytes,
final long capacityBytes,
final boolean readOnly,
final ByteOrder byteOrder) {
final long newOffsetBytes = offsetBytes + regionOffsetBytes;
final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
int typeIdOut = removeNnBuf(typeId) | BUFFER | REGION | (readOnly ? READONLY : 0);
if (Util.isNativeByteOrder(byteOrder)) {
typeIdOut |= NATIVE;
return new DirectWritableBufferImpl(
direct, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
} else {
typeIdOut |= NONNATIVE;
return new DirectNonNativeWritableBufferImpl(
direct, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
}
}
//Returns a Memory view of this Buffer sharing the same allocation and offsets.
@Override
BaseWritableMemoryImpl toWritableMemory(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | MEMORY | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new DirectWritableMemoryImpl(
direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
} else {
typeIdOut |= NONNATIVE;
return new DirectNonNativeWritableMemoryImpl(
direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
}
}
//Returns a duplicate Buffer view sharing the same allocation and offsets.
@Override
BaseWritableBufferImpl toDuplicate(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | BUFFER | DUPLICATE | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new DirectWritableBufferImpl(
direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
} else {
typeIdOut |= NONNATIVE;
return new DirectNonNativeWritableBufferImpl(
direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
}
}
//Closes the underlying off-heap allocation; only the owner thread may close.
@Override
public void close() {
checkValid();
checkThread(owner);
direct.close();
}
//Direct (off-heap) resources have no backing array object.
@Override
Object getUnsafeObject() {
return null;
}
//Reflects the validity flag of the shared AllocateDirect resource.
@Override
public boolean isValid() {
return direct.getValid().get();
}
}
| 2,348 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/MapNonNativeWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.WritableMemory;
/**
* Implementation of {@link WritableMemory} for map memory, non-native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class MapNonNativeWritableMemoryImpl extends NonNativeWritableMemoryImpl {
//Keeps the memory-mapped file resource alive; supplies close/force/load/isLoaded.
private final AllocateDirectWritableMap dirWMap;
MapNonNativeWritableMemoryImpl(
final AllocateDirectWritableMap dirWMap,
final long offsetBytes,
final long capacityBytes,
final int typeId,
final long cumOffsetBytes) {
super();
this.dirWMap = dirWMap;
this.offsetBytes = offsetBytes;
this.capacityBytes = capacityBytes;
//removeNnBuf presumably clears the NONNATIVE/BUFFER bits of the incoming
//typeId before the MAP | MEMORY | NONNATIVE flags are forced
this.typeId = removeNnBuf(typeId) | MAP | MEMORY | NONNATIVE;
this.cumOffsetBytes = cumOffsetBytes;
//single-owner-thread policy: constructing from a non-owner thread is an error
if ((this.owner != null) && (this.owner != Thread.currentThread())) {
throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
}
this.owner = Thread.currentThread();
}
//Returns a writable region view of the same mapped resource, shifted by regionOffsetBytes.
//NOTE(review): byte order is tested via Util.isNativeByteOrder(byteOrder) here but
//via byteOrder == ByteOrder.nativeOrder() in toWritableBuffer; behavior may differ
//for a null byteOrder -- confirm intent.
@Override
BaseWritableMemoryImpl toWritableRegion(
final long regionOffsetBytes,
final long capacityBytes,
final boolean readOnly,
final ByteOrder byteOrder) {
final long newOffsetBytes = offsetBytes + regionOffsetBytes;
final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
int typeIdOut = removeNnBuf(typeId) | MAP | REGION | (readOnly ? READONLY : 0);
if (Util.isNativeByteOrder(byteOrder)) {
typeIdOut |= NATIVE;
return new MapWritableMemoryImpl(
dirWMap, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes);
} else {
typeIdOut |= NONNATIVE;
return new MapNonNativeWritableMemoryImpl(
dirWMap, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes);
}
}
//Returns a Buffer view of this Memory sharing the same mapped resource and offsets.
@Override
BaseWritableBufferImpl toWritableBuffer(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | BUFFER | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new MapWritableBufferImpl(
dirWMap, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes);
} else {
typeIdOut |= NONNATIVE;
return new MapNonNativeWritableBufferImpl(
dirWMap, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes);
}
}
//Closes the underlying mapped resource; only the owner thread may close.
@Override
public void close() {
checkValid();
checkThread(owner);
dirWMap.close(); //checksValidAndThread
}
//Forces changes to the mapped file; rejected if read-only or non-owner thread.
@Override
public void force() {
checkValid();
checkThread(owner);
checkNotReadOnly();
dirWMap.force(); //checksValidAndThread
}
//Mapped (off-heap) resources have no backing array object.
@Override
Object getUnsafeObject() {
return null;
}
@Override
public boolean isLoaded() {
checkValid();
checkThread(owner);
return dirWMap.isLoaded(); //checksValidAndThread
}
//Reflects the validity flag of the shared mapped resource.
@Override
public boolean isValid() {
return dirWMap.getValid().get();
}
@Override
public void load() {
checkValid();
checkThread(owner);
dirWMap.load(); //checksValidAndThread
}
}
| 2,349 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/AccessByteBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
* Acquires access to a ByteBuffer.
*
* @author Lee Rhodes
* @author Praveenkumar Venkatesan
* @author Roman Leventov
*/
@SuppressWarnings("restriction")
final class AccessByteBuffer {
//Shared zero-capacity, read-only direct buffer used as the template for
//getDummyReadOnlyDirectByteBuffer(...). It is never written to directly;
//only duplicates of it are mutated.
static final ByteBuffer ZERO_READ_ONLY_DIRECT_BYTE_BUFFER =
ByteBuffer.allocateDirect(0).asReadOnlyBuffer();
//Unsafe field offsets of the private fields of java.nio.Buffer/ByteBuffer,
//obtained reflectively, used to read (and, for the dummy buffer, write) them.
private static final long NIO_BUFFER_ADDRESS_FIELD_OFFSET =
UnsafeUtil.getFieldOffset(java.nio.Buffer.class, "address");
private static final long NIO_BUFFER_CAPACITY_FIELD_OFFSET =
UnsafeUtil.getFieldOffset(java.nio.Buffer.class, "capacity");
private static final long BYTE_BUFFER_HB_FIELD_OFFSET =
UnsafeUtil.getFieldOffset(java.nio.ByteBuffer.class, "hb");
private static final long BYTE_BUFFER_OFFSET_FIELD_OFFSET =
UnsafeUtil.getFieldOffset(java.nio.ByteBuffer.class, "offset");
final long nativeBaseOffset;
final long initialCumOffset;
final long capacityBytes;
final long offsetBytes;
final Object unsafeObj; //backing array for heap ByteBuffers, else null
final boolean resourceReadOnly;
final ByteOrder byteOrder; //not used externally, here for reference.
/**
 * The given ByteBuffer may be either readOnly or writable.
 * Extracts the address/array, offset, and capacity of the ByteBuffer so callers
 * can access its storage directly via unsafe.
 * @param byteBuf the given ByteBuffer
 */
AccessByteBuffer(final ByteBuffer byteBuf) {
capacityBytes = byteBuf.capacity();
resourceReadOnly = byteBuf.isReadOnly();
byteOrder = byteBuf.order();
final boolean direct = byteBuf.isDirect();
if (direct) {
nativeBaseOffset = ((sun.nio.ch.DirectBuffer) byteBuf).address();
unsafeObj = null;
offsetBytes = 0L; //address() is already adjusted for direct slices, so offset = 0
initialCumOffset = nativeBaseOffset;
} else {
nativeBaseOffset = 0L;
// ByteBuffer.arrayOffset() and ByteBuffer.array() throw ReadOnlyBufferException if
// ByteBuffer is read-only, so this uses reflection for both writable and read-only cases.
// OffsetBytes includes the slice() offset for heap. The original array is still there because
// this is a view.
offsetBytes = unsafe.getInt(byteBuf, BYTE_BUFFER_OFFSET_FIELD_OFFSET);
unsafeObj = unsafe.getObject(byteBuf, BYTE_BUFFER_HB_FIELD_OFFSET);
initialCumOffset = UnsafeUtil.getArrayBaseOffset(unsafeObj.getClass()) + offsetBytes;
}
}
/**
 * This method is adapted from
 * https://github.com/odnoklassniki/one-nio/blob/master/src/one/nio/mem/DirectMemory.java
 * : wrap(...). See LICENSE.
 * Builds a read-only direct ByteBuffer over an arbitrary native address by
 * duplicating the shared zero buffer and overwriting its private address and
 * capacity fields via unsafe.
 */
static ByteBuffer getDummyReadOnlyDirectByteBuffer(final long address, final int capacity) {
final ByteBuffer byteBuf = ZERO_READ_ONLY_DIRECT_BYTE_BUFFER.duplicate();
unsafe.putLong(byteBuf, NIO_BUFFER_ADDRESS_FIELD_OFFSET, address);
unsafe.putInt(byteBuf, NIO_BUFFER_CAPACITY_FIELD_OFFSET, capacity);
byteBuf.limit(capacity);
return byteBuf;
}
}
| 2,350 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/BBWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableBuffer;
/**
* Implementation of {@link WritableBuffer} for ByteBuffer, native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class BBWritableBufferImpl extends NativeWritableBufferImpl {
private final ByteBuffer byteBuf; //holds a reference to a ByteBuffer until we are done with it.
private final Object unsafeObj;
private final long nativeBaseOffset; //raw off-heap address of allocation base if ByteBuffer direct, else 0
BBWritableBufferImpl(
final Object unsafeObj,
final long nativeBaseOffset,
final long offsetBytes,
final long capacityBytes,
final int typeId,
final long cumOffsetBytes,
final MemoryRequestServer memReqSvr,
final ByteBuffer byteBuf) {
super(capacityBytes);
this.unsafeObj = unsafeObj;
this.nativeBaseOffset = nativeBaseOffset;
this.offsetBytes = offsetBytes; //in ResourceImpl
this.capacityBytes = capacityBytes; //in ResourceImpl
//removeNnBuf presumably clears the NONNATIVE/BUFFER bits of the incoming
//typeId before the BYTEBUF | BUFFER | NATIVE flags are forced
this.typeId = removeNnBuf(typeId) | BYTEBUF | BUFFER | NATIVE; //in ResourceImpl
this.cumOffsetBytes = cumOffsetBytes; //in ResourceImpl
this.memReqSvr = memReqSvr; //in ResourceImpl
this.byteBuf = byteBuf;
//single-owner-thread policy: constructing from a non-owner thread is an error
if ((this.owner != null) && (this.owner != Thread.currentThread())) {
throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
}
this.owner = Thread.currentThread(); //in ResourceImpl
}
//Returns a writable region view of the same ByteBuffer, shifted by regionOffsetBytes.
//NOTE(review): byte order is tested via Util.isNativeByteOrder(byteOrder) here but
//via byteOrder == ByteOrder.nativeOrder() in toWritableMemory/toDuplicate; behavior
//may differ for a null byteOrder -- confirm intent.
@Override
BaseWritableBufferImpl toWritableRegion(
final long regionOffsetBytes,
final long capacityBytes,
final boolean readOnly,
final ByteOrder byteOrder) {
final long newOffsetBytes = offsetBytes + regionOffsetBytes;
final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
int typeIdOut = removeNnBuf(typeId) | BUFFER | REGION | (readOnly ? READONLY : 0);
if (Util.isNativeByteOrder(byteOrder)) {
typeIdOut |= NATIVE;
return new BBWritableBufferImpl(
unsafeObj, nativeBaseOffset, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr, byteBuf);
} else {
typeIdOut |= NONNATIVE;
return new BBNonNativeWritableBufferImpl(
unsafeObj, nativeBaseOffset, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr, byteBuf);
}
}
//Returns a Memory view of this Buffer sharing the same ByteBuffer and offsets.
@Override
BaseWritableMemoryImpl toWritableMemory(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | MEMORY | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new BBWritableMemoryImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
} else {
typeIdOut |= NONNATIVE;
return new BBNonNativeWritableMemoryImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
}
}
//Returns a duplicate Buffer view sharing the same ByteBuffer and offsets.
@Override
BaseWritableBufferImpl toDuplicate(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | BUFFER | DUPLICATE | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new BBWritableBufferImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
} else {
typeIdOut |= NONNATIVE;
return new BBNonNativeWritableBufferImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
}
}
@Override
public ByteBuffer getByteBuffer() {
return byteBuf;
}
//Backing array for heap ByteBuffers, else null for direct ones.
@Override
Object getUnsafeObject() {
return unsafeObj;
}
}
| 2,351 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/HeapNonNativeWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableBuffer;
/**
* Implementation of {@link WritableBuffer} for heap-based, non-native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class HeapNonNativeWritableBufferImpl extends NonNativeWritableBufferImpl {
//The backing primitive array of this heap resource.
private final Object unsafeObj;
HeapNonNativeWritableBufferImpl(
final Object unsafeObj,
final long offsetBytes,
final long capacityBytes,
final int typeId,
final long cumOffsetBytes,
final MemoryRequestServer memReqSvr) {
super(capacityBytes);
this.unsafeObj = unsafeObj;
this.offsetBytes = offsetBytes;
this.capacityBytes = capacityBytes;
//removeNnBuf presumably clears the NONNATIVE/BUFFER bits of the incoming
//typeId before the HEAP | BUFFER | NONNATIVE flags are forced
this.typeId = removeNnBuf(typeId) | HEAP | BUFFER | NONNATIVE;
this.cumOffsetBytes = cumOffsetBytes;
this.memReqSvr = memReqSvr; //in ResourceImpl
//single-owner-thread policy: constructing from a non-owner thread is an error
if ((this.owner != null) && (this.owner != Thread.currentThread())) {
throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
}
this.owner = Thread.currentThread();
}
//Returns a writable region view of the same backing array, shifted by regionOffsetBytes.
//NOTE(review): byte order is tested via Util.isNativeByteOrder(byteOrder) here but
//via byteOrder == ByteOrder.nativeOrder() in toWritableMemory/toDuplicate; behavior
//may differ for a null byteOrder -- confirm intent.
@Override
BaseWritableBufferImpl toWritableRegion(
final long regionOffsetBytes,
final long capacityBytes,
final boolean readOnly,
final ByteOrder byteOrder) {
final long newOffsetBytes = offsetBytes + regionOffsetBytes;
final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
int typeIdOut = removeNnBuf(typeId) | BUFFER | REGION | (readOnly ? READONLY : 0);
if (Util.isNativeByteOrder(byteOrder)) {
typeIdOut |= NATIVE;
return new HeapWritableBufferImpl(
unsafeObj, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
} else {
typeIdOut |= NONNATIVE;
return new HeapNonNativeWritableBufferImpl(
unsafeObj, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
}
}
//Returns a Memory view of this Buffer sharing the same backing array and offsets.
@Override
BaseWritableMemoryImpl toWritableMemory(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | MEMORY | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new HeapWritableMemoryImpl(
unsafeObj, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
} else {
typeIdOut |= NONNATIVE;
return new HeapNonNativeWritableMemoryImpl(
unsafeObj, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
}
}
//Returns a duplicate Buffer view sharing the same backing array and offsets.
@Override
BaseWritableBufferImpl toDuplicate(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | BUFFER | DUPLICATE | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new HeapWritableBufferImpl(
unsafeObj, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
} else {
typeIdOut |= NONNATIVE;
return new HeapNonNativeWritableBufferImpl(
unsafeObj, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
}
}
@Override
Object getUnsafeObject() {
return unsafeObj;
}
}
| 2,352 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/BBNonNativeWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableBuffer;
/**
* Implementation of {@link WritableBuffer} for ByteBuffer, non-native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class BBNonNativeWritableBufferImpl extends NonNativeWritableBufferImpl {
private final ByteBuffer byteBuf; //holds a reference to a ByteBuffer until we are done with it.
private final Object unsafeObj;
private final long nativeBaseOffset; //raw off-heap address of allocation base if ByteBuffer direct, else 0
BBNonNativeWritableBufferImpl(
final Object unsafeObj,
final long nativeBaseOffset,
final long offsetBytes,
final long capacityBytes,
final int typeId,
final long cumOffsetBytes,
final MemoryRequestServer memReqSvr,
final ByteBuffer byteBuf) {
super(capacityBytes);
this.unsafeObj = unsafeObj;
this.nativeBaseOffset = nativeBaseOffset;
this.offsetBytes = offsetBytes; //in ResourceImpl
this.capacityBytes = capacityBytes; //in ResourceImpl
//removeNnBuf presumably clears the NONNATIVE/BUFFER bits of the incoming
//typeId before the BYTEBUF | BUFFER | NONNATIVE flags are forced
this.typeId = removeNnBuf(typeId) | BYTEBUF | BUFFER | NONNATIVE; //in ResourceImpl
this.cumOffsetBytes = cumOffsetBytes; //in ResourceImpl
this.memReqSvr = memReqSvr; //in ResourceImpl
this.byteBuf = byteBuf;
//single-owner-thread policy: constructing from a non-owner thread is an error
if ((this.owner != null) && (this.owner != Thread.currentThread())) {
throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
}
this.owner = Thread.currentThread(); //in ResourceImpl
}
//Returns a writable region view of the same ByteBuffer, shifted by regionOffsetBytes.
//NOTE(review): byte order is tested via Util.isNativeByteOrder(byteOrder) here but
//via byteOrder == ByteOrder.nativeOrder() in toWritableMemory/toDuplicate; behavior
//may differ for a null byteOrder -- confirm intent.
@Override
BaseWritableBufferImpl toWritableRegion(
final long regionOffsetBytes,
final long capacityBytes,
final boolean readOnly,
final ByteOrder byteOrder) {
final long newOffsetBytes = offsetBytes + regionOffsetBytes;
final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
int typeIdOut = removeNnBuf(typeId) | BUFFER | REGION | (readOnly ? READONLY : 0);
if (Util.isNativeByteOrder(byteOrder)) {
typeIdOut |= NATIVE;
return new BBWritableBufferImpl(
unsafeObj, nativeBaseOffset, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr, byteBuf);
} else {
typeIdOut |= NONNATIVE;
return new BBNonNativeWritableBufferImpl(
unsafeObj, nativeBaseOffset, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr, byteBuf);
}
}
//Returns a Memory view of this Buffer sharing the same ByteBuffer and offsets.
@Override
BaseWritableMemoryImpl toWritableMemory(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | MEMORY | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new BBWritableMemoryImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
} else {
typeIdOut |= NONNATIVE;
return new BBNonNativeWritableMemoryImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
}
}
//Returns a duplicate Buffer view sharing the same ByteBuffer and offsets.
@Override
BaseWritableBufferImpl toDuplicate(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | BUFFER | DUPLICATE | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new BBWritableBufferImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
} else {
typeIdOut |= NONNATIVE;
return new BBNonNativeWritableBufferImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
}
}
@Override
public ByteBuffer getByteBuffer() {
return byteBuf;
}
//Backing array for heap ByteBuffers, else null for direct ones.
@Override
Object getUnsafeObject() {
return unsafeObj;
}
}
| 2,353 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/Ints.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
/** Equivalent of Guava's Ints. */
public final class Ints {

  private Ints() { /* no instances */ }

  /**
   * Narrows the given long to an int, verifying that no information is lost.
   * @param v the given long
   * @return v as an int
   * @throws IllegalArgumentException if v lies outside the int range
   */
  public static int checkedCast(final long v) {
    if ((v < Integer.MIN_VALUE) || (v > Integer.MAX_VALUE)) {
      throw new IllegalArgumentException("Out of range: " + v);
    }
    return (int) v;
  }
}
| 2,354 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/HeapWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
 * Implementation of {@link WritableMemory} for heap-based, native byte order.
 *
 * @author Roman Leventov
 * @author Lee Rhodes
 */
final class HeapWritableMemoryImpl extends NativeWritableMemoryImpl {
  //Backing on-heap object (primitive array) used as the base for unsafe access.
  private final Object unsafeObj;

  HeapWritableMemoryImpl(
      final Object unsafeObj,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes,
      final MemoryRequestServer memReqSvr) {
    super();
    this.unsafeObj = unsafeObj;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    this.cumOffsetBytes = cumOffsetBytes;
    this.memReqSvr = memReqSvr; //field lives in ResourceImpl
    //stamp this instance as heap-backed, Memory API, native byte order
    this.typeId = removeNnBuf(typeId) | HEAP | MEMORY | NATIVE;
    //single-thread ownership: only the creating thread may use this resource
    final Thread current = Thread.currentThread();
    if ((this.owner != null) && (this.owner != current)) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = current;
  }

  /**
   * Creates a writable region view of this memory; the concrete subtype is
   * selected by the requested byte order.
   */
  @Override
  BaseWritableMemoryImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long regionOffset = offsetBytes + regionOffsetBytes;
    final long regionCumOffset = cumOffsetBytes + regionOffsetBytes;
    final int baseId = removeNnBuf(typeId) | MEMORY | REGION | (readOnly ? READONLY : 0);
    return Util.isNativeByteOrder(byteOrder)
        ? new HeapWritableMemoryImpl(
            unsafeObj, regionOffset, capacityBytes, baseId | NATIVE, regionCumOffset, memReqSvr)
        : new HeapNonNativeWritableMemoryImpl(
            unsafeObj, regionOffset, capacityBytes, baseId | NONNATIVE, regionCumOffset, memReqSvr);
  }

  /**
   * Converts this memory to a Buffer view with the requested byte order,
   * sharing the same backing array and offsets.
   */
  @Override
  BaseWritableBufferImpl toWritableBuffer(final boolean readOnly, final ByteOrder byteOrder) {
    final int baseId = removeNnBuf(typeId) | BUFFER | (readOnly ? READONLY : 0);
    return (byteOrder == ByteOrder.nativeOrder()) //ByteOrder has exactly two instances
        ? new HeapWritableBufferImpl(
            unsafeObj, offsetBytes, capacityBytes, baseId | NATIVE, cumOffsetBytes, memReqSvr)
        : new HeapNonNativeWritableBufferImpl(
            unsafeObj, offsetBytes, capacityBytes, baseId | NONNATIVE, cumOffsetBytes, memReqSvr);
  }

  @Override
  Object getUnsafeObject() {
    return unsafeObj;
  }
}
| 2,355 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/MapWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.WritableBuffer;
/**
 * Implementation of {@link WritableBuffer} for map memory, native byte order.
 *
 * <p>All file-mapping operations (close, force, load, isLoaded) and the validity
 * state are delegated to the shared {@code AllocateDirectWritableMap} handle.
 *
 * @author Roman Leventov
 * @author Lee Rhodes
 */
final class MapWritableBufferImpl extends NativeWritableBufferImpl {
  //Shared handle to the underlying memory-mapped file resource.
  private final AllocateDirectWritableMap dirWMap;

  MapWritableBufferImpl(
      final AllocateDirectWritableMap dirWMap,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes) {
    super(capacityBytes);
    this.dirWMap = dirWMap;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    //stamp this instance as map-backed, Buffer API, native byte order
    this.typeId = removeNnBuf(typeId) | MAP | BUFFER | NATIVE;
    this.cumOffsetBytes = cumOffsetBytes;
    //single-thread ownership: only the creating thread may use this resource
    if ((this.owner != null) && (this.owner != Thread.currentThread())) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = Thread.currentThread();
  }

  /**
   * Creates a writable region view over this buffer; the concrete subtype is
   * selected by the requested byte order.
   */
  @Override
  BaseWritableBufferImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long newOffsetBytes = offsetBytes + regionOffsetBytes;
    final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    int typeIdOut = removeNnBuf(typeId) | MAP | REGION | (readOnly ? READONLY : 0);
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new MapWritableBufferImpl(
          dirWMap, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes);
    } else {
      typeIdOut |= NONNATIVE;
      return new MapNonNativeWritableBufferImpl(
          dirWMap, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes);
    }
  }

  /**
   * Converts this buffer to a Memory view with the requested byte order,
   * sharing the same mapping and offsets.
   */
  @Override
  BaseWritableMemoryImpl toWritableMemory(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | MEMORY | (readOnly ? READONLY : 0);
    if (byteOrder == ByteOrder.nativeOrder()) { //ByteOrder has exactly two instances
      typeIdOut |= NATIVE;
      return new MapWritableMemoryImpl(
          dirWMap, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes);
    } else {
      typeIdOut |= NONNATIVE;
      return new MapNonNativeWritableMemoryImpl(
          dirWMap, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes);
    }
  }

  /**
   * Creates a full-capacity duplicate view of this buffer with the requested
   * byte order.
   */
  @Override
  BaseWritableBufferImpl toDuplicate(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | BUFFER | DUPLICATE | (readOnly ? READONLY : 0);
    if (byteOrder == ByteOrder.nativeOrder()) {
      typeIdOut |= NATIVE;
      return new MapWritableBufferImpl(
          dirWMap, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes);
    } else {
      typeIdOut |= NONNATIVE;
      return new MapNonNativeWritableBufferImpl(
          dirWMap, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes);
    }
  }

  //Closes the underlying file mapping; only the owner thread may close.
  @Override
  public void close() {
    checkValid();
    checkThread(owner);
    dirWMap.close(); //checksValidAndThread
  }

  //Forces dirty pages of the mapping to be written to the backing file.
  @Override
  public void force() {
    checkValid();
    checkThread(owner);
    checkNotReadOnly();
    dirWMap.force(); //checksValidAndThread
  }

  //Map memory is off-heap: there is no backing heap object.
  @Override
  Object getUnsafeObject() {
    return null;
  }

  //Reports whether the mapped pages are resident in physical memory.
  @Override
  public boolean isLoaded() {
    checkValid();
    checkThread(owner);
    return dirWMap.isLoaded(); //checksValidAndThread
  }

  //Validity is owned by the shared mapping handle, not by this view.
  @Override
  public boolean isValid() {
    return dirWMap.getValid().get();
  }

  //Requests that the mapped pages be loaded into physical memory.
  @Override
  public void load() {
    checkValid();
    checkThread(owner);
    dirWMap.load(); //checksValidAndThread
  }
}
| 2,356 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/AllocateDirect.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import java.util.logging.Logger;
/**
 * Provides access to direct (native) memory.
 *
 * <p>Owns the native allocation's lifecycle: the raw address is held only by the
 * nested {@link Deallocator}, which is registered with a {@code MemoryCleaner} as a
 * safety net in case the owner never calls {@link #close()}.
 *
 * @author Roman Leventov
 * @author Lee Rhodes
 */
@SuppressWarnings("restriction")
final class AllocateDirect {
  static final Logger LOG = Logger.getLogger(AllocateDirect.class.getCanonicalName());

  //Runnable that frees the native allocation exactly once.
  private final Deallocator deallocator;
  //Base offset handed to callers; may be rounded up from the raw allocation
  //address when page alignment is requested.
  private final long nativeBaseOffset;
  //Cleaner that invokes the deallocator if close() is never called.
  private final MemoryCleaner cleaner;

  /**
   * Base Constructor for allocate native memory.
   *
   * <p>Allocates and provides access to capacityBytes directly in native (off-heap) memory
   * leveraging the MemoryImpl interface.
   * The allocated memory will be 8-byte aligned, but may not be page aligned.
   * @param capacityBytes the requested capacity of off-heap memory. Cannot be zero.
   */
  AllocateDirect(final long capacityBytes) {
    final boolean pageAligned = VirtualMachineMemory.getIsPageAligned();
    final long pageSize = getPageSize();
    //over-allocate by one page so we can round the base up to a page boundary
    final long allocationSize = capacityBytes + (pageAligned ? pageSize : 0);
    final long nativeAddress;
    try {
      nativeAddress = unsafe.allocateMemory(allocationSize);
    } catch (final OutOfMemoryError err) {
      throw new RuntimeException(err);
    }
    if (pageAligned && ((nativeAddress % pageSize) != 0)) {
      //Round up to page boundary
      nativeBaseOffset = (nativeAddress & ~(pageSize - 1L)) + pageSize;
    } else {
      nativeBaseOffset = nativeAddress;
    }
    //The deallocator keeps the RAW address: freeMemory must receive the original
    //pointer, not the rounded-up base offset.
    deallocator = new Deallocator(nativeAddress);
    cleaner = new MemoryCleaner(this, deallocator);
  }

  /**
   * Explicitly frees the native memory (idempotent) and unregisters the cleaner.
   */
  public void close() {
    try {
      if (deallocator.deallocate(false)) {
        // This Cleaner.clean() call effectively just removes the Cleaner from the internal linked
        // list of all cleaners. It will delegate to Deallocator.deallocate() which will be a no-op
        // because the valid state is already changed.
        cleaner.clean();
      }
    } finally {
      //keep this object alive until deallocation has completed
      ResourceImpl.reachabilityFence(this);
    }
  }

  long getNativeBaseOffset() {
    return nativeBaseOffset;
  }

  public static int getPageSize() {
    return unsafe.pageSize();
  }

  public StepBoolean getValid() {
    return deallocator.getValid();
  }

  private static final class Deallocator implements Runnable {
    //This is the only place the actual native address is kept for use by unsafe.freeMemory();
    private final long nativeAddress;
    private final StepBoolean valid = new StepBoolean(true); //only place for this

    Deallocator(final long nativeAddress) {
      this.nativeAddress = nativeAddress;
    }

    StepBoolean getValid() {
      return valid;
    }

    //Invoked by the cleaner when this resource becomes unreachable without close().
    @Override
    public void run() throws IllegalStateException {
      deallocate(true);
    }

    /**
     * Frees the native memory at most once.
     * @param calledFromCleaner true when invoked via the cleaner rather than close()
     * @return true if this call performed the free; false if already freed
     */
    boolean deallocate(final boolean calledFromCleaner) throws IllegalStateException {
      if (valid.change()) { //one-way transition; false means someone freed already
        if (calledFromCleaner) {
          // Warn about non-deterministic resource cleanup.
          LOG.warning("A direct resource was not closed explicitly");
        }
        unsafe.freeMemory(nativeAddress);
        return true;
      }
      return false;
    }
  }
}
| 2,357 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/NonNativeWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import org.apache.datasketches.memory.WritableMemory;
/**
 * Implementation of {@link WritableMemory} for non-native endian byte order.
 *
 * <p>Every scalar accessor byte-swaps the value relative to the platform's native
 * order ({@code Character/Short/Integer/Long.reverseBytes}, with float/double going
 * through their raw bit representations). Array accessors bounds-check first and
 * then delegate the swap-and-copy to {@code CompareAndCopy.getNonNativeX/putNonNativeX}.
 *
 * @author Roman Leventov
 * @author Lee Rhodes
 */
@SuppressWarnings("restriction")
abstract class NonNativeWritableMemoryImpl extends BaseWritableMemoryImpl {

  //Pass-through constructor
  NonNativeWritableMemoryImpl() { }

  ///PRIMITIVE getX() and getXArray()
  @Override
  public char getChar(final long offsetBytes) {
    return Character.reverseBytes(getNativeOrderedChar(offsetBytes));
  }

  @Override
  public void getCharArray(final long offsetBytes, final char[] dstArray, final int dstOffsetChars,
      final int lengthChars) {
    final long copyBytes = ((long) lengthChars) << CHAR_SHIFT; //chars -> bytes
    checkValidAndBounds(offsetBytes, copyBytes);
    CompareAndCopy.getNonNativeChars(getUnsafeObject(), getCumulativeOffset(offsetBytes),
        copyBytes, dstArray, dstOffsetChars, lengthChars);
  }

  @Override
  public double getDouble(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_DOUBLE_INDEX_SCALE);
    //read raw long bits, swap, then reinterpret as double
    return Double.longBitsToDouble(
        Long.reverseBytes(unsafe.getLong(getUnsafeObject(), getCumulativeOffset(offsetBytes))));
  }

  @Override
  public void getDoubleArray(final long offsetBytes, final double[] dstArray,
      final int dstOffsetDoubles, final int lengthDoubles) {
    final long copyBytes = ((long) lengthDoubles) << DOUBLE_SHIFT;
    checkValidAndBounds(offsetBytes, copyBytes);
    CompareAndCopy.getNonNativeDoubles(getUnsafeObject(), getCumulativeOffset(offsetBytes),
        copyBytes, dstArray, dstOffsetDoubles, lengthDoubles);
  }

  @Override
  public float getFloat(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_FLOAT_INDEX_SCALE);
    //read raw int bits, swap, then reinterpret as float
    return Float.intBitsToFloat(
        Integer.reverseBytes(unsafe.getInt(getUnsafeObject(), getCumulativeOffset(offsetBytes))));
  }

  @Override
  public void getFloatArray(final long offsetBytes, final float[] dstArray,
      final int dstOffsetFloats, final int lengthFloats) {
    final long copyBytes = ((long) lengthFloats) << FLOAT_SHIFT;
    checkValidAndBounds(offsetBytes, copyBytes);
    CompareAndCopy.getNonNativeFloats(getUnsafeObject(), getCumulativeOffset(offsetBytes),
        copyBytes, dstArray, dstOffsetFloats, lengthFloats);
  }

  @Override
  public int getInt(final long offsetBytes) {
    return Integer.reverseBytes(getNativeOrderedInt(offsetBytes));
  }

  @Override
  public void getIntArray(final long offsetBytes, final int[] dstArray, final int dstOffsetInts,
      final int lengthInts) {
    final long copyBytes = ((long) lengthInts) << INT_SHIFT;
    checkValidAndBounds(offsetBytes, copyBytes);
    CompareAndCopy.getNonNativeInts(getUnsafeObject(), getCumulativeOffset(offsetBytes), copyBytes,
        dstArray, dstOffsetInts, lengthInts);
  }

  @Override
  public long getLong(final long offsetBytes) {
    return Long.reverseBytes(getNativeOrderedLong(offsetBytes));
  }

  @Override
  public void getLongArray(final long offsetBytes, final long[] dstArray,
      final int dstOffsetLongs, final int lengthLongs) {
    final long copyBytes = ((long) lengthLongs) << LONG_SHIFT;
    checkValidAndBounds(offsetBytes, copyBytes);
    CompareAndCopy.getNonNativeLongs(getUnsafeObject(), getCumulativeOffset(offsetBytes), copyBytes,
        dstArray, dstOffsetLongs, lengthLongs);
  }

  @Override
  public short getShort(final long offsetBytes) {
    return Short.reverseBytes(getNativeOrderedShort(offsetBytes));
  }

  @Override
  public void getShortArray(final long offsetBytes, final short[] dstArray,
      final int dstOffsetShorts, final int lengthShorts) {
    final long copyBytes = ((long) lengthShorts) << SHORT_SHIFT;
    checkValidAndBounds(offsetBytes, copyBytes);
    CompareAndCopy.getNonNativeShorts(getUnsafeObject(), getCumulativeOffset(offsetBytes),
        copyBytes, dstArray, dstOffsetShorts, lengthShorts);
  }

  //PRIMITIVE putX() and putXArray() implementations
  @Override
  public void putChar(final long offsetBytes, final char value) {
    putNativeOrderedChar(offsetBytes, Character.reverseBytes(value));
  }

  @Override
  public void putCharArray(final long offsetBytes, final char[] srcArray, final int srcOffsetChars,
      final int lengthChars) {
    final long copyBytes = ((long) lengthChars) << CHAR_SHIFT;
    checkValidAndBoundsForWrite(offsetBytes, copyBytes);
    CompareAndCopy.putNonNativeChars(srcArray, srcOffsetChars, lengthChars, copyBytes,
        getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }

  @Override
  public void putDouble(final long offsetBytes, final double value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_DOUBLE_INDEX_SCALE);
    //convert to raw long bits, swap, then store
    unsafe.putLong(getUnsafeObject(), getCumulativeOffset(offsetBytes),
        Long.reverseBytes(Double.doubleToRawLongBits(value)));
  }

  @Override
  public void putDoubleArray(final long offsetBytes, final double[] srcArray,
      final int srcOffsetDoubles, final int lengthDoubles) {
    final long copyBytes = ((long) lengthDoubles) << DOUBLE_SHIFT;
    checkValidAndBoundsForWrite(offsetBytes, copyBytes);
    CompareAndCopy.putNonNativeDoubles(srcArray, srcOffsetDoubles, lengthDoubles, copyBytes,
        getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }

  @Override
  public void putFloat(final long offsetBytes, final float value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_FLOAT_INDEX_SCALE);
    //convert to raw int bits, swap, then store
    unsafe.putInt(getUnsafeObject(), getCumulativeOffset(offsetBytes),
        Integer.reverseBytes(Float.floatToRawIntBits(value)));
  }

  @Override
  public void putFloatArray(final long offsetBytes, final float[] srcArray,
      final int srcOffsetFloats, final int lengthFloats) {
    final long copyBytes = ((long) lengthFloats) << FLOAT_SHIFT;
    checkValidAndBoundsForWrite(offsetBytes, copyBytes);
    CompareAndCopy.putNonNativeFloats(srcArray, srcOffsetFloats, lengthFloats, copyBytes,
        getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }

  @Override
  public void putInt(final long offsetBytes, final int value) {
    putNativeOrderedInt(offsetBytes, Integer.reverseBytes(value));
  }

  @Override
  public void putIntArray(final long offsetBytes, final int[] srcArray, final int srcOffsetInts,
      final int lengthInts) {
    final long copyBytes = ((long) lengthInts) << INT_SHIFT;
    checkValidAndBoundsForWrite(offsetBytes, copyBytes);
    CompareAndCopy.putNonNativeInts(srcArray, srcOffsetInts, lengthInts, copyBytes,
        getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }

  @Override
  public void putLong(final long offsetBytes, final long value) {
    putNativeOrderedLong(offsetBytes, Long.reverseBytes(value));
  }

  @Override
  public void putLongArray(final long offsetBytes, final long[] srcArray, final int srcOffsetLongs,
      final int lengthLongs) {
    final long copyBytes = ((long) lengthLongs) << LONG_SHIFT;
    checkValidAndBoundsForWrite(offsetBytes, copyBytes);
    CompareAndCopy.putNonNativeLongs(srcArray, srcOffsetLongs, lengthLongs, copyBytes,
        getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }

  @Override
  public void putShort(final long offsetBytes, final short value) {
    putNativeOrderedShort(offsetBytes, Short.reverseBytes(value));
  }

  @Override
  public void putShortArray(final long offsetBytes, final short[] srcArray,
      final int srcOffsetShorts, final int lengthShorts) {
    final long copyBytes = ((long) lengthShorts) << SHORT_SHIFT;
    checkValidAndBoundsForWrite(offsetBytes, copyBytes);
    CompareAndCopy.putNonNativeShorts(srcArray, srcOffsetShorts, lengthShorts, copyBytes,
        getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
}
| 2,358 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/Utf8.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static java.lang.Character.isSurrogate;
import static java.lang.Character.isSurrogatePair;
import static java.lang.Character.toCodePoint;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import java.io.IOException;
import java.nio.BufferOverflowException;
import java.nio.CharBuffer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.Utf8CodingException;
import org.apache.datasketches.memory.WritableMemory;
/**
* Encoding and decoding implementations of {@link WritableMemory#putCharsToUtf8} and
* {@link Memory#getCharsFromUtf8}.
*
* <p>This is specifically designed to reduce the production of intermediate objects (garbage),
* thus significantly reducing pressure on the JVM Garbage Collector.
*
* <p>UTF-8 encoding/decoding is adapted from
* https://github.com/protocolbuffers/protobuf/blob/master/java/core/src/main/java/com/google/protobuf/Utf8.java
*
* <p>Copyright 2008 Google Inc. All rights reserved.
* https://developers.google.com/protocol-buffers/
* See LICENSE.
*
* @author Lee Rhodes
* @author Roman Leventov
*/
@SuppressWarnings("restriction")
final class Utf8 {
private Utf8() { }
//Decode
  /**
   * Decodes exactly {@code utf8LengthBytes} UTF-8 bytes starting at {@code offsetBytes}
   * into the given {@link Appendable}.
   *
   * <p>Heap-backed CharBuffers are routed to an array-based fast path; direct
   * CharBuffers and all other Appendables use the generic path below.
   *
   * @param offsetBytes starting offset of the UTF-8 bytes, relative to the resource
   * @param utf8LengthBytes number of UTF-8 bytes to decode
   * @param dst destination of the decoded characters
   * @param cumBaseOffset cumulative base offset of the resource
   * @param unsafeObj backing heap object, or null for off-heap resources
   * @return number of characters decoded
   * @throws IOException from {@link Appendable#append(char)}
   * @throws Utf8CodingException on an invalid or truncated UTF-8 byte sequence
   */
  static final int getCharsFromUtf8(final long offsetBytes, final int utf8LengthBytes,
      final Appendable dst, final long cumBaseOffset, final Object unsafeObj)
      throws IOException, Utf8CodingException {
    if ((dst instanceof CharBuffer) && ((CharBuffer) dst).hasArray()) {
      return getCharBufferCharsFromUtf8(offsetBytes, ((CharBuffer) dst), utf8LengthBytes,
          cumBaseOffset, unsafeObj);
    }
    //Decode Direct CharBuffers and all other Appendables
    final long address = cumBaseOffset + offsetBytes;
    // Optimize for 100% ASCII (Hotspot loves small simple top-level loops like this).
    // This simple loop stops when we encounter a byte >= 0x80 (i.e. non-ASCII).
    // Need to keep this loop int-indexed, because it's faster for Hotspot JIT, it doesn't insert
    // savepoint polls on each iteration.
    int i = 0;
    for (; i < utf8LengthBytes; i++) {
      final byte b = unsafe.getByte(unsafeObj, address + i);
      if (!DecodeUtil.isOneByte(b)) {
        break;
      }
      dst.append((char) b);
    }
    if (i == utf8LengthBytes) {
      return i; //pure ASCII: one char per byte
    }
    //Hand the remainder to the multi-byte-aware path; add the i ASCII chars already emitted.
    return getNonAsciiCharsFromUtf8(dst, address + i, address + utf8LengthBytes, unsafeObj,
        cumBaseOffset) + i;
  }
  /*
   * Optimize for heap CharBuffer manually, because Hotspot JIT doesn't itself unfold this
   * abstraction well (doesn't hoist array bound checks, etc.)
   *
   * Decodes into the CharBuffer's backing array directly, updating the buffer's
   * position on every exit path.
   */
  private static int getCharBufferCharsFromUtf8(final long offsetBytes, final CharBuffer cbuf,
      final int utf8LengthBytes, final long cumBaseOffset, final Object unsafeObj) {
    final char[] carr = cbuf.array();
    final int startCpos = cbuf.position() + cbuf.arrayOffset(); //absolute array index of first write
    int cpos = startCpos;
    final int clim = cbuf.arrayOffset() + cbuf.limit(); //absolute array index limit
    final long address = cumBaseOffset + offsetBytes;
    int i = 0; //byte index
    // Optimize for 100% ASCII (Hotspot loves small simple top-level loops like this).
    // This simple loop stops when we encounter a byte >= 0x80 (i.e. non-ASCII).
    // Within cbufNoCheckLimit the destination cannot overflow, so bounds checks are skipped.
    final int cbufNoCheckLimit = Math.min(utf8LengthBytes, clim - cpos);
    // Need to keep this loop int-indexed, because it's faster for Hotspot JIT, it doesn't insert
    // savepoint polls on each iteration.
    for (; i < cbufNoCheckLimit; i++) {
      final byte b = unsafe.getByte(unsafeObj, address + i);
      if (!DecodeUtil.isOneByte(b)) {
        break;
      }
      // Not checking CharBuffer bounds!
      carr[cpos++] = (char) b;
    }
    //Same ASCII loop, but now each write must be bounds-checked.
    for (; i < utf8LengthBytes; i++) {
      final byte b = unsafe.getByte(unsafeObj, address + i);
      if (!DecodeUtil.isOneByte(b)) {
        break;
      }
      checkCharBufferPos(cbuf, cpos, clim);
      carr[cpos++] = (char) b;
    }
    if (i == utf8LengthBytes) {
      cbuf.position(cpos - cbuf.arrayOffset());
      return cpos - startCpos; //chars decoded relative to the starting position
    }
    // NOTE(review): this fallback returns (final cpos - arrayOffset), i.e. the buffer's final
    // position, while the ASCII-only path above returns a count relative to startCpos. The two
    // differ whenever cbuf.position() > 0 on entry — verify which meaning callers expect.
    return getCharBufferNonAsciiCharsFromUtf8(cbuf, carr, cpos, clim, address + i,
        address + utf8LengthBytes, unsafeObj, cumBaseOffset) - cbuf.arrayOffset();
  }
  /**
   * Decodes the remaining (possibly multi-byte) UTF-8 sequences directly into the
   * CharBuffer's backing array.
   *
   * @param cbuf the destination CharBuffer (position is updated on every exit path)
   * @param carr the CharBuffer's backing array
   * @param cpos current absolute array index into {@code carr}
   * @param clim absolute array index limit for {@code carr}
   * @param address current unsafe address of the next UTF-8 byte
   * @param addressLimit unsafe address one past the last UTF-8 byte
   * @param unsafeObj backing heap object, or null for off-heap resources
   * @param cumBaseOffset cumulative base offset (used only to compute offsets for exceptions)
   * @return the final absolute array index after decoding
   */
  private static int getCharBufferNonAsciiCharsFromUtf8(final CharBuffer cbuf, final char[] carr,
      int cpos, final int clim, long address, final long addressLimit, final Object unsafeObj,
      final long cumBaseOffset) {
    while (address < addressLimit) {
      final byte byte1 = unsafe.getByte(unsafeObj, address++);
      if (DecodeUtil.isOneByte(byte1)) {
        checkCharBufferPos(cbuf, cpos, clim);
        carr[cpos++] = (char) byte1;
        // It's common for there to be multiple ASCII characters in a run mixed in, so add an
        // extra optimized loop to take care of these runs.
        while (address < addressLimit) {
          final byte b = unsafe.getByte(unsafeObj, address);
          if (!DecodeUtil.isOneByte(b)) {
            break;
          }
          address++;
          checkCharBufferPos(cbuf, cpos, clim);
          carr[cpos++] = (char) b;
        }
      }
      else if (DecodeUtil.isTwoBytes(byte1)) {
        //2-byte sequence: need 1 more input byte and 1 output char
        if (address >= addressLimit) {
          cbuf.position(cpos - cbuf.arrayOffset());
          final long off = address - cumBaseOffset;
          final long limit = addressLimit - cumBaseOffset;
          throw Utf8CodingException.shortUtf8DecodeByteSequence(byte1, off, limit, 2);
        }
        checkCharBufferPos(cbuf, cpos, clim);
        DecodeUtil.handleTwoBytesCharBuffer(
            byte1,
            /* byte2 */ unsafe.getByte(unsafeObj, address++),
            cbuf, carr, cpos);
        cpos++;
      }
      else if (DecodeUtil.isThreeBytes(byte1)) {
        //3-byte sequence: need 2 more input bytes and 1 output char
        if (address >= (addressLimit - 1)) {
          cbuf.position(cpos - cbuf.arrayOffset());
          final long off = address - cumBaseOffset;
          final long limit = addressLimit - cumBaseOffset;
          throw Utf8CodingException.shortUtf8DecodeByteSequence(byte1, off, limit, 3);
        }
        checkCharBufferPos(cbuf, cpos, clim);
        DecodeUtil.handleThreeBytesCharBuffer(
            byte1,
            /* byte2 */ unsafe.getByte(unsafeObj, address++),
            /* byte3 */ unsafe.getByte(unsafeObj, address++),
            cbuf, carr, cpos);
        cpos++;
      }
      else {
        //4-byte sequence: need 3 more input bytes and 2 output chars (a surrogate pair)
        if (address >= (addressLimit - 2)) {
          cbuf.position(cpos - cbuf.arrayOffset());
          final long off = address - cumBaseOffset;
          final long limit = addressLimit - cumBaseOffset;
          throw Utf8CodingException.shortUtf8DecodeByteSequence(byte1, off, limit, 4);
        }
        if (cpos >= (clim - 1)) { //inline 2-char bounds check (surrogate pair)
          cbuf.position(cpos - cbuf.arrayOffset());
          throw new BufferOverflowException();
        }
        DecodeUtil.handleFourBytesCharBuffer(
            byte1,
            /* byte2 */ unsafe.getByte(unsafeObj, address++),
            /* byte3 */ unsafe.getByte(unsafeObj, address++),
            /* byte4 */ unsafe.getByte(unsafeObj, address++),
            cbuf, carr, cpos);
        cpos += 2;
      }
    }
    cbuf.position(cpos - cbuf.arrayOffset());
    return cpos;
  }
  /**
   * Decodes (possibly multi-byte) UTF-8 sequences into a generic Appendable destination.
   *
   * @param dst the Appendable destination
   * @param address current unsafe address of the next UTF-8 byte
   * @param addressLimit unsafe address one past the last UTF-8 byte
   * @param unsafeObj backing heap object, or null for off-heap resources
   * @param cumBaseOffset cumulative base offset (used only to compute offsets for exceptions)
   * @return the number of chars decoded (surrogate pairs count as 2)
   * @throws IOException from {@link Appendable#append(char)}
   */
  private static int getNonAsciiCharsFromUtf8(final Appendable dst, long address,
      final long addressLimit, final Object unsafeObj, final long cumBaseOffset)
      throws IOException {
    int chars = 0;
    while (address < addressLimit) {
      final byte byte1 = unsafe.getByte(unsafeObj, address++);
      if (DecodeUtil.isOneByte(byte1)) {
        dst.append((char) byte1);
        chars++;
        // It's common for there to be multiple ASCII characters in a run mixed in, so add an
        // extra optimized loop to take care of these runs.
        while (address < addressLimit) {
          final byte b = unsafe.getByte(unsafeObj, address);
          if (!DecodeUtil.isOneByte(b)) {
            break;
          }
          address++;
          dst.append((char) b);
          chars++;
        }
      }
      else if (DecodeUtil.isTwoBytes(byte1)) {
        //2-byte sequence: need 1 more input byte
        if (address >= addressLimit) {
          final long off = address - cumBaseOffset;
          final long limit = addressLimit - cumBaseOffset;
          throw Utf8CodingException.shortUtf8DecodeByteSequence(byte1, off, limit, 2);
        }
        DecodeUtil.handleTwoBytes(
            byte1,
            /* byte2 */ unsafe.getByte(unsafeObj, address++),
            dst);
        chars++;
      }
      else if (DecodeUtil.isThreeBytes(byte1)) {
        //3-byte sequence: need 2 more input bytes
        if (address >= (addressLimit - 1)) {
          final long off = address - cumBaseOffset;
          final long limit = addressLimit - cumBaseOffset;
          throw Utf8CodingException.shortUtf8DecodeByteSequence(byte1, off, limit, 3);
        }
        DecodeUtil.handleThreeBytes(
            byte1,
            /* byte2 */ unsafe.getByte(unsafeObj, address++),
            /* byte3 */ unsafe.getByte(unsafeObj, address++),
            dst);
        chars++;
      }
      else {
        //4-byte sequence: need 3 more input bytes; emits a surrogate pair (2 chars)
        if (address >= (addressLimit - 2)) {
          final long off = address - cumBaseOffset;
          final long limit = addressLimit - cumBaseOffset;
          throw Utf8CodingException.shortUtf8DecodeByteSequence(byte1, off, limit, 4);
        }
        DecodeUtil.handleFourBytes(
            byte1,
            /* byte2 */ unsafe.getByte(unsafeObj, address++),
            /* byte3 */ unsafe.getByte(unsafeObj, address++),
            /* byte4 */ unsafe.getByte(unsafeObj, address++),
            dst);
        chars += 2;
      }
    }
    return chars;
  }
  /**
   * Throws {@link BufferOverflowException} when the destination CharBuffer is full,
   * first restoring the buffer's position so the caller can see how far decoding got.
   * @param cbuf the destination CharBuffer
   * @param cpos current absolute array index
   * @param clim absolute array index limit
   */
  private static void checkCharBufferPos(final CharBuffer cbuf, final int cpos, final int clim) {
    if (cpos == clim) {
      cbuf.position(cpos - cbuf.arrayOffset());
      throw new BufferOverflowException();
    }
  }
/******************/
//Encode
  /**
   * Encodes the given CharSequence as UTF-8 bytes into memory starting at
   * {@code offsetBytes}.
   *
   * <p>Lead-byte constants like {@code (0xF << 6)} look oversized but are correct:
   * the value is truncated by the cast to byte (e.g. 0x3C0 -> 0xC0, the 2-byte lead
   * prefix; 0x1E0 -> 0xE0 for 3-byte; 0xF0 for 4-byte).
   *
   * @param offsetBytes starting offset, relative to the resource
   * @param src the characters to encode
   * @param capacityBytes capacity of the destination resource in bytes
   * @param cumBaseOffset cumulative base offset of the resource
   * @param unsafeObj backing heap object, or null for off-heap resources
   * @return the number of bytes encoded
   * @throws Utf8CodingException on insufficient capacity or invalid surrogate usage
   */
  static long putCharsToUtf8(final long offsetBytes, final CharSequence src,
      final long capacityBytes, final long cumBaseOffset, final Object unsafeObj) {
    int cIdx = 0; //src character index
    long bIdx = cumBaseOffset + offsetBytes; //byte index
    long bCnt = 0; //bytes inserted
    final long byteLimit = cumBaseOffset + capacityBytes; //unsafe index limit
    final int utf16Length = src.length();
    //Quickly dispatch an ASCII sequence
    for (char c;
        (cIdx < utf16Length) && ((cIdx + bIdx) < byteLimit) && ((c = src.charAt(cIdx)) < 0x80);
        cIdx++, bCnt++) {
      unsafe.putByte(unsafeObj, bIdx + cIdx, (byte) c);
    }
    //encountered a non-ascii character
    if (cIdx == utf16Length) { //done.
      // next relative byte index in memory is (bIdx + utf16Length) - cumBaseOffset.
      return bCnt;
    }
    bIdx += cIdx; //bytes == characters for ascii
    for (char c; cIdx < utf16Length; cIdx++) { //process the remaining characters
      c = src.charAt(cIdx);
      if ((c < 0x80) && (bIdx < byteLimit)) {
        //Encode ASCII, 0 through 0x007F.
        unsafe.putByte(unsafeObj, bIdx++, (byte) c);
        bCnt++;
      }
      else
      //c MUST BE >= 0x0080 || j >= byteLimit
      if ((c < 0x800) && (bIdx < (byteLimit - 1))) {
        //Encode 0x80 through 0x7FF.
        //This is for almost all Latin-script alphabets plus Greek, Cyrillic, Hebrew, Arabic, etc.
        //We must have target space for at least 2 Utf8 bytes.
        unsafe.putByte(unsafeObj, bIdx++, (byte) ((0xF << 6) | (c >>> 6))); //byte cast -> 0xC0 prefix
        unsafe.putByte(unsafeObj, bIdx++, (byte) (0x80 | (0x3F & c)));
        bCnt += 2;
      }
      else
      //c > 0x800 || j >= byteLimit - 1 || j >= byteLimit
      if ( !isSurrogate(c) && (bIdx < (byteLimit - 2)) ) {
        //Encode the remainder of the BMP that are not surrogates:
        // 0x0800 thru 0xD7FF; 0xE000 thru 0xFFFF, the max single-char code point
        //We must have target space for at least 3 Utf8 bytes.
        unsafe.putByte(unsafeObj, bIdx++, (byte) ((0xF << 5) | (c >>> 12))); //byte cast -> 0xE0 prefix
        unsafe.putByte(unsafeObj, bIdx++, (byte) (0x80 | (0x3F & (c >>> 6))));
        unsafe.putByte(unsafeObj, bIdx++, (byte) (0x80 | (0x3F & c)));
        bCnt += 3;
      }
      else {
        //c is a surrogate || j >= byteLimit - 2 || j >= byteLimit - 1 || j >= byteLimit
        //At this point we are either:
        // 1) Attempting to encode Code Points outside the BMP.
        //
        // The only way to properly encode code points outside the BMP into Utf8 bytes is to use
        // High/Low pairs of surrogate characters. Therefore, we must have at least 2 source
        // characters remaining, at least 4 bytes of memory space remaining, and the next 2
        // characters must be a valid surrogate pair.
        //
        // 2) There is insufficient memory space to encode the current character from one of the
        // ifs above.
        //
        // We proceed assuming (1). If the following test fails, we move to an exception.
        final char low;
        if ( (cIdx <= (utf16Length - 2))
            && (bIdx <= (byteLimit - 4))
            && isSurrogatePair(c, low = src.charAt(cIdx + 1)) ) { //we are good
          cIdx++; //skip over low surrogate
          final int codePoint = toCodePoint(c, low);
          unsafe.putByte(unsafeObj, bIdx++, (byte) ((0xF << 4) | (codePoint >>> 18))); //0xF0 prefix
          unsafe.putByte(unsafeObj, bIdx++, (byte) (0x80 | (0x3F & (codePoint >>> 12))));
          unsafe.putByte(unsafeObj, bIdx++, (byte) (0x80 | (0x3F & (codePoint >>> 6))));
          unsafe.putByte(unsafeObj, bIdx++, (byte) (0x80 | (0x3F & codePoint)));
          bCnt += 4;
        }
        else {
          //We are going to throw an exception. So we have time to figure out
          // what was wrong and hopefully throw an intelligent message!
          //check the BMP code point cases and their required memory limits
          if ( ((c < 0X0080) && (bIdx >= byteLimit))
              || ((c < 0x0800) && (bIdx >= (byteLimit - 1)))
              || ((c < 0xFFFF) && (bIdx >= (byteLimit - 2))) ) {
            throw Utf8CodingException.outOfMemory();
          }
          if (cIdx > (utf16Length - 2)) { //the last char is an unpaired surrogate
            throw Utf8CodingException.unpairedSurrogate(c);
          }
          if (bIdx > (byteLimit - 4)) {
            //4 bytes of memory are required to encode a surrogate pair.
            final int remaining = (int) ((bIdx - byteLimit) + 4L);
            throw Utf8CodingException.shortUtf8EncodeByteLength(remaining);
          }
          if (!isSurrogatePair(c, src.charAt(cIdx + 1)) ) {
            //Not a surrogate pair.
            throw Utf8CodingException.illegalSurrogatePair(c, src.charAt(cIdx + 1));
          }
          //This should not happen :)
          throw new IllegalArgumentException("Unknown Utf8 encoding exception");
        }
      }
    }
    //final long localOffsetBytes = bIdx - cumBaseOffset;
    return bCnt;
  }
/*****************/
/**
* Utility methods for decoding UTF-8 bytes into {@link String}. Callers are responsible for
* extracting bytes (possibly using Unsafe methods), and checking remaining bytes. All other
* UTF-8 validity checks and codepoint conversions happen in this class.
*
* @see <a href="https://en.wikipedia.org/wiki/UTF-8">Wikipedia: UTF-8</a>
*/
private static class DecodeUtil {

  /**
   * Returns whether this is a single-byte UTF-8 encoding.
   * This is for ASCII.
   *
   * <p>Code Plane 0, Code Point range U+0000 to U+007F.
   *
   * <p>Bit Patterns:
   * <ul><li>Byte 1: '0xxxxxxx'</li>
   * </ul>
   * @param b the byte being tested
   * @return true if this is a single-byte UTF-8 encoding, i.e., b is &ge; 0.
   */
  static boolean isOneByte(final byte b) {
    return b >= 0;
  }

  /**
   * Returns whether this is the start of a two-byte UTF-8 encoding. One-byte encoding must
   * already be excluded.
   * This is for almost all Latin-script alphabets plus Greek, Cyrillic, Hebrew, Arabic, etc.
   *
   * <p>Code Plane 0, Code Point range U+0080 to U+07FF.
   *
   * <p>Bit Patterns:
   * <ul><li>Byte 1: '110xxxxx'</li>
   * <li>Byte 2: '10xxxxxx'</li>
   * </ul>
   *
   * <p>The leading byte must be (unsigned) &lt; 0xE0; note this test is only meaningful after
   * isOneByte(b) has returned false.
   *
   * @param b the byte being tested
   * @return true if this is the start of a two-byte UTF-8 encoding.
   */
  static boolean isTwoBytes(final byte b) {
    //As a signed byte, values 0x80..0xDF are negative and less than (byte) 0xE0.
    return b < (byte) 0xE0;
  }

  /**
   * Returns whether this is the start of a three-byte UTF-8 encoding. Two-byte encoding must
   * already be excluded.
   * This is for the rest of the BMP, which includes most common Chinese, Japanese and Korean
   * characters.
   *
   * <p>Code Plane 0, Code Point range U+0800 to U+FFFF.
   *
   * <p>Bit Patterns:
   * <ul><li>Byte 1: '1110xxxx'</li>
   * <li>Byte 2: '10xxxxxx'</li>
   * <li>Byte 3: '10xxxxxx'</li>
   * </ul>
   * The leading byte must be (unsigned) &lt; 0xF0.
   *
   * @param b the byte being tested
   * @return true if this is the start of a three-byte UTF-8 encoding, i.e., b &lt; 0xF0
   * (unsigned), given that one- and two-byte encodings are already excluded.
   */
  static boolean isThreeBytes(final byte b) {
    return b < (byte) 0xF0;
  }

  /*
   * Note that if three-byte UTF-8 coding has been excluded and if the current byte is
   * ≥ 0XF0, it must be the start of a four-byte UTF-8 encoding; hence there is no
   * isFourBytes(...) test method.
   * This is for the less common CJKV characters, historic scripts, math symbols, emoji, etc.
   *
   * <p>Code Plane 1 through 16, Code Point range U+10000 to U+10FFFF.
   *
   * <p>Bit Patterns:
   * <ul><li>Byte 1: '11110xxx'</li>
   * <li>Byte 2: '10xxxxxx'</li>
   * <li>Byte 3: '10xxxxxx'</li>
   * <li>Byte 4: '10xxxxxx'</li>
   * </ul>
   */

  /**
   * Decodes a two-byte UTF-8 sequence and appends the resulting char to dst.
   * @param byte1 the leading byte ('110xxxxx')
   * @param byte2 the trailing byte ('10xxxxxx')
   * @param dst the destination Appendable
   * @throws IOException if thrown by the Appendable
   * @throws Utf8CodingException if the two-byte sequence is illegal
   */
  static void handleTwoBytes(
      final byte byte1, final byte byte2,
      final Appendable dst)
      throws IOException, Utf8CodingException {
    // Simultaneously checks for illegal trailing-byte in leading position (<= '11000000') and
    // overlong 2-byte, '11000001'.
    if ((byte1 < (byte) 0xC2)
        || isNotTrailingByte(byte2)) {
      final byte[] out = new byte[] {byte1, byte2};
      throw Utf8CodingException.illegalUtf8DecodeByteSequence(out);
    }
    //Compose: 5 payload bits from byte1, 6 from byte2.
    dst.append((char) (((byte1 & 0x1F) << 6) | trailingByteValue(byte2)));
  }

  /**
   * Decodes a two-byte UTF-8 sequence into the char array backing a CharBuffer.
   * @param byte1 the leading byte ('110xxxxx')
   * @param byte2 the trailing byte ('10xxxxxx')
   * @param cb the destination CharBuffer; its position is updated before throwing on error
   * @param ca the char array backing cb
   * @param cp the absolute index into ca where the decoded char is stored
   * @throws Utf8CodingException if the two-byte sequence is illegal
   */
  static void handleTwoBytesCharBuffer(
      final byte byte1, final byte byte2,
      final CharBuffer cb, final char[] ca, final int cp)
      throws Utf8CodingException {
    // Simultaneously checks for illegal trailing-byte in leading position (<= '11000000') and
    // overlong 2-byte, '11000001'.
    if ((byte1 < (byte) 0xC2)
        || isNotTrailingByte(byte2)) {
      final byte[] out = new byte[] {byte1, byte2};
      //Record how far decoding got so the caller can observe the partial result.
      cb.position(cp - cb.arrayOffset());
      throw Utf8CodingException.illegalUtf8DecodeByteSequence(out);
    }
    ca[cp] = (char) (((byte1 & 0x1F) << 6) | trailingByteValue(byte2));
  }

  /**
   * Decodes a three-byte UTF-8 sequence and appends the resulting char to dst.
   * @param byte1 the leading byte ('1110xxxx')
   * @param byte2 the first trailing byte ('10xxxxxx')
   * @param byte3 the second trailing byte ('10xxxxxx')
   * @param dst the destination Appendable
   * @throws IOException if thrown by the Appendable
   * @throws Utf8CodingException if the three-byte sequence is illegal, overlong, or encodes
   * a surrogate code point
   */
  static void handleThreeBytes(
      final byte byte1, final byte byte2, final byte byte3,
      final Appendable dst)
      throws IOException, Utf8CodingException {
    if (isNotTrailingByte(byte2)
        // overlong? 5 most significant bits must not all be zero
        || ((byte1 == (byte) 0xE0) && (byte2 < (byte) 0xA0))
        // check for illegal surrogate codepoints (U+D800..U+DFFF must not appear in UTF-8)
        || ((byte1 == (byte) 0xED) && (byte2 >= (byte) 0xA0))
        || isNotTrailingByte(byte3)) {
      final byte[] out = new byte[] {byte1, byte2, byte3};
      throw Utf8CodingException.illegalUtf8DecodeByteSequence(out);
    }
    //Compose: 4 payload bits from byte1, 6 from byte2, 6 from byte3.
    dst.append((char)
        (((byte1 & 0x0F) << 12) | (trailingByteValue(byte2) << 6) | trailingByteValue(byte3)));
  }

  /**
   * Decodes a three-byte UTF-8 sequence into the char array backing a CharBuffer.
   * @param byte1 the leading byte ('1110xxxx')
   * @param byte2 the first trailing byte ('10xxxxxx')
   * @param byte3 the second trailing byte ('10xxxxxx')
   * @param cb the destination CharBuffer; its position is updated before throwing on error
   * @param ca the char array backing cb
   * @param cp the absolute index into ca where the decoded char is stored
   * @throws Utf8CodingException if the three-byte sequence is illegal, overlong, or encodes
   * a surrogate code point
   */
  static void handleThreeBytesCharBuffer(
      final byte byte1, final byte byte2, final byte byte3,
      final CharBuffer cb, final char[] ca, final int cp)
      throws Utf8CodingException {
    if (isNotTrailingByte(byte2)
        // overlong? 5 most significant bits must not all be zero
        || ((byte1 == (byte) 0xE0) && (byte2 < (byte) 0xA0))
        // check for illegal surrogate codepoints
        || ((byte1 == (byte) 0xED) && (byte2 >= (byte) 0xA0))
        || isNotTrailingByte(byte3)) {
      //Record how far decoding got so the caller can observe the partial result.
      cb.position(cp - cb.arrayOffset());
      final byte[] out = new byte[] {byte1, byte2, byte3};
      throw Utf8CodingException.illegalUtf8DecodeByteSequence(out);
    }
    ca[cp] = (char)
        (((byte1 & 0x0F) << 12) | (trailingByteValue(byte2) << 6) | trailingByteValue(byte3));
  }

  /**
   * Decodes a four-byte UTF-8 sequence (a supplementary code point) and appends the resulting
   * UTF-16 surrogate pair (2 chars) to dst.
   * @param byte1 the leading byte ('11110xxx')
   * @param byte2 the first trailing byte ('10xxxxxx')
   * @param byte3 the second trailing byte ('10xxxxxx')
   * @param byte4 the third trailing byte ('10xxxxxx')
   * @param dst the destination Appendable
   * @throws IOException if thrown by the Appendable
   * @throws Utf8CodingException if the four-byte sequence is illegal, overlong, or encodes a
   * code point above U+10FFFF
   */
  static void handleFourBytes(
      final byte byte1, final byte byte2, final byte byte3, final byte byte4,
      final Appendable dst)
      throws IOException, Utf8CodingException {
    if (isNotTrailingByte(byte2)
        // Check that 1 <= plane <= 16. Tricky optimized form of:
        // valid 4-byte leading byte?
        // if (byte1 > (byte) 0xF4 ||
        // overlong? 4 most significant bits must not all be zero
        // byte1 == (byte) 0xF0 && byte2 < (byte) 0x90 ||
        // codepoint larger than the highest code point (U+10FFFF)?
        // byte1 == (byte) 0xF4 && byte2 > (byte) 0x8F)
        || ((((byte1 << 28) + (byte2 - (byte) 0x90)) >> 30) != 0)
        || isNotTrailingByte(byte3)
        || isNotTrailingByte(byte4)) {
      final byte[] out = new byte[] { byte1, byte2, byte3, byte4 };
      throw Utf8CodingException.illegalUtf8DecodeByteSequence(out);
    }
    //Compose: 3 payload bits from byte1, 6 each from byte2..byte4.
    final int codepoint = ((byte1 & 0x07) << 18)
        | (trailingByteValue(byte2) << 12)
        | (trailingByteValue(byte3) << 6)
        | trailingByteValue(byte4);
    //Supplementary code points require a UTF-16 surrogate pair.
    dst.append(DecodeUtil.highSurrogate(codepoint));
    dst.append(DecodeUtil.lowSurrogate(codepoint));
  }

  /**
   * Decodes a four-byte UTF-8 sequence (a supplementary code point) into the char array backing
   * a CharBuffer as a UTF-16 surrogate pair occupying indices cp and cp + 1.
   * @param byte1 the leading byte ('11110xxx')
   * @param byte2 the first trailing byte ('10xxxxxx')
   * @param byte3 the second trailing byte ('10xxxxxx')
   * @param byte4 the third trailing byte ('10xxxxxx')
   * @param cb the destination CharBuffer; its position is updated before throwing on error
   * @param ca the char array backing cb
   * @param cp the absolute index into ca where the high surrogate is stored
   * @throws Utf8CodingException if the four-byte sequence is illegal, overlong, or encodes a
   * code point above U+10FFFF
   */
  static void handleFourBytesCharBuffer(
      final byte byte1, final byte byte2, final byte byte3, final byte byte4,
      final CharBuffer cb, final char[] ca, final int cp)
      throws Utf8CodingException {
    if (isNotTrailingByte(byte2)
        // Check that 1 <= plane <= 16. Tricky optimized form of:
        // valid 4-byte leading byte?
        // if (byte1 > (byte) 0xF4 ||
        // overlong? 4 most significant bits must not all be zero
        // byte1 == (byte) 0xF0 && byte2 < (byte) 0x90 ||
        // codepoint larger than the highest code point (U+10FFFF)?
        // byte1 == (byte) 0xF4 && byte2 > (byte) 0x8F)
        || ((((byte1 << 28) + (byte2 - (byte) 0x90)) >> 30) != 0)
        || isNotTrailingByte(byte3)
        || isNotTrailingByte(byte4)) {
      //Record how far decoding got so the caller can observe the partial result.
      cb.position(cp - cb.arrayOffset());
      final byte[] out = new byte[] { byte1, byte2, byte3, byte4 };
      throw Utf8CodingException.illegalUtf8DecodeByteSequence(out);
    }
    final int codepoint = ((byte1 & 0x07) << 18)
        | (trailingByteValue(byte2) << 12)
        | (trailingByteValue(byte3) << 6)
        | trailingByteValue(byte4);
    ca[cp] = DecodeUtil.highSurrogate(codepoint);
    ca[cp + 1] = DecodeUtil.lowSurrogate(codepoint);
  }

  /*
   * Returns whether the byte is not a valid continuation of the form '10XXXXXX'.
   * Valid trailers are 0x80..0xBF, which as signed bytes are <= (byte) 0xBF (i.e., <= -65).
   */
  private static boolean isNotTrailingByte(final byte b) {
    return b > (byte) 0xBF;
  }

  /*
   * Returns the actual value of the trailing byte (removes the prefix '10') for composition.
   */
  private static int trailingByteValue(final byte b) {
    return b & 0x3F;
  }

  //Standard UTF-16 decomposition of a supplementary code point: high (leading) surrogate.
  private static char highSurrogate(final int codePoint) {
    return (char)
        ((Character.MIN_HIGH_SURROGATE
            - (Character.MIN_SUPPLEMENTARY_CODE_POINT >>> 10))
            + (codePoint >>> 10));
  }

  //Standard UTF-16 decomposition of a supplementary code point: low (trailing) surrogate.
  private static char lowSurrogate(final int codePoint) {
    return (char) (Character.MIN_LOW_SURROGATE + (codePoint & 0x3ff));
  }
}
}
| 2,359 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/StepBoolean.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
/**
 * A one-shot boolean: it starts in a given state and can atomically flip to the opposite
 * state at most once. Safe for use by multiple threads.
 *
 * @author Lee Rhodes
 */
public final class StepBoolean {
  private static final int INT_FALSE = 0;
  private static final int INT_TRUE = 1;
  //Field name "state" is referenced reflectively below and must not be renamed.
  private static final AtomicIntegerFieldUpdater<StepBoolean> STATE_FIELD_UPDATER =
      AtomicIntegerFieldUpdater.newUpdater(StepBoolean.class, "state");

  //The state this instance started in, encoded as an int.
  private final int initialState;
  //Current state; updated atomically via STATE_FIELD_UPDATER, so it must remain volatile.
  private volatile int state;

  /**
   * Defines the initial state
   * @param initialState the given initial state
   */
  public StepBoolean(final boolean initialState) {
    final int encoded = initialState ? INT_TRUE : INT_FALSE;
    this.initialState = encoded;
    this.state = encoded;
  }

  /**
   * Gets the current state.
   * @return the current state.
   */
  public boolean get() {
    return state == INT_TRUE;
  }

  /**
   * This changes the state of this step boolean function if it has not yet changed.
   * @return true if this call led to the change of the state; false if the state has already been
   * changed
   */
  public boolean change() {
    final int flipped = (initialState == INT_TRUE) ? INT_FALSE : INT_TRUE;
    //CAS succeeds only for the first caller; later callers find state != initialState.
    return STATE_FIELD_UPDATER.compareAndSet(this, initialState, flipped);
  }

  /**
   * Return true if the state has changed from the initial state
   * @return true if the state has changed from the initial state
   */
  public boolean hasChanged() {
    return state != initialState;
  }
}
| 2,360 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/MapNonNativeWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.WritableBuffer;
/**
 * Memory-mapped-file-backed, non-native byte order implementation of {@link WritableBuffer}.
 *
 * @author Roman Leventov
 * @author Lee Rhodes
 */
final class MapNonNativeWritableBufferImpl extends NonNativeWritableBufferImpl {
  private final AllocateDirectWritableMap dirWMap;

  MapNonNativeWritableBufferImpl(
      final AllocateDirectWritableMap dirWMap,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes) {
    super(capacityBytes);
    this.dirWMap = dirWMap;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    //force the MAP, BUFFER and NONNATIVE flags for this leaf type
    this.typeId = removeNnBuf(typeId) | MAP | BUFFER | NONNATIVE;
    this.cumOffsetBytes = cumOffsetBytes;
    final Thread current = Thread.currentThread();
    if ((this.owner != null) && (this.owner != current)) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = current;
  }

  @Override
  BaseWritableBufferImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long rgnOffsetBytes = offsetBytes + regionOffsetBytes;
    final long rgnCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    final int baseTypeId = removeNnBuf(typeId) | MAP | REGION | (readOnly ? READONLY : 0);
    return Util.isNativeByteOrder(byteOrder)
        ? new MapWritableBufferImpl(
            dirWMap, rgnOffsetBytes, capacityBytes, baseTypeId | NATIVE, rgnCumOffsetBytes)
        : new MapNonNativeWritableBufferImpl(
            dirWMap, rgnOffsetBytes, capacityBytes, baseTypeId | NONNATIVE, rgnCumOffsetBytes);
  }

  @Override
  BaseWritableMemoryImpl toWritableMemory(final boolean readOnly, final ByteOrder byteOrder) {
    final int baseTypeId = removeNnBuf(typeId) | MEMORY | (readOnly ? READONLY : 0);
    return (byteOrder == ByteOrder.nativeOrder())
        ? new MapWritableMemoryImpl(
            dirWMap, offsetBytes, capacityBytes, baseTypeId | NATIVE, cumOffsetBytes)
        : new MapNonNativeWritableMemoryImpl(
            dirWMap, offsetBytes, capacityBytes, baseTypeId | NONNATIVE, cumOffsetBytes);
  }

  @Override
  BaseWritableBufferImpl toDuplicate(final boolean readOnly, final ByteOrder byteOrder) {
    final int baseTypeId = removeNnBuf(typeId) | BUFFER | DUPLICATE | (readOnly ? READONLY : 0);
    return (byteOrder == ByteOrder.nativeOrder())
        ? new MapWritableBufferImpl(
            dirWMap, offsetBytes, capacityBytes, baseTypeId | NATIVE, cumOffsetBytes)
        : new MapNonNativeWritableBufferImpl(
            dirWMap, offsetBytes, capacityBytes, baseTypeId | NONNATIVE, cumOffsetBytes);
  }

  @Override
  public void close() {
    checkValid();
    checkThread(owner);
    dirWMap.close(); //checksValidAndThread
  }

  @Override
  public void force() {
    checkValid();
    checkThread(owner);
    checkNotReadOnly();
    dirWMap.force(); //checksValidAndThread
  }

  @Override
  Object getUnsafeObject() {
    //map-backed resources are off-heap: there is no backing array object
    return null;
  }

  @Override
  public boolean isLoaded() {
    checkValid();
    checkThread(owner);
    return dirWMap.isLoaded(); //checksValidAndThread
  }

  @Override
  public boolean isValid() {
    return dirWMap.getValid().get();
  }

  @Override
  public void load() {
    checkValid();
    checkThread(owner);
    dirWMap.load(); //checksValidAndThread
  }
}
| 2,361 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/BBWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
* Implementation of {@link WritableMemory} for ByteBuffer, native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class BBWritableMemoryImpl extends NativeWritableMemoryImpl {
private final ByteBuffer byteBuf; //holds a reference to a ByteBuffer until we are done with it.
private final Object unsafeObj;
private final long nativeBaseOffset; //raw off-heap address of allocation base if ByteBuffer direct, else 0
BBWritableMemoryImpl(
final Object unsafeObj,
final long nativeBaseOffset,
final long offsetBytes,
final long capacityBytes,
final int typeId,
final long cumOffsetBytes,
final MemoryRequestServer memReqSvr,
final ByteBuffer byteBuf) {
super();
this.unsafeObj = unsafeObj;
this.nativeBaseOffset = nativeBaseOffset;
this.offsetBytes = offsetBytes;
this.capacityBytes = capacityBytes;
this.typeId = removeNnBuf(typeId) | BYTEBUF | MEMORY | NATIVE;
this.cumOffsetBytes = cumOffsetBytes;
this.memReqSvr = memReqSvr; //in ResourceImpl
this.byteBuf = byteBuf;
if ((this.owner != null) && (this.owner != Thread.currentThread())) {
throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
}
this.owner = Thread.currentThread();
}
@Override
BaseWritableMemoryImpl toWritableRegion(
final long regionOffsetBytes,
final long capacityBytes,
final boolean readOnly,
final ByteOrder byteOrder) {
final long newOffsetBytes = offsetBytes + regionOffsetBytes;
final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
int typeIdOut = removeNnBuf(typeId) | MEMORY | REGION | (readOnly ? READONLY : 0);
if (Util.isNativeByteOrder(byteOrder)) {
typeIdOut |= NATIVE;
return new BBWritableMemoryImpl(
unsafeObj, nativeBaseOffset, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr, byteBuf);
} else {
typeIdOut |= NONNATIVE;
return new BBNonNativeWritableMemoryImpl(
unsafeObj, nativeBaseOffset, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr, byteBuf);
}
}
@Override
BaseWritableBufferImpl toWritableBuffer(final boolean readOnly, final ByteOrder byteOrder) {
int typeIdOut = removeNnBuf(typeId) | BUFFER | (readOnly ? READONLY : 0);
if (byteOrder == ByteOrder.nativeOrder()) {
typeIdOut |= NATIVE;
return new BBWritableBufferImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
} else {
typeIdOut |= NONNATIVE;
return new BBNonNativeWritableBufferImpl(
unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
}
}
@Override
public ByteBuffer getByteBuffer() {
return byteBuf;
}
@Override
Object getUnsafeObject() {
return unsafeObj;
}
}
| 2,362 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/HeapNonNativeWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
 * Heap-array-backed, non-native byte order implementation of {@link WritableMemory}.
 *
 * @author Roman Leventov
 * @author Lee Rhodes
 */
final class HeapNonNativeWritableMemoryImpl extends NonNativeWritableMemoryImpl {
  //The backing heap primitive array.
  private final Object unsafeObj;

  HeapNonNativeWritableMemoryImpl(
      final Object unsafeObj,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes,
      final MemoryRequestServer memReqSvr) {
    super();
    this.unsafeObj = unsafeObj;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    //force the HEAP, MEMORY and NONNATIVE flags for this leaf type
    this.typeId = removeNnBuf(typeId) | HEAP | MEMORY | NONNATIVE;
    this.cumOffsetBytes = cumOffsetBytes;
    this.memReqSvr = memReqSvr; //field declared in ResourceImpl
    final Thread current = Thread.currentThread();
    if ((this.owner != null) && (this.owner != current)) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = current;
  }

  @Override
  BaseWritableMemoryImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long rgnOffsetBytes = offsetBytes + regionOffsetBytes;
    final long rgnCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    final int baseTypeId = removeNnBuf(typeId) | MEMORY | REGION | (readOnly ? READONLY : 0);
    return Util.isNativeByteOrder(byteOrder)
        ? new HeapWritableMemoryImpl(
            unsafeObj, rgnOffsetBytes, capacityBytes, baseTypeId | NATIVE,
            rgnCumOffsetBytes, memReqSvr)
        : new HeapNonNativeWritableMemoryImpl(
            unsafeObj, rgnOffsetBytes, capacityBytes, baseTypeId | NONNATIVE,
            rgnCumOffsetBytes, memReqSvr);
  }

  @Override
  BaseWritableBufferImpl toWritableBuffer(final boolean readOnly, final ByteOrder byteOrder) {
    final int baseTypeId = removeNnBuf(typeId) | BUFFER | (readOnly ? READONLY : 0);
    return (byteOrder == ByteOrder.nativeOrder())
        ? new HeapWritableBufferImpl(
            unsafeObj, offsetBytes, capacityBytes, baseTypeId | NATIVE,
            cumOffsetBytes, memReqSvr)
        : new HeapNonNativeWritableBufferImpl(
            unsafeObj, offsetBytes, capacityBytes, baseTypeId | NONNATIVE,
            cumOffsetBytes, memReqSvr);
  }

  @Override
  Object getUnsafeObject() {
    return unsafeObj;
  }
}
| 2,363 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/BaseWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BOOLEAN_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BOOLEAN_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BYTE_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BYTE_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_CHAR_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_INT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_LONG_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_SHORT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import static org.apache.datasketches.memory.internal.Util.negativeCheck;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.WritableByteChannel;
import java.util.Objects;
import org.apache.datasketches.memory.Buffer;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.ReadOnlyException;
import org.apache.datasketches.memory.Utf8CodingException;
import org.apache.datasketches.memory.WritableBuffer;
import org.apache.datasketches.memory.WritableMemory;
/**
* Common base of native-ordered and non-native-ordered {@link WritableMemory} implementations.
* Contains methods which are agnostic to the byte order.
*/
@SuppressWarnings("restriction")
public abstract class BaseWritableMemoryImpl extends ResourceImpl implements WritableMemory {
//1KB of zeroed bytes for speedy clear(); static-block form replaced by an inline initializer.
private static final byte[] EMPTY_BYTES = new byte[1024];

//Pass-through constructor
BaseWritableMemoryImpl() { }
/**
* The static constructor that chooses the correct Heap leaf node based on the byte order.
* @param arr the primitive heap array being wrapped
* @param offsetBytes the offset bytes into the array (independent of array type).
* @param lengthBytes the length of the wrapped region.
* @param localReadOnly the requested read-only status
* @param byteOrder the requested byte order
* @param memReqSvr the requested MemoryRequestServer, which may be null.
* @return this class constructed via the leaf node.
*/
public static WritableMemory wrapHeapArray(final Object arr, final long offsetBytes, final long lengthBytes,
    final boolean localReadOnly, final ByteOrder byteOrder, final MemoryRequestServer memReqSvr) {
  //cumulative offset = the array type's base offset + the requested offset into the array
  final long cumOffset = UnsafeUtil.getArrayBaseOffset(arr.getClass()) + offsetBytes;
  final int tId = localReadOnly ? READONLY : 0;
  if (Util.isNativeByteOrder(byteOrder)) {
    return new HeapWritableMemoryImpl(arr, offsetBytes, lengthBytes, tId, cumOffset, memReqSvr);
  }
  return new HeapNonNativeWritableMemoryImpl(arr, offsetBytes, lengthBytes, tId, cumOffset, memReqSvr);
}
/**
* The static constructor that chooses the correct ByteBuffer leaf node based on the byte order.
* @param byteBuf the ByteBuffer being wrapped
* @param localReadOnly the requested read-only state
* @param byteOrder the requested byteOrder
* @param memReqSvr the requested MemoryRequestServer, which may be null.
* @return this class constructed via the leaf node.
*/
public static WritableMemory wrapByteBuffer(
    final ByteBuffer byteBuf, final boolean localReadOnly, final ByteOrder byteOrder,
    final MemoryRequestServer memReqSvr) {
  final AccessByteBuffer abb = new AccessByteBuffer(byteBuf);
  //read-only if either the ByteBuffer itself or the caller requires it
  final int tId = (abb.resourceReadOnly || localReadOnly) ? READONLY : 0;
  //direct buffers have no backing array: base is the raw native address; else the array base
  final long base = (abb.unsafeObj == null)
      ? abb.nativeBaseOffset
      : UnsafeUtil.getArrayBaseOffset(abb.unsafeObj.getClass());
  final long cumOffset = abb.offsetBytes + base;
  if (Util.isNativeByteOrder(byteOrder)) {
    return new BBWritableMemoryImpl(abb.unsafeObj, abb.nativeBaseOffset,
        abb.offsetBytes, abb.capacityBytes, tId, cumOffset, memReqSvr, byteBuf);
  }
  return new BBNonNativeWritableMemoryImpl(abb.unsafeObj, abb.nativeBaseOffset,
      abb.offsetBytes, abb.capacityBytes, tId, cumOffset, memReqSvr, byteBuf);
}
/**
* The static constructor that chooses the correct Map leaf node based on the byte order.
* @param file the file being wrapped.
* @param fileOffsetBytes the file offset bytes
* @param capacityBytes the requested capacity of the memory mapped region
* @param localReadOnly the requested read-only state
* @param byteOrder the requested byte-order
* @return this class constructed via the leaf node.
*/
public static WritableMemory wrapMap(final File file, final long fileOffsetBytes,
    final long capacityBytes, final boolean localReadOnly, final ByteOrder byteOrder) {
  final AllocateDirectWritableMap dirWMap =
      new AllocateDirectWritableMap(file, fileOffsetBytes, capacityBytes, localReadOnly);
  //read-only if either the underlying file mapping or the caller requires it
  final int tId = (dirWMap.resourceReadOnly || localReadOnly) ? READONLY : 0;
  final long cumOffsetBytes = dirWMap.nativeBaseOffset;
  if (Util.isNativeByteOrder(byteOrder)) {
    return new MapWritableMemoryImpl(dirWMap, 0L, capacityBytes, tId, cumOffsetBytes);
  }
  return new MapNonNativeWritableMemoryImpl(dirWMap, 0L, capacityBytes, tId, cumOffsetBytes);
}
/**
* The static constructor that chooses the correct Direct leaf node based on the byte order.
* @param capacityBytes the requested capacity for the Direct (off-heap) memory
* @param byteOrder the requested byte order
* @param memReqSvr the requested MemoryRequestServer, which may be null
* @return this class constructed via the leaf node.
*/
public static WritableMemory wrapDirect(final long capacityBytes,
    final ByteOrder byteOrder, final MemoryRequestServer memReqSvr) {
  final AllocateDirect direct = new AllocateDirect(capacityBytes);
  final int tId = 0; //freshly allocated direct memory is never read-only on construction
  final long cumOffsetBytes = direct.getNativeBaseOffset();
  if (Util.isNativeByteOrder(byteOrder)) {
    return new DirectWritableMemoryImpl(direct, 0L, capacityBytes, tId, cumOffsetBytes, memReqSvr);
  }
  return new DirectNonNativeWritableMemoryImpl(direct, 0L, capacityBytes, tId, cumOffsetBytes, memReqSvr);
}
//REGIONS
@Override
public Memory region(final long regionOffsetBytes, final long capacityBytes, final ByteOrder byteOrder) {
  //localReadOnly = true: a region obtained via this method is always read-only.
  return writableRegionImpl(regionOffsetBytes, capacityBytes, true, byteOrder);
}
@Override
public WritableMemory writableRegion(final long regionOffsetBytes, final long capacityBytes,
    final ByteOrder byteOrder) {
  //localReadOnly = false: request a writable view; throws later if this resource is read-only.
  return writableRegionImpl(regionOffsetBytes, capacityBytes, false, byteOrder);
}
//Common implementation for region(...) and writableRegion(...): validates the request and
// delegates to the byte-order-specific leaf via toWritableRegion(...).
private WritableMemory writableRegionImpl(final long regionOffsetBytes, final long capacityBytes,
    final boolean localReadOnly, final ByteOrder byteOrder) {
  if (isReadOnly() && !localReadOnly) {
    throw new ReadOnlyException("Writable region of a read-only Memory is not allowed.");
  }
  negativeCheck(regionOffsetBytes, "offsetBytes must be >= 0");
  negativeCheck(capacityBytes, "capacityBytes must be >= 0");
  Objects.requireNonNull(byteOrder, "byteOrder must be non-null.");
  checkValidAndBounds(regionOffsetBytes, capacityBytes);
  //Read-only is sticky: every region derived from a read-only resource is read-only.
  final boolean finalReadOnly = isReadOnly() || localReadOnly;
  return toWritableRegion(regionOffsetBytes, capacityBytes, finalReadOnly, byteOrder);
}
abstract WritableMemory toWritableRegion(
long regionOffsetBytes, long capacityBytes, boolean finalReadOnly, ByteOrder byteOrder);
//AS BUFFER
@Override
public Buffer asBuffer(final ByteOrder byteOrder) {
  //localReadOnly = true: the returned Buffer view is always read-only.
  return asWritableBuffer(true, byteOrder);
}
@Override
public WritableBuffer asWritableBuffer(final ByteOrder byteOrder) {
  //localReadOnly = false: request a writable view; throws if this Memory is read-only.
  return asWritableBuffer(false, byteOrder);
}
private WritableBuffer asWritableBuffer(final boolean localReadOnly, final ByteOrder byteOrder) {
Objects.requireNonNull(byteOrder, "byteOrder must be non-null");
if (isReadOnly() && !localReadOnly) {
throw new ReadOnlyException(
"Converting a read-only Memory to a writable Buffer is not allowed.");
}
final boolean finalReadOnly = isReadOnly() || localReadOnly;
final WritableBuffer wbuf = toWritableBuffer(finalReadOnly, byteOrder);
wbuf.setStartPositionEnd(0, 0, getCapacity());
return wbuf;
}
  /**
   * Subclass hook that constructs the actual Buffer view of this Memory.
   */
  abstract WritableBuffer toWritableBuffer(boolean finalReadOnly, ByteOrder byteOrder);
//PRIMITIVE getX() and getXArray()
  /** Reads the boolean at the given offset, after a validity and bounds check. */
  @Override
  public final boolean getBoolean(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_BOOLEAN_INDEX_SCALE);
    return unsafe.getBoolean(getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
  /**
   * Bulk-copies booleans from this Memory into dstArray.
   * Checks this resource's bounds and the destination array's bounds before copying.
   */
  @Override
  public final void getBooleanArray(final long offsetBytes, final boolean[] dstArray,
      final int dstOffsetBooleans, final int lengthBooleans) {
    final long copyBytes = lengthBooleans; //1 byte per boolean
    checkValidAndBounds(offsetBytes, copyBytes);
    ResourceImpl.checkBounds(dstOffsetBooleans, lengthBooleans, dstArray.length);
    CompareAndCopy.copyMemoryCheckingDifferentObject(
        getUnsafeObject(),
        getCumulativeOffset(offsetBytes),
        dstArray,
        ARRAY_BOOLEAN_BASE_OFFSET + dstOffsetBooleans,
        copyBytes);
  }
  /** Reads the byte at the given offset, after a validity and bounds check. */
  @Override
  public final byte getByte(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_BYTE_INDEX_SCALE);
    return unsafe.getByte(getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
  /**
   * Bulk-copies bytes from this Memory into dstArray.
   * Checks this resource's bounds and the destination array's bounds before copying.
   */
  @Override
  public final void getByteArray(final long offsetBytes, final byte[] dstArray,
      final int dstOffsetBytes, final int lengthBytes) {
    final long copyBytes = lengthBytes;
    checkValidAndBounds(offsetBytes, copyBytes);
    ResourceImpl.checkBounds(dstOffsetBytes, lengthBytes, dstArray.length);
    CompareAndCopy.copyMemoryCheckingDifferentObject(
        getUnsafeObject(),
        getCumulativeOffset(offsetBytes),
        dstArray,
        ARRAY_BYTE_BASE_OFFSET + dstOffsetBytes,
        copyBytes);
  }
  /**
   * Decodes utf8LengthBytes bytes starting at offsetBytes as UTF-8 and appends the
   * decoded characters to dst.
   * @return the number of characters decoded (per Utf8.getCharsFromUtf8)
   * @throws IOException from the Appendable
   * @throws Utf8CodingException on an illegal UTF-8 sequence
   */
  @Override
  public final int getCharsFromUtf8(final long offsetBytes, final int utf8LengthBytes,
      final Appendable dst) throws IOException, Utf8CodingException {
    checkValidAndBounds(offsetBytes, utf8LengthBytes);
    return Utf8.getCharsFromUtf8(offsetBytes, utf8LengthBytes, dst, getCumulativeOffset(0),
        getUnsafeObject());
  }
  /**
   * StringBuilder variant of getCharsFromUtf8: pre-sizes the builder so it resizes its
   * internal char array at most once, then delegates to the Appendable overload.
   * StringBuilder.append never throws IOException, so the catch is unreachable in practice.
   */
  @Override
  public final int getCharsFromUtf8(final long offsetBytes, final int utf8LengthBytes,
      final StringBuilder dst) throws Utf8CodingException {
    try {
      // Ensure that we do at most one resize of internal StringBuilder's char array
      dst.ensureCapacity(dst.length() + utf8LengthBytes);
      return getCharsFromUtf8(offsetBytes, utf8LengthBytes, (Appendable) dst);
    } catch (final IOException e) {
      throw new RuntimeException("Should not happen", e);
    }
  }
//PRIMITIVE getX() Native Endian (used by both endians)
  /** Reads a char at offsetBytes in native byte order; non-native callers reverse bytes. */
  final char getNativeOrderedChar(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_CHAR_INDEX_SCALE);
    return unsafe.getChar(getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
  /** Reads an int at offsetBytes in native byte order; non-native callers reverse bytes. */
  final int getNativeOrderedInt(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_INT_INDEX_SCALE);
    return unsafe.getInt(getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
  /** Reads a long at offsetBytes in native byte order; non-native callers reverse bytes. */
  final long getNativeOrderedLong(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_LONG_INDEX_SCALE);
    return unsafe.getLong(getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
  /** Reads a short at offsetBytes in native byte order; non-native callers reverse bytes. */
  final short getNativeOrderedShort(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_SHORT_INDEX_SCALE);
    return unsafe.getShort(getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
//OTHER PRIMITIVE READ METHODS: compareTo, copyTo, equals
  /**
   * Compares two Memory ranges byte by byte; returns -1, 0, or +1.
   * If the common prefix is equal, the shorter range is the smaller.
   */
  @Override
  public final int compareTo(final long thisOffsetBytes, final long thisLengthBytes,
      final Memory thatMem, final long thatOffsetBytes, final long thatLengthBytes) {
    return CompareAndCopy.compare((ResourceImpl)this, thisOffsetBytes, thisLengthBytes,
        (ResourceImpl)thatMem, thatOffsetBytes, thatLengthBytes);
  }
  /**
   * Copies lengthBytes from this Memory, starting at srcOffsetBytes, into destination at
   * dstOffsetBytes.  CompareAndCopy.copy handles validity, bounds, and overlap checks.
   */
  @Override
  public final void copyTo(final long srcOffsetBytes, final WritableMemory destination,
      final long dstOffsetBytes, final long lengthBytes) {
    CompareAndCopy.copy((ResourceImpl)this, srcOffsetBytes, (ResourceImpl)destination,
        dstOffsetBytes, lengthBytes);
  }
  /**
   * Writes lengthBytes starting at offsetBytes to the given channel, choosing the most
   * direct strategy available for the backing resource.
   * @throws IOException from the channel
   */
  @Override
  public final void writeTo(final long offsetBytes, final long lengthBytes,
      final WritableByteChannel out) throws IOException {
    checkValidAndBounds(offsetBytes, lengthBytes);
    if (getUnsafeObject() instanceof byte[]) {
      //byte[]-backed: wrap the backing array directly, no intermediate copy
      writeByteArrayTo((byte[]) getUnsafeObject(), offsetBytes, lengthBytes, out);
    } else if (getUnsafeObject() == null) {
      //off-heap (direct) memory
      writeDirectMemoryTo(offsetBytes, lengthBytes, out);
    } else {
      // Memory is backed by some array that is not byte[], for example int[], long[], etc.
      // We have no choice but to make an extra intermediate copy.
      writeToWithExtraCopy(offsetBytes, lengthBytes, out);
    }
  }
//PRIMITIVE putX() and putXArray() implementations
  /** Writes the boolean value at the given offset, after a validity and bounds check. */
  @Override
  public final void putBoolean(final long offsetBytes, final boolean value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_BOOLEAN_INDEX_SCALE);
    unsafe.putBoolean(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
  }
  /**
   * Bulk-copies booleans from srcArray into this Memory.
   * Checks this resource's write bounds and the source array's bounds before copying.
   */
  @Override
  public final void putBooleanArray(final long offsetBytes, final boolean[] srcArray,
      final int srcOffsetBooleans, final int lengthBooleans) {
    final long copyBytes = lengthBooleans; //1 byte per boolean
    checkValidAndBoundsForWrite(offsetBytes, copyBytes);
    ResourceImpl.checkBounds(srcOffsetBooleans, lengthBooleans, srcArray.length);
    CompareAndCopy.copyMemoryCheckingDifferentObject(
        srcArray,
        ARRAY_BOOLEAN_BASE_OFFSET + srcOffsetBooleans,
        getUnsafeObject(),
        getCumulativeOffset(offsetBytes),
        copyBytes
    );
  }
  /** Writes the byte value at the given offset, after a validity and bounds check. */
  @Override
  public final void putByte(final long offsetBytes, final byte value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_BYTE_INDEX_SCALE);
    unsafe.putByte(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
  }
  /**
   * Bulk-copies bytes from srcArray into this Memory.
   * Checks this resource's write bounds and the source array's bounds before copying.
   */
  @Override
  public final void putByteArray(final long offsetBytes, final byte[] srcArray,
      final int srcOffsetBytes, final int lengthBytes) {
    final long copyBytes = lengthBytes;
    checkValidAndBoundsForWrite(offsetBytes, copyBytes);
    ResourceImpl.checkBounds(srcOffsetBytes, lengthBytes, srcArray.length);
    CompareAndCopy.copyMemoryCheckingDifferentObject(
        srcArray,
        ARRAY_BYTE_BASE_OFFSET + srcOffsetBytes,
        getUnsafeObject(),
        getCumulativeOffset(offsetBytes),
        copyBytes
    );
  }
  /**
   * Encodes src as UTF-8 into this Memory starting at offsetBytes.
   * @return the value returned by Utf8.putCharsToUtf8 (presumably the offset after the
   * last byte written — confirm against Utf8's contract)
   */
  @Override
  public final long putCharsToUtf8(final long offsetBytes, final CharSequence src) {
    checkValid();
    return Utf8.putCharsToUtf8(offsetBytes, src, getCapacity(), getCumulativeOffset(0),
        getUnsafeObject());
  }
//PRIMITIVE putX() Native Endian (used by both endians)
  /** Writes a char at offsetBytes in native byte order; non-native callers pre-reverse bytes. */
  final void putNativeOrderedChar(final long offsetBytes, final char value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_CHAR_INDEX_SCALE);
    unsafe.putChar(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
  }
  /** Writes an int at offsetBytes in native byte order; non-native callers pre-reverse bytes. */
  final void putNativeOrderedInt(final long offsetBytes, final int value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_INT_INDEX_SCALE);
    unsafe.putInt(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
  }
  /** Writes a long at offsetBytes in native byte order; non-native callers pre-reverse bytes. */
  final void putNativeOrderedLong(final long offsetBytes, final long value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_LONG_INDEX_SCALE);
    unsafe.putLong(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
  }
  /** Writes a short at offsetBytes in native byte order; non-native callers pre-reverse bytes. */
  final void putNativeOrderedShort(final long offsetBytes, final short value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_SHORT_INDEX_SCALE);
    unsafe.putShort(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
  }
//OTHER WRITE METHODS
  /**
   * Returns the primitive backing array, otherwise null.
   * @return the primitive backing array, otherwise null.
   */
  final Object getArray() {
    checkValid(); //fail fast if the resource has been closed
    return getUnsafeObject();
  }
  /** Zeroes the entire capacity of this Memory. */
  @Override
  public final void clear() {
    clear(0, getCapacity());
  }
@Override
public final void clear(final long offsetBytes, final long lengthBytes)
{
//No need to check bounds, since putByteArray calls checkValidAndBoundsForWrite
final long endBytes = offsetBytes + lengthBytes;
for (long i = offsetBytes; i < endBytes; i += EMPTY_BYTES.length) {
putByteArray(i, EMPTY_BYTES, 0, (int) Math.min(EMPTY_BYTES.length, endBytes - i));
}
}
  /** Clears (sets to 0) the bits selected by bitMask in the byte at offsetBytes. */
  @Override
  public final void clearBits(final long offsetBytes, final byte bitMask) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_BYTE_INDEX_SCALE);
    final long cumBaseOff = getCumulativeOffset(offsetBytes);
    int value = unsafe.getByte(getUnsafeObject(), cumBaseOff) & 0XFF; //read as unsigned
    value &= ~bitMask; //zero the masked bits
    unsafe.putByte(getUnsafeObject(), cumBaseOff, (byte)value);
  }
  /** Fills the entire capacity with the given byte value. */
  @Override
  public final void fill(final byte value) {
    fill(0, getCapacity(), value);
  }
  /**
   * Fills the given range with the byte value.  The work is chunked by
   * UNSAFE_COPY_THRESHOLD_BYTES to bound the time spent in a single Unsafe.setMemory
   * call (safepoint friendliness).
   */
  @Override
  public final void fill(long offsetBytes, long lengthBytes, final byte value) {
    checkValidAndBoundsForWrite(offsetBytes, lengthBytes);
    while (lengthBytes > 0) {
      final long chunk = Math.min(lengthBytes, Util.UNSAFE_COPY_THRESHOLD_BYTES);
      unsafe.setMemory(getUnsafeObject(), getCumulativeOffset(offsetBytes), chunk, value);
      offsetBytes += chunk;
      lengthBytes -= chunk;
    }
  }
  /** Sets (to 1) the bits selected by bitMask in the byte at offsetBytes. */
  @Override
  public final void setBits(final long offsetBytes, final byte bitMask) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_BYTE_INDEX_SCALE);
    final long myOffset = getCumulativeOffset(offsetBytes);
    final byte value = unsafe.getByte(getUnsafeObject(), myOffset);
    unsafe.putByte(getUnsafeObject(), myOffset, (byte)(value | bitMask));
  }
//RESTRICTED
  /**
   * Channel-write path for byte[]-backed memory: wraps the relevant slice of the backing
   * array in a ByteBuffer and writes it fully, with no intermediate copy.
   */
  private void writeByteArrayTo(final byte[] unsafeObj, final long offsetBytes,
      final long lengthBytes, final WritableByteChannel out) throws IOException {
    //translate the cumulative offset back to an index into the backing array
    final int off =
        Ints.checkedCast((getCumulativeOffset(offsetBytes)) - UnsafeUtil.ARRAY_BYTE_BASE_OFFSET);
    final int len = Ints.checkedCast(lengthBytes);
    final ByteBuffer bufToWrite = ByteBuffer.wrap(unsafeObj, off, len);
    writeFully(bufToWrite, out);
  }
  /**
   * Channel-write path for off-heap memory: views the native address range through
   * chunk-sized dummy direct ByteBuffers and writes each chunk fully.
   */
  private void writeDirectMemoryTo(final long offsetBytes, long lengthBytes,
      final WritableByteChannel out) throws IOException {
    long addr = getCumulativeOffset(offsetBytes);
    // Do chunking, because it's likely that WritableByteChannel.write(ByteBuffer) in some network-
    // or file-backed WritableByteChannel implementations with direct ByteBuffer argument could
    // be subject of the same safepoint problems as in Unsafe.copyMemory and Unsafe.setMemory.
    while (lengthBytes > 0) {
      final int chunk = (int) Math.min(Util.UNSAFE_COPY_THRESHOLD_BYTES, lengthBytes);
      final ByteBuffer bufToWrite = AccessByteBuffer.getDummyReadOnlyDirectByteBuffer(addr, chunk);
      writeFully(bufToWrite, out);
      addr += chunk;
      lengthBytes -= chunk;
    }
  }
  /**
   * Channel-write path for memory backed by a non-byte[] array: stages data through an
   * intermediate byte[] sized between 8 B and 4 KB (scaled to capacity), then writes each
   * staged chunk fully.
   */
  private void writeToWithExtraCopy(long offsetBytes, long lengthBytes,
      final WritableByteChannel out) throws IOException {
    // Keep the bufLen a multiple of 8, to maybe allow getByteArray() to go a faster path.
    final int bufLen = Ints.checkedCast(Math.max(8, Math.min((getCapacity() / 1024) & ~7L, 4096)));
    final byte[] buf = new byte[bufLen];
    final ByteBuffer bufToWrite = ByteBuffer.wrap(buf);
    while (lengthBytes > 0) {
      final int chunk = (int) Math.min(buf.length, lengthBytes);
      getByteArray(offsetBytes, buf, 0, chunk);
      bufToWrite.clear().limit(chunk); //reuse the wrapper: reset position, cap at chunk
      writeFully(bufToWrite, out);
      offsetBytes += chunk;
      lengthBytes -= chunk;
    }
  }
private static void writeFully(final ByteBuffer bufToWrite, final WritableByteChannel out)
throws IOException {
while (bufToWrite.remaining() > 0) {
out.write(bufToWrite);
}
}
}
| 2,364 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/DirectWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
* Implementation of {@link WritableMemory} for direct memory, native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class DirectWritableMemoryImpl extends NativeWritableMemoryImpl {
  private final AllocateDirect direct; //owns the off-heap allocation and its valid flag

  /**
   * Constructs a direct (off-heap), native-byte-order WritableMemory.
   * The instance is confined to the constructing thread; use from another thread throws.
   */
  DirectWritableMemoryImpl(
      final AllocateDirect direct,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes,
      final MemoryRequestServer memReqSvr) {
    super();
    this.direct = direct;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    this.typeId = removeNnBuf(typeId) | DIRECT | MEMORY | NATIVE; //initially cannot be ReadOnly
    this.cumOffsetBytes = cumOffsetBytes;
    this.memReqSvr = memReqSvr; //in ResourceImpl
    if ((this.owner != null) && (this.owner != Thread.currentThread())) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = Thread.currentThread();
  }

  /** Creates a region view sharing this object's AllocateDirect; offsets are pre-validated. */
  @Override
  BaseWritableMemoryImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long newOffsetBytes = offsetBytes + regionOffsetBytes;
    final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    int typeIdOut = removeNnBuf(typeId) | MEMORY | REGION | (readOnly ? READONLY : 0);
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new DirectWritableMemoryImpl(
          direct, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new DirectNonNativeWritableMemoryImpl(
          direct, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
    }
  }

  /** Creates a Buffer view of this Memory sharing the same AllocateDirect. */
  @Override
  BaseWritableBufferImpl toWritableBuffer(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | BUFFER | (readOnly ? READONLY : 0);
    //Consistency fix: use Util.isNativeByteOrder as in toWritableRegion (was a direct
    //reference compare with ByteOrder.nativeOrder(); equivalent for the non-null
    //byteOrder guaranteed by callers).
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new DirectWritableBufferImpl(
          direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new DirectNonNativeWritableBufferImpl(
          direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    }
  }

  /** Frees the off-heap allocation; only the owning thread may close. */
  @Override
  public void close() {
    checkValid();
    checkThread(owner);
    direct.close();
  }

  /** Direct memory has no backing on-heap array. */
  @Override
  Object getUnsafeObject() {
    return null;
  }

  /** Valid until the underlying AllocateDirect is closed. */
  @Override
  public boolean isValid() {
    return direct.getValid().get();
  }
}
| 2,365 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/CompareAndCopy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.CHAR_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.DOUBLE_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.FLOAT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.INT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.LONG_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.SHORT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import static org.apache.datasketches.memory.internal.Util.UNSAFE_COPY_THRESHOLD_BYTES;
/**
* @author Lee Rhodes
*/
@SuppressWarnings("restriction")
final class CompareAndCopy {
private CompareAndCopy() { }
  /**
   * Lexicographic comparison of two resource ranges; each pair of bytes is compared as
   * sign-extended ints.  Returns -1, 0, or +1; if the common prefix is equal, the
   * shorter range is the smaller.
   */
  static int compare(
      final ResourceImpl state1, final long offsetBytes1, final long lengthBytes1,
      final ResourceImpl state2, final long offsetBytes2, final long lengthBytes2) {
    state1.checkValid();
    ResourceImpl.checkBounds(offsetBytes1, lengthBytes1, state1.getCapacity());
    state2.checkValid();
    ResourceImpl.checkBounds(offsetBytes2, lengthBytes2, state2.getCapacity());
    final long cumOff1 = state1.getCumulativeOffset(offsetBytes1);
    final long cumOff2 = state2.getCumulativeOffset(offsetBytes2);
    final Object arr1 = state1.getUnsafeObject();
    final Object arr2 = state2.getUnsafeObject();
    //skip the byte loop when both ranges alias the exact same memory
    if ((arr1 != arr2) || (cumOff1 != cumOff2)) {
      final long lenBytes = Math.min(lengthBytes1, lengthBytes2);
      for (long i = 0; i < lenBytes; i++) {
        final int byte1 = unsafe.getByte(arr1, cumOff1 + i);
        final int byte2 = unsafe.getByte(arr2, cumOff2 + i);
        if (byte1 < byte2) { return -1; }
        if (byte1 > byte2) { return 1; }
      }
    }
    return Long.compare(lengthBytes1, lengthBytes2);
  }
  /** True iff both resources have the same capacity and identical contents. */
  static boolean equals(final ResourceImpl state1, final ResourceImpl state2) {
    final long cap1 = state1.getCapacity();
    final long cap2 = state2.getCapacity();
    return (cap1 == cap2) && equals(state1, 0, state2, 0, cap1);
  }
//Developer notes: this is subtlety different from (compare == 0) in that this has an early
// stop if the arrays and offsets are the same as there is only one length. Also this can take
// advantage of chunking with longs, while compare cannot.
  /**
   * True iff the two ranges have identical contents.  Returns early when both ranges
   * alias the same memory; otherwise compares 8 bytes at a time within
   * UNSAFE_COPY_THRESHOLD_BYTES-sized chunks, then finishes byte-wise on the remainder.
   */
  static boolean equals(
      final ResourceImpl state1, final long offsetBytes1,
      final ResourceImpl state2, final long offsetBytes2, long lengthBytes) {
    state1.checkValid();
    ResourceImpl.checkBounds(offsetBytes1, lengthBytes, state1.getCapacity());
    state2.checkValid();
    ResourceImpl.checkBounds(offsetBytes2, lengthBytes, state2.getCapacity());
    long cumOff1 = state1.getCumulativeOffset(offsetBytes1);
    long cumOff2 = state2.getCumulativeOffset(offsetBytes2);
    final Object arr1 = state1.getUnsafeObject(); //could be null
    final Object arr2 = state2.getUnsafeObject(); //could be null
    if ((arr1 == arr2) && (cumOff1 == cumOff2)) { return true; }
    while (lengthBytes >= Long.BYTES) {
      final int chunk = (int) Math.min(lengthBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      // int-counted loop to avoid safepoint polls (otherwise why we chunk by
      // UNSAFE_COPY_MEMORY_THRESHOLD)
      int i = 0;
      for (; i <= (chunk - Long.BYTES); i += Long.BYTES) {
        final long v1 = unsafe.getLong(arr1, cumOff1 + i);
        final long v2 = unsafe.getLong(arr2, cumOff2 + i);
        if (v1 != v2) { return false; }
      }
      lengthBytes -= i;
      cumOff1 += i;
      cumOff2 += i;
    }
    //check the remainder bytes, if any
    return (lengthBytes == 0) || equalsByBytes(arr1, cumOff1, arr2, cumOff2, (int) lengthBytes);
  }
//use only for short runs
private static boolean equalsByBytes(final Object arr1, final long cumOff1, final Object arr2,
final long cumOff2, final int lenBytes) {
// int-counted loop to avoid safepoint polls
for (int i = 0; i < lenBytes; i++) {
final int v1 = unsafe.getByte(arr1, cumOff1 + i);
final int v2 = unsafe.getByte(arr2, cumOff2 + i);
if (v1 != v2) { return false; }
}
return true;
}
  /**
   * Validated copy between two resources: checks validity and bounds on both sides, then
   * delegates to copyMemory which handles the overlap cases.
   */
  static void copy(final ResourceImpl srcState, final long srcOffsetBytes,
      final ResourceImpl dstState, final long dstOffsetBytes, final long lengthBytes) {
    srcState.checkValid();
    ResourceImpl.checkBounds(srcOffsetBytes, lengthBytes, srcState.getCapacity());
    dstState.checkValid();
    ResourceImpl.checkBounds(dstOffsetBytes, lengthBytes, dstState.getCapacity());
    final long srcAdd = srcState.getCumulativeOffset(srcOffsetBytes);
    final long dstAdd = dstState.getCumulativeOffset(dstOffsetBytes);
    copyMemory(srcState.getUnsafeObject(), srcAdd, dstState.getUnsafeObject(), dstAdd,
        lengthBytes);
  }
//Used by all of the get/put array methods in BufferImpl and MemoryImpl classes
static final void copyMemoryCheckingDifferentObject(final Object srcUnsafeObj,
final long srcAdd, final Object dstUnsafeObj, final long dstAdd, final long lengthBytes) {
if (srcUnsafeObj != dstUnsafeObj) {
copyNonOverlappingMemoryWithChunking(srcUnsafeObj, srcAdd, dstUnsafeObj, dstAdd,
lengthBytes);
} else {
throw new IllegalArgumentException("Not expecting to copy to/from array which is the "
+ "underlying object of the memory at the same time");
}
}
//only valid and bounds checks have been performed at this point
  /**
   * General copy, called after validity and bounds checks: different backing objects
   * cannot overlap, so take the chunked fast path; otherwise perform an address-overlap
   * check first.
   */
  //only valid and bounds checks have been performed at this point
  private static void copyMemory(final Object srcUnsafeObj, final long srcAdd,
      final Object dstUnsafeObj, final long dstAdd, final long lengthBytes) {
    if (srcUnsafeObj != dstUnsafeObj) {
      //either srcArray != dstArray OR one of them is off-heap
      copyNonOverlappingMemoryWithChunking(srcUnsafeObj, srcAdd, dstUnsafeObj, dstAdd,
          lengthBytes);
    } else { //either srcArray == dstArray OR both src and dst are off-heap
      copyMemoryOverlapAddressCheck(srcUnsafeObj, srcAdd, dstUnsafeObj, dstAdd, lengthBytes);
    }
  }
  /**
   * At this point either srcArray == dstArray OR both src and dst are off-heap.
   * Performs overlapping address check. If addresses do not overlap, proceed to
   * {@link #copyNonOverlappingMemoryWithChunking(Object, long, Object, long, long)}; otherwise
   * fall back on <i>Unsafe.copyMemory(...)</i> tolerating potentially long
   * Time to Safe Point pauses.
   * If srcAdd == dstAdd an exception will be thrown.
   * @param srcUnsafeObj The source array object, it may be null.
   * @param srcAdd The cumulative source offset
   * @param dstUnsafeObj The destination array object, it may be null
   * @param dstAdd The cumulative destination offset
   * @param lengthBytes The length to be copied in bytes
   */
  private static void copyMemoryOverlapAddressCheck(final Object srcUnsafeObj, final long srcAdd,
      final Object dstUnsafeObj, final long dstAdd, final long lengthBytes) {
    //disjoint ranges: one ends at or before the other begins
    if (((srcAdd + lengthBytes) <= dstAdd) || ((dstAdd + lengthBytes) <= srcAdd)) {
      copyNonOverlappingMemoryWithChunking(srcUnsafeObj, srcAdd, dstUnsafeObj, dstAdd,
          lengthBytes);
      return;
    }
    if (srcAdd == dstAdd) {
      throw new IllegalArgumentException(
          "Attempt to copy a block of memory exactly in-place, should be a bug");
    }
    // If regions do overlap, fall back to unsafe.copyMemory, tolerating potentially long
    // Time to Safe Point pauses.
    unsafe.copyMemory(srcUnsafeObj, srcAdd, dstUnsafeObj, dstAdd, lengthBytes);
  }
  /**
   * This copies only non-overlapping memory in chunks to avoid safepoint delays.
   * Java 9 may not require the chunking.
   * @param srcUnsafeObj The source array object, it may be null.
   * @param srcAdd The cumulative source offset
   * @param dstUnsafeObj The destination array object, it may be null
   * @param dstAdd The cumulative destination offset
   * @param lengthBytes The length to be copied in bytes
   * @see Util#UNSAFE_COPY_THRESHOLD_BYTES
   */
  private static void copyNonOverlappingMemoryWithChunking(final Object srcUnsafeObj,
      long srcAdd, final Object dstUnsafeObj, long dstAdd, long lengthBytes) {
    while (lengthBytes > 0) {
      final long chunk = Math.min(lengthBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      unsafe.copyMemory(srcUnsafeObj, srcAdd, dstUnsafeObj, dstAdd, chunk);
      lengthBytes -= chunk;
      srcAdd += chunk;
      dstAdd += chunk;
    }
  }
  /**
   * Copies chars from the resource into dstArray, reversing the byte order of each char.
   * Work is chunked by UNSAFE_COPY_THRESHOLD_BYTES to bound safepoint delay.
   */
  static void getNonNativeChars(final Object unsafeObj, long cumOffsetBytes,
      long copyBytes, final char[] dstArray, int dstOffsetChars,
      int lengthChars) {
    ResourceImpl.checkBounds(dstOffsetChars, lengthChars, dstArray.length);
    while (copyBytes > UNSAFE_COPY_THRESHOLD_BYTES) {
      final long chunkBytes = Math.min(copyBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      final int chunkChars = (int) (chunkBytes >> CHAR_SHIFT);
      getCharArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetChars, chunkChars);
      cumOffsetBytes += chunkBytes;
      dstOffsetChars += chunkChars;
      copyBytes -= chunkBytes;
      lengthChars -= chunkChars;
    }
    //final partial (or only) chunk
    getCharArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetChars, lengthChars);
  }
  /** Copies one chunk of chars, byte-reversing each value on the way out. */
  private static void getCharArrayChunk(final Object unsafeObj, final long cumOffsetBytes,
      final char[] dstArray, final int dstOffsetChars, final int lengthChars) {
    // JDK 9 adds native intrinsics for such bulk non-native ordered primitive memory copy.
    // TODO-JDK9 use them when the library adds support for JDK 9
    // int-counted loop to avoid safepoint polls
    for (int i = 0; i < lengthChars; i++) {
      dstArray[dstOffsetChars + i] = Character.reverseBytes(
          unsafe.getChar(unsafeObj, cumOffsetBytes + (((long) i) << CHAR_SHIFT)));
    }
  }
  /**
   * Copies doubles from the resource into dstArray, reversing the byte order of each
   * 8-byte value.  Work is chunked by UNSAFE_COPY_THRESHOLD_BYTES to bound safepoint delay.
   */
  static void getNonNativeDoubles(final Object unsafeObj, long cumOffsetBytes,
      long copyBytes, final double[] dstArray, int dstOffsetDoubles,
      int lengthDoubles) {
    ResourceImpl.checkBounds(dstOffsetDoubles, lengthDoubles, dstArray.length);
    while (copyBytes > UNSAFE_COPY_THRESHOLD_BYTES) {
      final long chunkBytes = Math.min(copyBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      final int chunkDoubles = (int) (chunkBytes >> DOUBLE_SHIFT);
      getDoubleArrayChunk(unsafeObj, cumOffsetBytes,
          dstArray, dstOffsetDoubles, chunkDoubles);
      cumOffsetBytes += chunkBytes;
      dstOffsetDoubles += chunkDoubles;
      copyBytes -= chunkBytes;
      lengthDoubles -= chunkDoubles;
    }
    //final partial (or only) chunk
    getDoubleArrayChunk(unsafeObj, cumOffsetBytes,
        dstArray, dstOffsetDoubles, lengthDoubles);
  }
  /** Copies one chunk of doubles, byte-reversing each value via its raw long bits. */
  private static void getDoubleArrayChunk(final Object unsafeObj, final long cumOffsetBytes,
      final double[] dstArray, final int dstOffsetDoubles, final int lengthDoubles) {
    // JDK 9 adds native intrinsics for such bulk non-native ordered primitive memory copy.
    // TODO-JDK9 use them when the library adds support for JDK 9
    // int-counted loop to avoid safepoint polls
    for (int i = 0; i < lengthDoubles; i++) {
      dstArray[dstOffsetDoubles + i] = Double.longBitsToDouble(Long.reverseBytes(
          unsafe.getLong(unsafeObj, cumOffsetBytes + (((long) i) << DOUBLE_SHIFT))));
    }
  }
  /**
   * Copies floats from the resource into dstArray, reversing the byte order of each
   * 4-byte value.  Work is chunked by UNSAFE_COPY_THRESHOLD_BYTES to bound safepoint delay.
   */
  static void getNonNativeFloats(final Object unsafeObj, long cumOffsetBytes,
      long copyBytes, final float[] dstArray, int dstOffsetFloats,
      int lengthFloats) {
    ResourceImpl.checkBounds(dstOffsetFloats, lengthFloats, dstArray.length);
    while (copyBytes > UNSAFE_COPY_THRESHOLD_BYTES) {
      final long chunkBytes = Math.min(copyBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      final int chunkFloats = (int) (chunkBytes >> FLOAT_SHIFT);
      getFloatArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetFloats, chunkFloats);
      cumOffsetBytes += chunkBytes;
      dstOffsetFloats += chunkFloats;
      copyBytes -= chunkBytes;
      lengthFloats -= chunkFloats;
    }
    //final partial (or only) chunk
    getFloatArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetFloats, lengthFloats);
  }
  /** Copies one chunk of floats, byte-reversing each value via its raw int bits. */
  private static void getFloatArrayChunk(final Object unsafeObj, final long cumOffsetBytes,
      final float[] dstArray, final int dstOffsetFloats, final int lengthFloats) {
    // JDK 9 adds native intrinsics for such bulk non-native ordered primitive memory copy.
    // TODO-JDK9 use them when the library adds support for JDK 9
    // int-counted loop to avoid safepoint polls
    for (int i = 0; i < lengthFloats; i++) {
      dstArray[dstOffsetFloats + i] = Float.intBitsToFloat(Integer.reverseBytes(
          unsafe.getInt(unsafeObj, cumOffsetBytes + (((long) i) << FLOAT_SHIFT))));
    }
  }
  /**
   * Copies ints from the resource into dstArray, reversing the byte order of each value.
   * Work is chunked by UNSAFE_COPY_THRESHOLD_BYTES to bound safepoint delay.
   */
  static void getNonNativeInts(final Object unsafeObj, long cumOffsetBytes,
      long copyBytes, final int[] dstArray, int dstOffsetInts,
      int lengthInts) {
    ResourceImpl.checkBounds(dstOffsetInts, lengthInts, dstArray.length);
    while (copyBytes > UNSAFE_COPY_THRESHOLD_BYTES) {
      final long chunkBytes = Math.min(copyBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      final int chunkInts = (int) (chunkBytes >> INT_SHIFT);
      getIntArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetInts, chunkInts);
      cumOffsetBytes += chunkBytes;
      dstOffsetInts += chunkInts;
      copyBytes -= chunkBytes;
      lengthInts -= chunkInts;
    }
    //final partial (or only) chunk
    getIntArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetInts, lengthInts);
  }
  /** Copies one chunk of ints, byte-reversing each value on the way out. */
  private static void getIntArrayChunk(final Object unsafeObj, final long cumOffsetBytes,
      final int[] dstArray, final int dstOffsetInts, final int lengthInts) {
    // JDK 9 adds native intrinsics for such bulk non-native ordered primitive memory copy.
    // TODO-JDK9 use them when the library adds support for JDK 9
    // int-counted loop to avoid safepoint polls
    for (int i = 0; i < lengthInts; i++) {
      dstArray[dstOffsetInts + i] = Integer.reverseBytes(
          unsafe.getInt(unsafeObj, cumOffsetBytes + (((long) i) << INT_SHIFT)));
    }
  }
  /**
   * Copies longs from the resource into dstArray, reversing the byte order of each value.
   * Work is chunked by UNSAFE_COPY_THRESHOLD_BYTES to bound safepoint delay.
   */
  static void getNonNativeLongs(final Object unsafeObj, long cumOffsetBytes,
      long copyBytes, final long[] dstArray, int dstOffsetLongs,
      int lengthLongs) {
    ResourceImpl.checkBounds(dstOffsetLongs, lengthLongs, dstArray.length);
    while (copyBytes > UNSAFE_COPY_THRESHOLD_BYTES) {
      final long chunkBytes = Math.min(copyBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      final int chunkLongs = (int) (chunkBytes >> LONG_SHIFT);
      getLongArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetLongs, chunkLongs);
      cumOffsetBytes += chunkBytes;
      dstOffsetLongs += chunkLongs;
      copyBytes -= chunkBytes;
      lengthLongs -= chunkLongs;
    }
    //final partial (or only) chunk
    getLongArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetLongs, lengthLongs);
  }
  /** Copies one chunk of longs, byte-reversing each value on the way out. */
  private static void getLongArrayChunk(final Object unsafeObj, final long cumOffsetBytes,
      final long[] dstArray, final int dstOffsetLongs, final int lengthLongs) {
    // JDK 9 adds native intrinsics for such bulk non-native ordered primitive memory copy.
    // TODO-JDK9 use them when the library adds support for JDK 9
    // int-counted loop to avoid safepoint polls
    for (int i = 0; i < lengthLongs; i++) {
      dstArray[dstOffsetLongs + i] = Long.reverseBytes(
          unsafe.getLong(unsafeObj, cumOffsetBytes + (((long) i) << LONG_SHIFT)));
    }
  }
  /**
   * Copies shorts from the resource into dstArray, reversing the byte order of each value.
   * Work is chunked by UNSAFE_COPY_THRESHOLD_BYTES to bound safepoint delay.
   */
  static void getNonNativeShorts(final Object unsafeObj, long cumOffsetBytes,
      long copyBytes, final short[] dstArray, int dstOffsetShorts,
      int lengthShorts) {
    ResourceImpl.checkBounds(dstOffsetShorts, lengthShorts, dstArray.length);
    while (copyBytes > UNSAFE_COPY_THRESHOLD_BYTES) {
      final long chunkBytes = Math.min(copyBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      final int chunkShorts = (int) (chunkBytes >> SHORT_SHIFT);
      getShortArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetShorts, chunkShorts);
      cumOffsetBytes += chunkBytes;
      dstOffsetShorts += chunkShorts;
      copyBytes -= chunkBytes;
      lengthShorts -= chunkShorts;
    }
    //final partial (or only) chunk
    getShortArrayChunk(unsafeObj, cumOffsetBytes, dstArray, dstOffsetShorts, lengthShorts);
  }
  /** Copies one chunk of shorts, byte-reversing each value on the way out. */
  private static void getShortArrayChunk(final Object unsafeObj, final long cumOffsetBytes,
      final short[] dstArray, final int dstOffsetShorts, final int lengthShorts) {
    // JDK 9 adds native intrinsics for such bulk non-native ordered primitive memory copy.
    // TODO-JDK9 use them when the library adds support for JDK 9
    // int-counted loop to avoid safepoint polls
    for (int i = 0; i < lengthShorts; i++) {
      dstArray[dstOffsetShorts + i] = Short.reverseBytes(
          unsafe.getShort(unsafeObj, cumOffsetBytes + (((long) i) << SHORT_SHIFT)));
    }
  }
  /**
   * Writes chars from srcArray into the resource, reversing the byte order of each value.
   * Work is chunked by UNSAFE_COPY_THRESHOLD_BYTES to bound safepoint delay.
   */
  static void putNonNativeChars(final char[] srcArray, int srcOffsetChars, int lengthChars,
      long copyBytes, final Object unsafeObj, long cumOffsetBytes) {
    ResourceImpl.checkBounds(srcOffsetChars, lengthChars, srcArray.length);
    while (copyBytes > UNSAFE_COPY_THRESHOLD_BYTES) {
      final long chunkBytes = Math.min(copyBytes, UNSAFE_COPY_THRESHOLD_BYTES);
      final int chunkChars = (int) (chunkBytes >> CHAR_SHIFT);
      putCharArrayChunk(srcArray, srcOffsetChars, chunkChars, unsafeObj, cumOffsetBytes);
      cumOffsetBytes += chunkBytes;
      srcOffsetChars += chunkChars;
      copyBytes -= chunkBytes;
      lengthChars -= chunkChars;
    }
    //final partial (or only) chunk
    putCharArrayChunk(srcArray, srcOffsetChars, lengthChars, unsafeObj, cumOffsetBytes);
  }
  /** Writes one chunk of chars, byte-reversing each value on the way in. */
  private static void putCharArrayChunk(final char[] srcArray, final int srcOffsetChars,
      final int lengthChars, final Object unsafeObj, final long cumOffsetBytes) {
    // JDK 9 adds native intrinsics for such bulk non-native ordered primitive memory copy.
    // TODO-JDK9 use them when the library adds support for JDK 9
    // int-counted loop to avoid safepoint polls
    for (int i = 0; i < lengthChars; i++) {
      unsafe.putChar(unsafeObj, cumOffsetBytes + (((long) i) << CHAR_SHIFT),
          Character.reverseBytes(srcArray[srcOffsetChars + i]));
    }
  }
static void putNonNativeDoubles(final double[] srcArray, final int srcOffsetDoubles,
    final int lengthDoubles, final long copyBytes, final Object unsafeObj,
    final long cumOffsetBytes) {
  // Writes lengthDoubles doubles from srcArray into raw memory at cumOffsetBytes,
  // byte-swapping each value (non-native byte order).
  ResourceImpl.checkBounds(srcOffsetDoubles, lengthDoubles, srcArray.length);
  // Track progress in locals; copy in threshold-sized bursts.
  long dstOff = cumOffsetBytes;
  long bytesLeft = copyBytes;
  int srcOff = srcOffsetDoubles;
  int doublesLeft = lengthDoubles;
  while (bytesLeft > UNSAFE_COPY_THRESHOLD_BYTES) {
    final long burstBytes = Math.min(bytesLeft, UNSAFE_COPY_THRESHOLD_BYTES);
    final int burstDoubles = (int) (burstBytes >> DOUBLE_SHIFT);
    putDoubleArrayChunk(srcArray, srcOff, burstDoubles, unsafeObj, dstOff);
    dstOff += burstBytes;
    srcOff += burstDoubles;
    bytesLeft -= burstBytes;
    doublesLeft -= burstDoubles;
  }
  putDoubleArrayChunk(srcArray, srcOff, doublesLeft, unsafeObj, dstOff);
}
private static void putDoubleArrayChunk(final double[] srcArray, final int srcOffsetDoubles,
    final int lengthDoubles, final Object unsafeObj, final long cumOffsetBytes) {
  // Each double is converted to its raw bit pattern, byte-swapped, and stored as a
  // long. doubleToRawLongBits (not doubleToLongBits) preserves NaN payloads exactly.
  // Int counter keeps this an int-counted loop (no per-iteration safepoint polls).
  long dstOff = cumOffsetBytes;
  for (int i = 0; i < lengthDoubles; i++) {
    unsafe.putLong(unsafeObj, dstOff,
        Long.reverseBytes(Double.doubleToRawLongBits(srcArray[srcOffsetDoubles + i])));
    dstOff += 1L << DOUBLE_SHIFT;
  }
}
static void putNonNativeFloats(final float[] srcArray, final int srcOffsetFloats,
    final int lengthFloats, final long copyBytes, final Object unsafeObj,
    final long cumOffsetBytes) {
  // Writes lengthFloats floats from srcArray into raw memory at cumOffsetBytes,
  // byte-swapping each value (non-native byte order).
  ResourceImpl.checkBounds(srcOffsetFloats, lengthFloats, srcArray.length);
  // Track progress in locals; copy in threshold-sized bursts.
  long dstOff = cumOffsetBytes;
  long bytesLeft = copyBytes;
  int srcOff = srcOffsetFloats;
  int floatsLeft = lengthFloats;
  while (bytesLeft > UNSAFE_COPY_THRESHOLD_BYTES) {
    final long burstBytes = Math.min(bytesLeft, UNSAFE_COPY_THRESHOLD_BYTES);
    final int burstFloats = (int) (burstBytes >> FLOAT_SHIFT);
    putFloatArrayChunk(srcArray, srcOff, burstFloats, unsafeObj, dstOff);
    dstOff += burstBytes;
    srcOff += burstFloats;
    bytesLeft -= burstBytes;
    floatsLeft -= burstFloats;
  }
  putFloatArrayChunk(srcArray, srcOff, floatsLeft, unsafeObj, dstOff);
}
private static void putFloatArrayChunk(final float[] srcArray, final int srcOffsetFloats,
    final int lengthFloats, final Object unsafeObj, final long cumOffsetBytes) {
  // Each float is converted to its raw bit pattern, byte-swapped, and stored as an
  // int. floatToRawIntBits (not floatToIntBits) preserves NaN payloads exactly.
  // Int counter keeps this an int-counted loop (no per-iteration safepoint polls).
  long dstOff = cumOffsetBytes;
  for (int i = 0; i < lengthFloats; i++) {
    unsafe.putInt(unsafeObj, dstOff,
        Integer.reverseBytes(Float.floatToRawIntBits(srcArray[srcOffsetFloats + i])));
    dstOff += 1L << FLOAT_SHIFT;
  }
}
static void putNonNativeInts(final int[] srcArray, final int srcOffsetInts,
    final int lengthInts, final long copyBytes, final Object unsafeObj,
    final long cumOffsetBytes) {
  // Writes lengthInts ints from srcArray into raw memory at cumOffsetBytes,
  // byte-swapping each value (non-native byte order).
  ResourceImpl.checkBounds(srcOffsetInts, lengthInts, srcArray.length);
  // Track progress in locals; copy in threshold-sized bursts.
  long dstOff = cumOffsetBytes;
  long bytesLeft = copyBytes;
  int srcOff = srcOffsetInts;
  int intsLeft = lengthInts;
  while (bytesLeft > UNSAFE_COPY_THRESHOLD_BYTES) {
    final long burstBytes = Math.min(bytesLeft, UNSAFE_COPY_THRESHOLD_BYTES);
    final int burstInts = (int) (burstBytes >> INT_SHIFT);
    putIntArrayChunk(srcArray, srcOff, burstInts, unsafeObj, dstOff);
    dstOff += burstBytes;
    srcOff += burstInts;
    bytesLeft -= burstBytes;
    intsLeft -= burstInts;
  }
  putIntArrayChunk(srcArray, srcOff, intsLeft, unsafeObj, dstOff);
}
private static void putIntArrayChunk(final int[] srcArray, final int srcOffsetInts,
    final int lengthInts, final Object unsafeObj, final long cumOffsetBytes) {
  // Scalar write loop: byte-swaps each int on its way into raw memory.
  // Int counter keeps this an int-counted loop (no per-iteration safepoint polls).
  // (JDK 9+ adds intrinsics for bulk reversed-order copies; not usable on JDK 8.)
  long dstOff = cumOffsetBytes;
  for (int i = 0; i < lengthInts; i++) {
    unsafe.putInt(unsafeObj, dstOff, Integer.reverseBytes(srcArray[srcOffsetInts + i]));
    dstOff += 1L << INT_SHIFT;
  }
}
static void putNonNativeLongs(final long[] srcArray, final int srcOffsetLongs,
    final int lengthLongs, final long copyBytes, final Object unsafeObj,
    final long cumOffsetBytes) {
  // Writes lengthLongs longs from srcArray into raw memory at cumOffsetBytes,
  // byte-swapping each value (non-native byte order).
  ResourceImpl.checkBounds(srcOffsetLongs, lengthLongs, srcArray.length);
  // Track progress in locals; copy in threshold-sized bursts.
  long dstOff = cumOffsetBytes;
  long bytesLeft = copyBytes;
  int srcOff = srcOffsetLongs;
  int longsLeft = lengthLongs;
  while (bytesLeft > UNSAFE_COPY_THRESHOLD_BYTES) {
    final long burstBytes = Math.min(bytesLeft, UNSAFE_COPY_THRESHOLD_BYTES);
    final int burstLongs = (int) (burstBytes >> LONG_SHIFT);
    putLongArrayChunk(srcArray, srcOff, burstLongs, unsafeObj, dstOff);
    dstOff += burstBytes;
    srcOff += burstLongs;
    bytesLeft -= burstBytes;
    longsLeft -= burstLongs;
  }
  putLongArrayChunk(srcArray, srcOff, longsLeft, unsafeObj, dstOff);
}
private static void putLongArrayChunk(final long[] srcArray, final int srcOffsetLongs,
    final int lengthLongs, final Object unsafeObj, final long cumOffsetBytes) {
  // Scalar write loop: byte-swaps each long on its way into raw memory.
  // Int counter keeps this an int-counted loop (no per-iteration safepoint polls).
  // (JDK 9+ adds intrinsics for bulk reversed-order copies; not usable on JDK 8.)
  long dstOff = cumOffsetBytes;
  for (int i = 0; i < lengthLongs; i++) {
    unsafe.putLong(unsafeObj, dstOff, Long.reverseBytes(srcArray[srcOffsetLongs + i]));
    dstOff += 1L << LONG_SHIFT;
  }
}
static void putNonNativeShorts(final short[] srcArray, final int srcOffsetShorts,
    final int lengthShorts, final long copyBytes, final Object unsafeObj,
    final long cumOffsetBytes) {
  // Writes lengthShorts shorts from srcArray into raw memory at cumOffsetBytes,
  // byte-swapping each value (non-native byte order).
  ResourceImpl.checkBounds(srcOffsetShorts, lengthShorts, srcArray.length);
  // Track progress in locals; copy in threshold-sized bursts.
  long dstOff = cumOffsetBytes;
  long bytesLeft = copyBytes;
  int srcOff = srcOffsetShorts;
  int shortsLeft = lengthShorts;
  while (bytesLeft > UNSAFE_COPY_THRESHOLD_BYTES) {
    final long burstBytes = Math.min(bytesLeft, UNSAFE_COPY_THRESHOLD_BYTES);
    final int burstShorts = (int) (burstBytes >> SHORT_SHIFT);
    putShortArrayChunk(srcArray, srcOff, burstShorts, unsafeObj, dstOff);
    dstOff += burstBytes;
    srcOff += burstShorts;
    bytesLeft -= burstBytes;
    shortsLeft -= burstShorts;
  }
  putShortArrayChunk(srcArray, srcOff, shortsLeft, unsafeObj, dstOff);
}
private static void putShortArrayChunk(final short[] srcArray, final int srcOffsetShorts,
    final int lengthShorts, final Object unsafeObj, final long cumOffsetBytes) {
  // Scalar write loop: byte-swaps each short on its way into raw memory.
  // Int counter keeps this an int-counted loop (no per-iteration safepoint polls).
  // (JDK 9+ adds intrinsics for bulk reversed-order copies; not usable on JDK 8.)
  long dstOff = cumOffsetBytes;
  for (int i = 0; i < lengthShorts; i++) {
    unsafe.putShort(unsafeObj, dstOff, Short.reverseBytes(srcArray[srcOffsetShorts + i]));
    dstOff += 1L << SHORT_SHIFT;
  }
}
}
| 2,366 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/HeapWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableBuffer;
/**
* Implementation of {@link WritableBuffer} for heap-based, native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class HeapWritableBufferImpl extends NativeWritableBufferImpl {
  private final Object unsafeObj; //the backing on-heap primitive array

  /**
   * Constructs a heap-backed, native-byte-order WritableBuffer view.
   *
   * @param unsafeObj the backing on-heap array
   * @param offsetBytes offset of this view from the start of the backing array
   * @param capacityBytes capacity of this view in bytes
   * @param typeId inherited type bits; HEAP | BUFFER | NATIVE are (re)asserted here
   * @param cumOffsetBytes cumulative offset in bytes, including the array base offset
   * @param memReqSvr the MemoryRequestServer, may be null (field lives in ResourceImpl)
   * @throws IllegalStateException if another thread already owns this resource
   */
  HeapWritableBufferImpl(
      final Object unsafeObj,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes,
      final MemoryRequestServer memReqSvr) {
    super(capacityBytes);
    this.unsafeObj = unsafeObj;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    this.typeId = removeNnBuf(typeId) | HEAP | BUFFER | NATIVE;
    this.cumOffsetBytes = cumOffsetBytes;
    this.memReqSvr = memReqSvr; //in ResourceImpl
    //single-owner-thread policy: fail fast if another thread already claimed ownership
    if ((this.owner != null) && (this.owner != Thread.currentThread())) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = Thread.currentThread();
  }

  /** Returns a restricted view of this buffer over [regionOffsetBytes, +capacityBytes). */
  @Override
  BaseWritableBufferImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long newOffsetBytes = offsetBytes + regionOffsetBytes;
    final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    int typeIdOut = removeNnBuf(typeId) | BUFFER | REGION | (readOnly ? READONLY : 0);
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new HeapWritableBufferImpl(
          unsafeObj, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new HeapNonNativeWritableBufferImpl(
          unsafeObj, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
    }
  }

  /** Returns a Memory view of this buffer's entire span. */
  @Override
  BaseWritableMemoryImpl toWritableMemory(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | MEMORY | (readOnly ? READONLY : 0);
    //Fix: use the same byte-order test as toWritableRegion. The previous
    //`byteOrder == ByteOrder.nativeOrder()` silently treated a null byteOrder as
    //non-native; Util.isNativeByteOrder rejects null explicitly.
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new HeapWritableMemoryImpl(
          unsafeObj, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new HeapNonNativeWritableMemoryImpl(
          unsafeObj, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    }
  }

  /** Returns a duplicate view with the same span but independent position/limit. */
  @Override
  BaseWritableBufferImpl toDuplicate(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | BUFFER | DUPLICATE | (readOnly ? READONLY : 0);
    //Fix: same byte-order test as toWritableRegion (see note in toWritableMemory).
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new HeapWritableBufferImpl(
          unsafeObj, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new HeapNonNativeWritableBufferImpl(
          unsafeObj, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    }
  }

  /** Returns the backing on-heap array (never null for heap-backed resources). */
  @Override
  Object getUnsafeObject() {
    return unsafeObj;
  }
}
| 2,367 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/MapWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.WritableMemory;
/**
* Implementation of {@link WritableMemory} for map memory, native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class MapWritableMemoryImpl extends NativeWritableMemoryImpl {
  private final AllocateDirectWritableMap dirWMap; //owns the memory-mapped file resource

  /**
   * Constructs a memory-mapped, native-byte-order WritableMemory view.
   *
   * @param dirWMap the map allocation that owns the underlying file mapping
   * @param offsetBytes offset of this view from the start of the mapping
   * @param capacityBytes capacity of this view in bytes
   * @param typeId inherited type bits; MAP | MEMORY | NATIVE are (re)asserted here
   * @param cumOffsetBytes cumulative offset in bytes from the native base address
   * @throws IllegalStateException if another thread already owns this resource
   */
  MapWritableMemoryImpl(
      final AllocateDirectWritableMap dirWMap,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes) {
    super();
    this.dirWMap = dirWMap;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    this.typeId = removeNnBuf(typeId) | MAP | MEMORY | NATIVE;
    this.cumOffsetBytes = cumOffsetBytes;
    //single-owner-thread policy: fail fast if another thread already claimed ownership
    if ((this.owner != null) && (this.owner != Thread.currentThread())) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = Thread.currentThread();
  }

  /** Returns a restricted view of this memory over [regionOffsetBytes, +capacityBytes). */
  @Override
  BaseWritableMemoryImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long newOffsetBytes = offsetBytes + regionOffsetBytes;
    final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    int typeIdOut = removeNnBuf(typeId) | MAP | REGION | (readOnly ? READONLY : 0);
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new MapWritableMemoryImpl(
          dirWMap, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes);
    } else {
      typeIdOut |= NONNATIVE;
      return new MapNonNativeWritableMemoryImpl(
          dirWMap, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes);
    }
  }

  /** Returns a Buffer view of this memory's entire span. */
  @Override
  BaseWritableBufferImpl toWritableBuffer(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | BUFFER | (readOnly ? READONLY : 0);
    //Fix: use the same byte-order test as toWritableRegion. The previous
    //`byteOrder == ByteOrder.nativeOrder()` silently treated a null byteOrder as
    //non-native; Util.isNativeByteOrder rejects null explicitly.
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new MapWritableBufferImpl(
          dirWMap, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes);
    } else {
      typeIdOut |= NONNATIVE;
      return new MapNonNativeWritableBufferImpl(
          dirWMap, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes);
    }
  }

  /** Closes the underlying file mapping. Only the owner thread may close. */
  @Override
  public void close() {
    checkValid();
    checkThread(owner);
    dirWMap.close();
  }

  /** Forces dirty pages to the backing file. Requires a writable, valid mapping. */
  @Override
  public void force() {
    checkValid();
    checkThread(owner);
    checkNotReadOnly();
    dirWMap.force(); //checksValidAndThread
  }

  /** Memory-mapped resources have no backing heap array. */
  @Override
  Object getUnsafeObject() {
    return null;
  }

  /** Returns true if the mapped pages are resident in physical memory. */
  @Override
  public boolean isLoaded() {
    checkValid();
    checkThread(owner);
    return dirWMap.isLoaded(); //checksValidAndThread
  }

  /** Validity is delegated to the owning map allocation. */
  @Override
  public boolean isValid() {
    return dirWMap.getValid().get();
  }

  /** Hints the OS to load the mapped pages into physical memory. */
  @Override
  public void load() {
    checkValid();
    checkThread(owner);
    dirWMap.load(); //checksValidAndThread
  }
}
| 2,368 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/NonNativeWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import org.apache.datasketches.memory.WritableBuffer;
/**
* Implementation of {@link WritableBuffer} for non-native endian byte order.
* @author Roman Leventov
* @author Lee Rhodes
*/
@SuppressWarnings("restriction")
abstract class NonNativeWritableBufferImpl extends BaseWritableBufferImpl {

  //Pass-through constructor
  NonNativeWritableBufferImpl(final long capacityBytes) { super(capacityBytes); }

  //PRIMITIVE getX() and getXArray()
  //Every read below fetches in native order and byte-swaps the value; bulk reads
  //delegate to CompareAndCopy.getNonNative*(). Position-based variants advance the
  //buffer position; offset-based variants leave it unchanged.

  // Reads a byte-swapped char at the current position; advances position by 2 bytes.
  @Override
  public char getChar() {
    return Character.reverseBytes(getNativeOrderedChar());
  }

  // Reads a byte-swapped char at the given offset; position is unchanged.
  @Override
  public char getChar(final long offsetBytes) {
    return Character.reverseBytes(getNativeOrderedChar(offsetBytes));
  }

  // Bulk read of lengthChars byte-swapped chars starting at the current position.
  @Override
  public void getCharArray(final char[] dstArray, final int dstOffsetChars, final int lengthChars) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthChars) << CHAR_SHIFT;
    incrementAndCheckPositionForRead(pos, copyBytes);
    CompareAndCopy.getNonNativeChars(getUnsafeObject(), getCumulativeOffset(pos), copyBytes,
        dstArray, dstOffsetChars, lengthChars);
  }

  // Reads the raw long, reverses its bytes, then reinterprets as a double.
  @Override
  public double getDouble() {
    final long pos = getPosition();
    incrementAndCheckPositionForRead(pos, ARRAY_DOUBLE_INDEX_SCALE);
    return Double.longBitsToDouble(
        Long.reverseBytes(unsafe.getLong(getUnsafeObject(), getCumulativeOffset(pos))));
  }

  @Override
  public double getDouble(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_DOUBLE_INDEX_SCALE);
    return Double.longBitsToDouble(
        Long.reverseBytes(unsafe.getLong(getUnsafeObject(), getCumulativeOffset(offsetBytes))));
  }

  @Override
  public void getDoubleArray(final double[] dstArray, final int dstOffsetDoubles,
      final int lengthDoubles) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthDoubles) << DOUBLE_SHIFT;
    incrementAndCheckPositionForRead(pos, copyBytes);
    CompareAndCopy.getNonNativeDoubles(getUnsafeObject(), getCumulativeOffset(pos), copyBytes,
        dstArray, dstOffsetDoubles, lengthDoubles);
  }

  // Reads the raw int, reverses its bytes, then reinterprets as a float.
  @Override
  public float getFloat() {
    final long pos = getPosition();
    incrementAndCheckPositionForRead(pos, ARRAY_FLOAT_INDEX_SCALE);
    return Float.intBitsToFloat(
        Integer.reverseBytes(unsafe.getInt(getUnsafeObject(), getCumulativeOffset(pos))));
  }

  @Override
  public float getFloat(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_FLOAT_INDEX_SCALE);
    return Float.intBitsToFloat(
        Integer.reverseBytes(unsafe.getInt(getUnsafeObject(), getCumulativeOffset(offsetBytes))));
  }

  @Override
  public void getFloatArray(final float[] dstArray, final int dstOffsetFloats,
      final int lengthFloats) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthFloats) << FLOAT_SHIFT;
    incrementAndCheckPositionForRead(pos, copyBytes);
    CompareAndCopy.getNonNativeFloats(getUnsafeObject(), getCumulativeOffset(pos), copyBytes,
        dstArray, dstOffsetFloats, lengthFloats);
  }

  @Override
  public int getInt() {
    return Integer.reverseBytes(getNativeOrderedInt());
  }

  @Override
  public int getInt(final long offsetBytes) {
    return Integer.reverseBytes(getNativeOrderedInt(offsetBytes));
  }

  @Override
  public void getIntArray(final int[] dstArray, final int dstOffsetInts, final int lengthInts) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthInts) << INT_SHIFT;
    incrementAndCheckPositionForRead(pos, copyBytes);
    CompareAndCopy.getNonNativeInts(getUnsafeObject(), getCumulativeOffset(pos), copyBytes,
        dstArray, dstOffsetInts, lengthInts);
  }

  @Override
  public long getLong() {
    return Long.reverseBytes(getNativeOrderedLong());
  }

  @Override
  public long getLong(final long offsetBytes) {
    return Long.reverseBytes(getNativeOrderedLong(offsetBytes));
  }

  @Override
  public void getLongArray(final long[] dstArray, final int dstOffsetLongs, final int lengthLongs) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthLongs) << LONG_SHIFT;
    incrementAndCheckPositionForRead(pos, copyBytes);
    CompareAndCopy.getNonNativeLongs(getUnsafeObject(), getCumulativeOffset(pos), copyBytes,
        dstArray, dstOffsetLongs, lengthLongs);
  }

  @Override
  public short getShort() {
    return Short.reverseBytes(getNativeOrderedShort());
  }

  @Override
  public short getShort(final long offsetBytes) {
    return Short.reverseBytes(getNativeOrderedShort(offsetBytes));
  }

  @Override
  public void getShortArray(final short[] dstArray, final int dstOffsetShorts,
      final int lengthShorts) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthShorts) << SHORT_SHIFT;
    incrementAndCheckPositionForRead(pos, copyBytes);
    CompareAndCopy.getNonNativeShorts(getUnsafeObject(), getCumulativeOffset(pos), copyBytes,
        dstArray, dstOffsetShorts, lengthShorts);
  }

  //PRIMITIVE putX() and putXArray()
  //Every write below byte-swaps the value first, then stores in native order; bulk
  //writes delegate to CompareAndCopy.putNonNative*().

  @Override
  public void putChar(final char value) {
    putNativeOrderedChar(Character.reverseBytes(value));
  }

  @Override
  public void putChar(final long offsetBytes, final char value) {
    putNativeOrderedChar(offsetBytes, Character.reverseBytes(value));
  }

  @Override
  public void putCharArray(final char[] srcArray, final int srcOffsetChars, final int lengthChars) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthChars) << CHAR_SHIFT;
    incrementAndCheckPositionForWrite(pos, copyBytes);
    CompareAndCopy.putNonNativeChars(srcArray, srcOffsetChars, lengthChars, copyBytes,
        getUnsafeObject(), getCumulativeOffset(pos));
  }

  // doubleToRawLongBits preserves NaN payloads; the swapped bits are stored as a long.
  @Override
  public void putDouble(final double value) {
    final long pos = getPosition();
    incrementAndCheckPositionForWrite(pos, ARRAY_DOUBLE_INDEX_SCALE);
    unsafe.putLong(getUnsafeObject(), getCumulativeOffset(pos),
        Long.reverseBytes(Double.doubleToRawLongBits(value)));
  }

  @Override
  public void putDouble(final long offsetBytes, final double value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_DOUBLE_INDEX_SCALE);
    unsafe.putLong(getUnsafeObject(), getCumulativeOffset(offsetBytes),
        Long.reverseBytes(Double.doubleToRawLongBits(value)));
  }

  @Override
  public void putDoubleArray(final double[] srcArray, final int srcOffsetDoubles,
      final int lengthDoubles) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthDoubles) << DOUBLE_SHIFT;
    incrementAndCheckPositionForWrite(pos, copyBytes);
    CompareAndCopy.putNonNativeDoubles(srcArray, srcOffsetDoubles, lengthDoubles, copyBytes,
        getUnsafeObject(), getCumulativeOffset(pos));
  }

  // floatToRawIntBits preserves NaN payloads; the swapped bits are stored as an int.
  @Override
  public void putFloat(final float value) {
    final long pos = getPosition();
    incrementAndCheckPositionForWrite(pos, ARRAY_FLOAT_INDEX_SCALE);
    unsafe.putInt(getUnsafeObject(), getCumulativeOffset(pos),
        Integer.reverseBytes(Float.floatToRawIntBits(value)));
  }

  @Override
  public void putFloat(final long offsetBytes, final float value) {
    checkValidAndBoundsForWrite(offsetBytes, ARRAY_FLOAT_INDEX_SCALE);
    unsafe.putInt(getUnsafeObject(), getCumulativeOffset(offsetBytes),
        Integer.reverseBytes(Float.floatToRawIntBits(value)));
  }

  @Override
  public void putFloatArray(final float[] srcArray, final int srcOffsetFloats,
      final int lengthFloats) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthFloats) << FLOAT_SHIFT;
    incrementAndCheckPositionForWrite(pos, copyBytes);
    CompareAndCopy.putNonNativeFloats(srcArray, srcOffsetFloats, lengthFloats, copyBytes,
        getUnsafeObject(), getCumulativeOffset(pos));
  }

  @Override
  public void putInt(final int value) {
    putNativeOrderedInt(Integer.reverseBytes(value));
  }

  @Override
  public void putInt(final long offsetBytes, final int value) {
    putNativeOrderedInt(offsetBytes, Integer.reverseBytes(value));
  }

  @Override
  public void putIntArray(final int[] srcArray, final int srcOffsetInts, final int lengthInts) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthInts) << INT_SHIFT;
    incrementAndCheckPositionForWrite(pos, copyBytes);
    CompareAndCopy.putNonNativeInts(srcArray, srcOffsetInts, lengthInts, copyBytes,
        getUnsafeObject(), getCumulativeOffset(pos));
  }

  @Override
  public void putLong(final long value) {
    putNativeOrderedLong(Long.reverseBytes(value));
  }

  @Override
  public void putLong(final long offsetBytes, final long value) {
    putNativeOrderedLong(offsetBytes, Long.reverseBytes(value));
  }

  @Override
  public void putLongArray(final long[] srcArray, final int srcOffsetLongs, final int lengthLongs) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthLongs) << LONG_SHIFT;
    incrementAndCheckPositionForWrite(pos, copyBytes);
    CompareAndCopy.putNonNativeLongs(srcArray, srcOffsetLongs, lengthLongs, copyBytes,
        getUnsafeObject(), getCumulativeOffset(pos));
  }

  @Override
  public void putShort(final short value) {
    putNativeOrderedShort(Short.reverseBytes(value));
  }

  @Override
  public void putShort(final long offsetBytes, final short value) {
    putNativeOrderedShort(offsetBytes, Short.reverseBytes(value));
  }

  @Override
  public void putShortArray(final short[] srcArray, final int srcOffsetShorts,
      final int lengthShorts) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthShorts) << SHORT_SHIFT;
    incrementAndCheckPositionForWrite(pos, copyBytes);
    CompareAndCopy.putNonNativeShorts(srcArray, srcOffsetShorts, lengthShorts, copyBytes,
        getUnsafeObject(), getCumulativeOffset(pos));
  }
}
| 2,369 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/UnsafeUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import sun.misc.Unsafe;
/**
* Provides access to the sun.misc.Unsafe class and its key static fields.
*
* @author Lee Rhodes
*/
@SuppressWarnings({"restriction"})
public final class UnsafeUtil {
  public static final Unsafe unsafe;
  public static final String JDK; //must be at least "1.8"
  public static final int JDK_MAJOR; //8, 9, 10, 11, 12, etc

  //not an indicator of whether compressed references are used.
  public static final int ADDRESS_SIZE;

  //For 64-bit JVMs: these offsets vary depending on coop: 16 for JVM <= 32GB; 24 for JVM > 32GB.
  // Making this constant long-typed, rather than int, to exclude possibility of accidental overflow
  // in expressions like arrayLength * ARRAY_BYTE_BASE_OFFSET, where arrayLength is int-typed.
  // The same consideration for constants below: ARRAY_*_INDEX_SCALE, ARRAY_*_INDEX_SHIFT.
  public static final long ARRAY_BOOLEAN_BASE_OFFSET;
  public static final long ARRAY_BYTE_BASE_OFFSET;
  public static final long ARRAY_SHORT_BASE_OFFSET;
  public static final long ARRAY_CHAR_BASE_OFFSET;
  public static final long ARRAY_INT_BASE_OFFSET;
  public static final long ARRAY_LONG_BASE_OFFSET;
  public static final long ARRAY_FLOAT_BASE_OFFSET;
  public static final long ARRAY_DOUBLE_BASE_OFFSET;
  public static final long ARRAY_OBJECT_BASE_OFFSET;

  //@formatter:off
  // Setting those values directly instead of using unsafe.arrayIndexScale(), because it may be
  // beneficial for runtime execution, those values are backed into generated machine code as
  // constants. E. g. see https://shipilev.net/jvm-anatomy-park/14-constant-variables/
  public static final int ARRAY_BOOLEAN_INDEX_SCALE = 1;
  public static final int ARRAY_BYTE_INDEX_SCALE = 1;
  public static final long ARRAY_SHORT_INDEX_SCALE = 2;
  public static final long ARRAY_CHAR_INDEX_SCALE = 2;
  public static final long ARRAY_INT_INDEX_SCALE = 4;
  public static final long ARRAY_LONG_INDEX_SCALE = 8;
  public static final long ARRAY_FLOAT_INDEX_SCALE = 4;
  public static final long ARRAY_DOUBLE_INDEX_SCALE = 8;
  public static final long ARRAY_OBJECT_INDEX_SCALE; // varies, 4 or 8 depending on coop

  //Used to convert "type" to bytes: bytes = longs << LONG_SHIFT
  public static final int BOOLEAN_SHIFT = 0;
  public static final int BYTE_SHIFT = 0;
  public static final long SHORT_SHIFT = 1;
  public static final long CHAR_SHIFT = 1;
  public static final long INT_SHIFT = 2;
  public static final long LONG_SHIFT = 3;
  public static final long FLOAT_SHIFT = 2;
  public static final long DOUBLE_SHIFT = 3;
  public static final long OBJECT_SHIFT; // varies, 2 or 3 depending on coop

  public static final String LS = System.getProperty("line.separator");
  //@formatter:on

  static {
    try {
      //Acquire Unsafe via its private constructor; works without the
      //"theUnsafe" singleton field, which may differ across JVM vendors.
      final Constructor<Unsafe> unsafeConstructor = Unsafe.class.getDeclaredConstructor();
      unsafeConstructor.setAccessible(true);
      unsafe = unsafeConstructor.newInstance();

      // Alternative, but may not work across different JVMs.
      // Field field = Unsafe.class.getDeclaredField("theUnsafe");
      // field.setAccessible(true);
      // unsafe = (Unsafe) field.get(null);
    } catch (final InstantiationException | IllegalAccessException | IllegalArgumentException
        | InvocationTargetException | NoSuchMethodException e) {
      e.printStackTrace();
      throw new RuntimeException("Unable to acquire Unsafe. " + e);
    }

    //4 on 32-bit systems. 4 on 64-bit systems < 32GB, otherwise 8.
    //This alone is not an indicator of compressed ref (coop)
    ADDRESS_SIZE = unsafe.addressSize();

    ARRAY_BOOLEAN_BASE_OFFSET = unsafe.arrayBaseOffset(boolean[].class);
    ARRAY_BYTE_BASE_OFFSET = unsafe.arrayBaseOffset(byte[].class);
    ARRAY_SHORT_BASE_OFFSET = unsafe.arrayBaseOffset(short[].class);
    ARRAY_CHAR_BASE_OFFSET = unsafe.arrayBaseOffset(char[].class);
    ARRAY_INT_BASE_OFFSET = unsafe.arrayBaseOffset(int[].class);
    ARRAY_LONG_BASE_OFFSET = unsafe.arrayBaseOffset(long[].class);
    ARRAY_FLOAT_BASE_OFFSET = unsafe.arrayBaseOffset(float[].class);
    ARRAY_DOUBLE_BASE_OFFSET = unsafe.arrayBaseOffset(double[].class);
    ARRAY_OBJECT_BASE_OFFSET = unsafe.arrayBaseOffset(Object[].class);

    ARRAY_OBJECT_INDEX_SCALE = unsafe.arrayIndexScale(Object[].class);
    OBJECT_SHIFT = ARRAY_OBJECT_INDEX_SCALE == 4 ? 2 : 3;

    //Parse and validate the running JVM's version; throws if unsupported.
    final String jdkVer = System.getProperty("java.version");
    final int[] p = parseJavaVersion(jdkVer);
    JDK = p[0] + "." + p[1];
    JDK_MAJOR = (p[0] == 1) ? p[1] : p[0];
  }

  private UnsafeUtil() {}

  /**
   * Returns first two number groups of the java version string.
   * @param jdkVer the java version string from System.getProperty("java.version").
   * @return first two number groups of the java version string.
   * @throws IllegalArgumentException if the string cannot be parsed, or if the
   * parsed version fails {@link #checkJavaVersion(String, int, int)}.
   */
  public static int[] parseJavaVersion(final String jdkVer) {
    final int p0, p1;
    try {
      String[] parts = jdkVer.trim().split("[^0-9\\.]");//grab only number groups and "."
      parts = parts[0].split("\\."); //split out the number groups
      p0 = Integer.parseInt(parts[0]); //the first number group
      p1 = (parts.length > 1) ? Integer.parseInt(parts[1]) : 0; //2nd number group, or 0
    } catch (final NumberFormatException | ArrayIndexOutOfBoundsException e) {
      throw new IllegalArgumentException("Improper Java -version string: " + jdkVer + "\n" + e);
    }
    checkJavaVersion(jdkVer, p0, p1);
    return new int[] {p0, p1};
  }

  /**
   * Checks that the parsed version is within the supported range: 1.8 up through
   * major version 13.
   * @param jdkVer the original version string, used only in the exception message
   * @param p0 first parsed number group (1 for legacy "1.8"-style strings)
   * @param p1 second parsed number group (minor, or major for legacy strings)
   * @throws IllegalArgumentException if the version is outside the supported range
   */
  public static void checkJavaVersion(final String jdkVer, final int p0, final int p1) {
    if ( (p0 < 1) || ((p0 == 1) && (p1 < 8)) || (p0 > 13) ) {
      //Fix: message previously claimed "1.8, 8, 11, 17" were supported, but the
      //condition above rejects every major version greater than 13.
      throw new IllegalArgumentException(
          "Unsupported JDK Major Version, must be within 1.8 and 13: " + jdkVer);
    }
  }

  /**
   * Returns the Unsafe field offset of the named declared field of class c.
   * @param c the class declaring the field
   * @param fieldName the field name
   * @return the field offset
   * @throws IllegalStateException if the field does not exist
   */
  public static long getFieldOffset(final Class<?> c, final String fieldName) {
    try {
      return unsafe.objectFieldOffset(c.getDeclaredField(fieldName));
    } catch (final NoSuchFieldException e) {
      throw new IllegalStateException(e + ": " + fieldName);
    }
  }

  /**
   * Like {@link Unsafe#arrayBaseOffset(Class)}, but caches return values for common array types.
   * Useful because calling {@link Unsafe#arrayBaseOffset(Class)} directly incurs more overhead.
   * @param c The given Class&lt;?&gt;.
   * @return the base-offset
   */
  public static long getArrayBaseOffset(final Class<?> c) {
    // Ordering here is roughly in order of what we expect to be most popular.
    if (c == byte[].class) {
      return ARRAY_BYTE_BASE_OFFSET;
    } else if (c == int[].class) {
      return ARRAY_INT_BASE_OFFSET;
    } else if (c == long[].class) {
      return ARRAY_LONG_BASE_OFFSET;
    } else if (c == float[].class) {
      return ARRAY_FLOAT_BASE_OFFSET;
    } else if (c == double[].class) {
      return ARRAY_DOUBLE_BASE_OFFSET;
    } else if (c == boolean[].class) {
      return ARRAY_BOOLEAN_BASE_OFFSET;
    } else if (c == short[].class) {
      return ARRAY_SHORT_BASE_OFFSET;
    } else if (c == char[].class) {
      return ARRAY_CHAR_BASE_OFFSET;
    } else if (c == Object[].class) {
      return ARRAY_OBJECT_BASE_OFFSET;
    } else {
      return unsafe.arrayBaseOffset(c);
    }
  }
}
| 2,370 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/BBNonNativeWritableMemoryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableMemory;
/**
* Implementation of {@link WritableMemory} for ByteBuffer, non-native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class BBNonNativeWritableMemoryImpl extends NonNativeWritableMemoryImpl {
  private final ByteBuffer byteBuf; //holds a reference to a ByteBuffer until we are done with it.
  private final Object unsafeObj;
  private final long nativeBaseOffset; //raw off-heap address of allocation base if ByteBuffer direct, else 0

  /**
   * Constructs a writable, non-native-order Memory view over a ByteBuffer.
   * Enforces a single-owner policy: only the creating thread may use this resource.
   */
  BBNonNativeWritableMemoryImpl(
      final Object unsafeObj,
      final long nativeBaseOffset,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes,
      final MemoryRequestServer memReqSvr,
      final ByteBuffer byteBuf) {
    super();
    this.unsafeObj = unsafeObj;
    this.nativeBaseOffset = nativeBaseOffset;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    this.typeId = removeNnBuf(typeId) | BYTEBUF | MEMORY | NONNATIVE;
    this.cumOffsetBytes = cumOffsetBytes;
    this.memReqSvr = memReqSvr; //in ResourceImpl
    this.byteBuf = byteBuf;
    if ((this.owner != null) && (this.owner != Thread.currentThread())) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = Thread.currentThread();
  }

  @Override
  BaseWritableMemoryImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long newOffsetBytes = offsetBytes + regionOffsetBytes;
    final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    int typeIdOut = removeNnBuf(typeId) | MEMORY | REGION | (readOnly ? READONLY : 0);
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new BBWritableMemoryImpl(
          unsafeObj, nativeBaseOffset, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr, byteBuf);
    } else {
      typeIdOut |= NONNATIVE;
      return new BBNonNativeWritableMemoryImpl(
          unsafeObj, nativeBaseOffset, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr, byteBuf);
    }
  }

  @Override
  BaseWritableBufferImpl toWritableBuffer(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | BUFFER | (readOnly ? READONLY : 0);
    //Use Util.isNativeByteOrder(..) for consistency with toWritableRegion(..) above.
    //Unlike a raw "==" comparison, it does not silently select the non-native path
    //for an unexpected byteOrder argument.
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new BBWritableBufferImpl(
          unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
    } else {
      typeIdOut |= NONNATIVE;
      return new BBNonNativeWritableBufferImpl(
          unsafeObj, nativeBaseOffset, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr, byteBuf);
    }
  }

  @Override
  public ByteBuffer getByteBuffer() {
    return byteBuf;
  }

  @Override
  Object getUnsafeObject() {
    return unsafeObj;
  }
}
| 2,371 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/Prim.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BOOLEAN_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BOOLEAN_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BYTE_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BYTE_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_CHAR_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_CHAR_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_INT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_INT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_LONG_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_LONG_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_OBJECT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_OBJECT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_SHORT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_SHORT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.BOOLEAN_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.BYTE_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.CHAR_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.DOUBLE_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.FLOAT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.INT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.LONG_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.OBJECT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.SHORT_SHIFT;
/**
* Creates easy to access association between the major Unsafe constants.
*
* @author Lee Rhodes
*/
public enum Prim {
  BOOLEAN(ARRAY_BOOLEAN_BASE_OFFSET, ARRAY_BOOLEAN_INDEX_SCALE, BOOLEAN_SHIFT),
  BYTE(ARRAY_BYTE_BASE_OFFSET, ARRAY_BYTE_INDEX_SCALE, BYTE_SHIFT),
  CHAR(ARRAY_CHAR_BASE_OFFSET, ARRAY_CHAR_INDEX_SCALE, CHAR_SHIFT),
  SHORT(ARRAY_SHORT_BASE_OFFSET, ARRAY_SHORT_INDEX_SCALE, SHORT_SHIFT),
  INT(ARRAY_INT_BASE_OFFSET, ARRAY_INT_INDEX_SCALE, INT_SHIFT),
  LONG(ARRAY_LONG_BASE_OFFSET, ARRAY_LONG_INDEX_SCALE, LONG_SHIFT),
  FLOAT(ARRAY_FLOAT_BASE_OFFSET, ARRAY_FLOAT_INDEX_SCALE, FLOAT_SHIFT),
  DOUBLE(ARRAY_DOUBLE_BASE_OFFSET, ARRAY_DOUBLE_INDEX_SCALE, DOUBLE_SHIFT),
  OBJECT(ARRAY_OBJECT_BASE_OFFSET, ARRAY_OBJECT_INDEX_SCALE, OBJECT_SHIFT);

  private final long arrayBaseOffset;
  private final long arrayIndexScale;
  private final long sizeShift;

  Prim(final long arrayBaseOffset, final long arrayIndexScale, final long sizeShift) {
    this.arrayBaseOffset = arrayBaseOffset;
    this.arrayIndexScale = arrayIndexScale;
    this.sizeShift = sizeShift;
  }

  /** @return the unsafe array base offset in bytes for this primitive type. */
  public long off() {
    return arrayBaseOffset;
  }

  /** @return the unsafe array index scale (element size in bytes) for this primitive type. */
  public long scale() {
    return arrayIndexScale;
  }

  /** @return the left-shift equivalent of the index scale for this primitive type. */
  public long shift() {
    return sizeShift;
  }
}
| 2,372 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/XxHash64.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BOOLEAN_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_BYTE_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_CHAR_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_INT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_LONG_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_SHORT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.CHAR_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.DOUBLE_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.FLOAT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.INT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.LONG_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.SHORT_SHIFT;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
/**
* The XxHash is a fast, non-cryptographic, 64-bit hash function that has
* excellent avalanche and 2-way bit independence properties.
* This java version adapted the C++ version and the OpenHFT/Zero-Allocation-Hashing implementation
* referenced below as inspiration.
*
* <p>The C++ source repository:
* <a href="https://github.com/Cyan4973/xxHash">
* https://github.com/Cyan4973/xxHash</a>. It has a BSD 2-Clause License:
* <a href="http://www.opensource.org/licenses/bsd-license.php">
* http://www.opensource.org/licenses/bsd-license.php</a>. See LICENSE.
*
* <p>Portions of this code were adapted from
* <a href="https://github.com/OpenHFT/Zero-Allocation-Hashing/blob/master/src/main/java/net/openhft/hashing/XxHash.java">
* OpenHFT/Zero-Allocation-Hashing</a>, which has an Apache 2 license as does this site. See LICENSE.
*
* @author Lee Rhodes
*/
@SuppressWarnings("restriction")
public class XxHash64 {
  // Unsigned, 64-bit primes used by the XXH64 algorithm
  private static final long P1 = -7046029288634856825L;
  private static final long P2 = -4417276706812531889L;
  private static final long P3 = 1609587929392839161L;
  private static final long P4 = -8796714831421723037L;
  private static final long P5 = 2870177450012600261L;

  /**
   * Returns the 64-bit hash of the sequence of bytes in the unsafeObject specified by
   * <i>cumOffsetBytes</i>, <i>lengthBytes</i> and a <i>seed</i>.
   *
   * @param unsafeObj A reference to the object parameter required by unsafe. It may be null.
   * @param cumOffsetBytes cumulative offset in bytes of this object from the backing resource
   * including any user given offsetBytes. This offset may also include other offset components
   * such as the native off-heap memory address, DirectByteBuffer split offsets, region offsets,
   * and unsafe arrayBaseOffsets.
   * @param lengthBytes the length in bytes of the sequence to be hashed
   * @param seed a given seed
   * @return the 64-bit hash of the sequence of bytes in the unsafeObject specified by
   * <i>cumOffsetBytes</i>, <i>lengthBytes</i> and a <i>seed</i>.
   */
  static long hash(final Object unsafeObj, long cumOffsetBytes, final long lengthBytes,
      final long seed) {
    long hash;
    long remaining = lengthBytes;
    //Bulk phase: consume the input 32 bytes per iteration using 4 independent 64-bit lanes.
    if (remaining >= 32) {
      long v1 = seed + P1 + P2;
      long v2 = seed + P2;
      long v3 = seed;
      long v4 = seed - P1;
      do {
        //each lane reads one 8-byte word, then multiply/rotate/multiply mixes it in
        v1 += unsafe.getLong(unsafeObj, cumOffsetBytes) * P2;
        v1 = Long.rotateLeft(v1, 31);
        v1 *= P1;
        v2 += unsafe.getLong(unsafeObj, cumOffsetBytes + 8L) * P2;
        v2 = Long.rotateLeft(v2, 31);
        v2 *= P1;
        v3 += unsafe.getLong(unsafeObj, cumOffsetBytes + 16L) * P2;
        v3 = Long.rotateLeft(v3, 31);
        v3 *= P1;
        v4 += unsafe.getLong(unsafeObj, cumOffsetBytes + 24L) * P2;
        v4 = Long.rotateLeft(v4, 31);
        v4 *= P1;
        cumOffsetBytes += 32;
        remaining -= 32;
      } while (remaining >= 32);
      //Merge phase: fold the 4 lanes into a single 64-bit value.
      hash = Long.rotateLeft(v1, 1)
          + Long.rotateLeft(v2, 7)
          + Long.rotateLeft(v3, 12)
          + Long.rotateLeft(v4, 18);
      v1 *= P2;
      v1 = Long.rotateLeft(v1, 31);
      v1 *= P1;
      hash ^= v1;
      hash = (hash * P1) + P4;
      v2 *= P2;
      v2 = Long.rotateLeft(v2, 31);
      v2 *= P1;
      hash ^= v2;
      hash = (hash * P1) + P4;
      v3 *= P2;
      v3 = Long.rotateLeft(v3, 31);
      v3 *= P1;
      hash ^= v3;
      hash = (hash * P1) + P4;
      v4 *= P2;
      v4 = Long.rotateLeft(v4, 31);
      v4 *= P1;
      hash ^= v4;
      hash = (hash * P1) + P4;
    } //end remaining >= 32
    else {
      //inputs shorter than 32 bytes skip the lane accumulation entirely
      hash = seed + P5;
    }
    hash += lengthBytes;
    //Tail phase: consume remaining bytes in 8-, then 4-, then 1-byte steps.
    while (remaining >= 8) {
      long k1 = unsafe.getLong(unsafeObj, cumOffsetBytes);
      k1 *= P2;
      k1 = Long.rotateLeft(k1, 31);
      k1 *= P1;
      hash ^= k1;
      hash = (Long.rotateLeft(hash, 27) * P1) + P4;
      cumOffsetBytes += 8;
      remaining -= 8;
    }
    if (remaining >= 4) { //treat as unsigned ints
      hash ^= (unsafe.getInt(unsafeObj, cumOffsetBytes) & 0XFFFF_FFFFL) * P1;
      hash = (Long.rotateLeft(hash, 23) * P2) + P3;
      cumOffsetBytes += 4;
      remaining -= 4;
    }
    while (remaining != 0) { //treat as unsigned bytes
      hash ^= (unsafe.getByte(unsafeObj, cumOffsetBytes) & 0XFFL) * P5;
      hash = Long.rotateLeft(hash, 11) * P1;
      --remaining;
      ++cumOffsetBytes;
    }
    return finalize(hash);
  }

  /**
   * Returns a 64-bit hash from a single long. This method has been optimized for speed when only
   * a single hash of a long is required.
   * @param in A long.
   * @param seed A long valued seed.
   * @return the hash.
   */
  public static long hash(final long in, final long seed) {
    //equivalent to hash(null, ..., 8, seed) over the 8 bytes of "in", without the unsafe read
    long hash = seed + P5;
    hash += 8;
    long k1 = in;
    k1 *= P2;
    k1 = Long.rotateLeft(k1, 31);
    k1 *= P1;
    hash ^= k1;
    hash = (Long.rotateLeft(hash, 27) * P1) + P4;
    return finalize(hash);
  }

  //Final avalanche: xor-shift/multiply mixing of the accumulated hash.
  private static long finalize(long hash) {
    hash ^= hash >>> 33;
    hash *= P2;
    hash ^= hash >>> 29;
    hash *= P3;
    hash ^= hash >>> 32;
    return hash;
  }

  /**
   * Hash the given arr starting at the given offset and continuing for the given length using the
   * given seed.
   * @param arr the given array
   * @param offsetBooleans starting at this offset
   * @param lengthBooleans continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashBooleans(final boolean[] arr, final long offsetBooleans,
      final long lengthBooleans, final long seed) {
    //boolean elements are 1 byte each, so no shift of offset or length is needed
    return hash(arr, ARRAY_BOOLEAN_BASE_OFFSET + offsetBooleans, lengthBooleans, seed);
  }

  /**
   * Hash the given arr starting at the given offset and continuing for the given length using the
   * given seed.
   * @param arr the given array
   * @param offsetBytes starting at this offset
   * @param lengthBytes continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashBytes(final byte[] arr, final long offsetBytes,
      final long lengthBytes, final long seed) {
    return hash(arr, ARRAY_BYTE_BASE_OFFSET + offsetBytes, lengthBytes, seed);
  }

  /**
   * Hash the given arr starting at the given offset and continuing for the given length using the
   * given seed.
   * @param arr the given array
   * @param offsetShorts starting at this offset
   * @param lengthShorts continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashShorts(final short[] arr, final long offsetShorts,
      final long lengthShorts, final long seed) {
    //shifts convert element offsets/counts to byte offsets/counts
    return hash(arr, ARRAY_SHORT_BASE_OFFSET + (offsetShorts << SHORT_SHIFT),
        lengthShorts << SHORT_SHIFT, seed);
  }

  /**
   * Hash the given arr starting at the given offset and continuing for the given length using the
   * given seed.
   * @param arr the given array
   * @param offsetChars starting at this offset
   * @param lengthChars continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashChars(final char[] arr, final long offsetChars,
      final long lengthChars, final long seed) {
    return hash(arr, ARRAY_CHAR_BASE_OFFSET + (offsetChars << CHAR_SHIFT),
        lengthChars << CHAR_SHIFT, seed);
  }

  /**
   * Hash the given arr starting at the given offset and continuing for the given length using the
   * given seed.
   * @param arr the given array
   * @param offsetInts starting at this offset
   * @param lengthInts continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashInts(final int[] arr, final long offsetInts,
      final long lengthInts, final long seed) {
    return hash(arr, ARRAY_INT_BASE_OFFSET + (offsetInts << INT_SHIFT),
        lengthInts << INT_SHIFT, seed);
  }

  /**
   * Hash the given arr starting at the given offset and continuing for the given length using the
   * given seed.
   * @param arr the given array
   * @param offsetLongs starting at this offset
   * @param lengthLongs continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashLongs(final long[] arr, final long offsetLongs,
      final long lengthLongs, final long seed) {
    return hash(arr, ARRAY_LONG_BASE_OFFSET + (offsetLongs << LONG_SHIFT),
        lengthLongs << LONG_SHIFT, seed);
  }

  /**
   * Hash the given arr starting at the given offset and continuing for the given length using the
   * given seed.
   * @param arr the given array
   * @param offsetFloats starting at this offset
   * @param lengthFloats continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashFloats(final float[] arr, final long offsetFloats,
      final long lengthFloats, final long seed) {
    return hash(arr, ARRAY_FLOAT_BASE_OFFSET + (offsetFloats << FLOAT_SHIFT),
        lengthFloats << FLOAT_SHIFT, seed);
  }

  /**
   * Hash the given arr starting at the given offset and continuing for the given length using the
   * given seed.
   * @param arr the given array
   * @param offsetDoubles starting at this offset
   * @param lengthDoubles continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashDoubles(final double[] arr, final long offsetDoubles,
      final long lengthDoubles, final long seed) {
    return hash(arr, ARRAY_DOUBLE_BASE_OFFSET + (offsetDoubles << DOUBLE_SHIFT),
        lengthDoubles << DOUBLE_SHIFT, seed);
  }

  /**
   * Hash the given string starting at the given char offset and continuing for the given length
   * in chars using the given seed. Note: this copies the string's chars via toCharArray().
   * @param str the given string
   * @param offsetChars starting at this offset
   * @param lengthChars continuing for this length
   * @param seed the given seed
   * @return the hash
   */
  public static long hashString(final String str, final long offsetChars,
      final long lengthChars, final long seed) {
    return hashChars(str.toCharArray(), offsetChars, lengthChars, seed);
  }
}
| 2,373 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/MemoryCleaner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import sun.misc.Cleaner;
/**
* Extracts a version-dependent reference to the `sun.misc.Cleaner` into
* a standalone class. The package name for Cleaner has changed in
* later versions. The appropriate class will be loaded by the class loader
* depending on the Java version that is used.
* For more information, see: https://openjdk.java.net/jeps/238
*/
@SuppressWarnings("restriction")
public class MemoryCleaner {
  //The wrapped sun.misc.Cleaner instance; created once and never replaced.
  private final Cleaner cleaner;

  /**
   * Creates a new {@code sun.misc.Cleaner} that associates the given referent
   * with the given cleanup action.
   * @param referent the object to be cleaned
   * @param deallocator the cleanup code to be run when the cleaner is invoked.
   */
  public MemoryCleaner(final Object referent, final Runnable deallocator) {
    cleaner = Cleaner.create(referent, deallocator);
  }

  /**
   * Runs this cleaner, if it has not been run before.
   */
  public void clean() {
    cleaner.clean();
  }
}
| 2,374 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/DirectNonNativeWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import java.nio.ByteOrder;
import org.apache.datasketches.memory.MemoryRequestServer;
import org.apache.datasketches.memory.WritableBuffer;
/**
* Implementation of {@link WritableBuffer} for direct memory, non-native byte order.
*
* @author Roman Leventov
* @author Lee Rhodes
*/
final class DirectNonNativeWritableBufferImpl extends NonNativeWritableBufferImpl {
  private final AllocateDirect direct; //controls the lifecycle of the off-heap allocation

  /**
   * Constructs a writable, non-native-order Buffer view over direct (off-heap) memory.
   * Enforces a single-owner policy: only the creating thread may use this resource.
   */
  DirectNonNativeWritableBufferImpl(
      final AllocateDirect direct,
      final long offsetBytes,
      final long capacityBytes,
      final int typeId,
      final long cumOffsetBytes,
      final MemoryRequestServer memReqSvr) {
    super(capacityBytes);
    this.direct = direct;
    this.offsetBytes = offsetBytes;
    this.capacityBytes = capacityBytes;
    this.typeId = removeNnBuf(typeId) | DIRECT | BUFFER | NONNATIVE; //initially cannot be ReadOnly
    this.cumOffsetBytes = cumOffsetBytes;
    this.memReqSvr = memReqSvr; //in ResourceImpl
    if ((this.owner != null) && (this.owner != Thread.currentThread())) {
      throw new IllegalStateException(THREAD_EXCEPTION_TEXT);
    }
    this.owner = Thread.currentThread();
  }

  @Override
  BaseWritableBufferImpl toWritableRegion(
      final long regionOffsetBytes,
      final long capacityBytes,
      final boolean readOnly,
      final ByteOrder byteOrder) {
    final long newOffsetBytes = offsetBytes + regionOffsetBytes;
    final long newCumOffsetBytes = cumOffsetBytes + regionOffsetBytes;
    int typeIdOut = removeNnBuf(typeId) | BUFFER | REGION | (readOnly ? READONLY : 0);
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new DirectWritableBufferImpl(
          direct, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new DirectNonNativeWritableBufferImpl(
          direct, newOffsetBytes, capacityBytes, typeIdOut, newCumOffsetBytes, memReqSvr);
    }
  }

  @Override
  BaseWritableMemoryImpl toWritableMemory(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | MEMORY | (readOnly ? READONLY : 0);
    //Use Util.isNativeByteOrder(..) for consistency with toWritableRegion(..) above,
    //instead of a raw "==" comparison against ByteOrder.nativeOrder().
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new DirectWritableMemoryImpl(
          direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new DirectNonNativeWritableMemoryImpl(
          direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    }
  }

  @Override
  BaseWritableBufferImpl toDuplicate(final boolean readOnly, final ByteOrder byteOrder) {
    int typeIdOut = removeNnBuf(typeId) | BUFFER | DUPLICATE | (readOnly ? READONLY : 0);
    //Same consistency fix as toWritableMemory(..).
    if (Util.isNativeByteOrder(byteOrder)) {
      typeIdOut |= NATIVE;
      return new DirectWritableBufferImpl(
          direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    } else {
      typeIdOut |= NONNATIVE;
      return new DirectNonNativeWritableBufferImpl(
          direct, offsetBytes, capacityBytes, typeIdOut, cumOffsetBytes, memReqSvr);
    }
  }

  @Override
  public void close() {
    checkValid();
    checkThread(owner); //only the owning thread may release the allocation
    direct.close();
  }

  @Override
  Object getUnsafeObject() {
    return null; //direct memory has no backing heap object
  }

  @Override
  public boolean isValid() {
    return direct.getValid().get();
  }
}
| 2,375 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/NativeWritableBufferImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_CHAR_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_DOUBLE_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_FLOAT_INDEX_SCALE;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_INT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_LONG_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.ARRAY_SHORT_BASE_OFFSET;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import org.apache.datasketches.memory.WritableBuffer;
/**
* Implementation of {@link WritableBuffer} for native endian byte order.
* @author Roman Leventov
* @author Lee Rhodes
*/
@SuppressWarnings("restriction")
abstract class NativeWritableBufferImpl extends BaseWritableBufferImpl {
  //Pass-through constructor: all state initialization is done by the subclasses.
  NativeWritableBufferImpl(final long capacityBytes) { super(capacityBytes); }
  //PRIMITIVE getX() and getXArray()
  @Override
  public char getChar() {
    //native byte order: delegate directly to the native-ordered read at the current position
    return getNativeOrderedChar();
  }
  @Override
  public char getChar(final long offsetBytes) {
    return getNativeOrderedChar(offsetBytes);
  }
  @Override
  public void getCharArray(final char[] dstArray, final int dstOffsetChars, final int lengthChars) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthChars) << CHAR_SHIFT; //element count -> byte count
    incrementAndCheckPositionForRead(pos, copyBytes); //advances position; throws on overrun
    ResourceImpl.checkBounds(dstOffsetChars, lengthChars, dstArray.length);
    CompareAndCopy.copyMemoryCheckingDifferentObject(
        getUnsafeObject(),
        getCumulativeOffset(pos),
        dstArray,
        ARRAY_CHAR_BASE_OFFSET + (((long) dstOffsetChars) << CHAR_SHIFT),
        copyBytes);
  }
  @Override
  public double getDouble() {
    //doubles are read via unsafe at the current position (no byte-order swap variant here)
    final long pos = getPosition();
    incrementAndCheckPositionForRead(pos, ARRAY_DOUBLE_INDEX_SCALE);
    return unsafe.getDouble(getUnsafeObject(), getCumulativeOffset(pos));
  }
  @Override
  public double getDouble(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_DOUBLE_INDEX_SCALE);
    return unsafe.getDouble(getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
  @Override
  public void getDoubleArray(final double[] dstArray, final int dstOffsetDoubles,
      final int lengthDoubles) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthDoubles) << DOUBLE_SHIFT; //element count -> byte count
    incrementAndCheckPositionForRead(pos, copyBytes);
    ResourceImpl.checkBounds(dstOffsetDoubles, lengthDoubles, dstArray.length);
    CompareAndCopy.copyMemoryCheckingDifferentObject(
        getUnsafeObject(),
        getCumulativeOffset(pos),
        dstArray,
        ARRAY_DOUBLE_BASE_OFFSET + (((long) dstOffsetDoubles) << DOUBLE_SHIFT),
        copyBytes);
  }
  @Override
  public float getFloat() {
    //floats are read via unsafe at the current position (no byte-order swap variant here)
    final long pos = getPosition();
    incrementAndCheckPositionForRead(pos, ARRAY_FLOAT_INDEX_SCALE);
    return unsafe.getFloat(getUnsafeObject(), getCumulativeOffset(pos));
  }
  @Override
  public float getFloat(final long offsetBytes) {
    checkValidAndBounds(offsetBytes, ARRAY_FLOAT_INDEX_SCALE);
    return unsafe.getFloat(getUnsafeObject(), getCumulativeOffset(offsetBytes));
  }
  @Override
  public void getFloatArray(final float[] dstArray, final int dstOffsetFloats,
      final int lengthFloats) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthFloats) << FLOAT_SHIFT; //element count -> byte count
    incrementAndCheckPositionForRead(pos, copyBytes);
    ResourceImpl.checkBounds(dstOffsetFloats, lengthFloats, dstArray.length);
    CompareAndCopy.copyMemoryCheckingDifferentObject(
        getUnsafeObject(),
        getCumulativeOffset(pos),
        dstArray,
        ARRAY_FLOAT_BASE_OFFSET + (((long) dstOffsetFloats) << FLOAT_SHIFT),
        copyBytes);
  }
  @Override
  public int getInt() {
    //native byte order: delegate directly to the native-ordered read at the current position
    return getNativeOrderedInt();
  }
  @Override
  public int getInt(final long offsetBytes) {
    return getNativeOrderedInt(offsetBytes);
  }
  @Override
  public void getIntArray(final int[] dstArray, final int dstOffsetInts, final int lengthInts) {
    final long pos = getPosition();
    final long copyBytes = ((long) lengthInts) << INT_SHIFT; //element count -> byte count
    incrementAndCheckPositionForRead(pos, copyBytes);
    ResourceImpl.checkBounds(dstOffsetInts, lengthInts, dstArray.length);
    CompareAndCopy.copyMemoryCheckingDifferentObject(
        getUnsafeObject(),
        getCumulativeOffset(pos),
        dstArray,
        ARRAY_INT_BASE_OFFSET + (((long) dstOffsetInts) << INT_SHIFT),
        copyBytes);
  }
@Override
public long getLong() {
  // Positional read: delegates to the native-byte-order long read, which
  // presumably also advances the current position (see getNativeOrderedLong) -- TODO confirm.
  return getNativeOrderedLong();
}

@Override
public long getLong(final long offsetBytes) {
  // Absolute read at offsetBytes in native byte order; position is untouched.
  return getNativeOrderedLong(offsetBytes);
}
@Override
public void getLongArray(final long[] dstArray, final int dstOffsetLongs, final int lengthLongs) {
  // Bulk positional read of lengthLongs longs into dstArray[dstOffsetLongs...].
  final long readPos = getPosition();
  final long bytesToCopy = ((long) lengthLongs) << LONG_SHIFT;
  incrementAndCheckPositionForRead(readPos, bytesToCopy);
  // Validate the destination window before touching memory.
  ResourceImpl.checkBounds(dstOffsetLongs, lengthLongs, dstArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      getUnsafeObject(),
      getCumulativeOffset(readPos),
      dstArray,
      ARRAY_LONG_BASE_OFFSET + (((long) dstOffsetLongs) << LONG_SHIFT),
      bytesToCopy);
}
@Override
public short getShort() {
  // Positional read: delegates to the native-byte-order short read, which
  // presumably also advances the current position (see getNativeOrderedShort) -- TODO confirm.
  return getNativeOrderedShort();
}

@Override
public short getShort(final long offsetBytes) {
  // Absolute read at offsetBytes in native byte order; position is untouched.
  return getNativeOrderedShort(offsetBytes);
}
@Override
public void getShortArray(final short[] dstArray, final int dstOffsetShorts,
    final int lengthShorts) {
  // Bulk positional read of lengthShorts shorts into dstArray[dstOffsetShorts...].
  final long readPos = getPosition();
  final long bytesToCopy = ((long) lengthShorts) << SHORT_SHIFT;
  incrementAndCheckPositionForRead(readPos, bytesToCopy);
  // Validate the destination window before touching memory.
  ResourceImpl.checkBounds(dstOffsetShorts, lengthShorts, dstArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      getUnsafeObject(),
      getCumulativeOffset(readPos),
      dstArray,
      ARRAY_SHORT_BASE_OFFSET + (((long) dstOffsetShorts) << SHORT_SHIFT),
      bytesToCopy);
}
//PRIMITIVE putX() and putXArray()
@Override
public void putChar(final char value) {
  // Positional write: delegates to the native-byte-order char write, which
  // presumably also advances the current position (see putNativeOrderedChar) -- TODO confirm.
  putNativeOrderedChar(value);
}

@Override
public void putChar(final long offsetBytes, final char value) {
  // Absolute write at offsetBytes in native byte order; position is untouched.
  putNativeOrderedChar(offsetBytes, value);
}
@Override
public void putCharArray(final char[] srcArray, final int srcOffsetChars, final int lengthChars) {
  // Bulk positional write of lengthChars chars from srcArray[srcOffsetChars...].
  final long writePos = getPosition();
  final long bytesToCopy = ((long) lengthChars) << CHAR_SHIFT;
  incrementAndCheckPositionForWrite(writePos, bytesToCopy);
  // Validate the source window before touching memory.
  ResourceImpl.checkBounds(srcOffsetChars, lengthChars, srcArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray,
      ARRAY_CHAR_BASE_OFFSET + (((long) srcOffsetChars) << CHAR_SHIFT),
      getUnsafeObject(),
      getCumulativeOffset(writePos),
      bytesToCopy);
}
@Override
public void putDouble(final double value) {
  // Positional write: store the double at the current position, then advance
  // the position by one double width.
  final long writePos = getPosition();
  incrementAndCheckPositionForWrite(writePos, ARRAY_DOUBLE_INDEX_SCALE);
  unsafe.putDouble(getUnsafeObject(), getCumulativeOffset(writePos), value);
}

@Override
public void putDouble(final long offsetBytes, final double value) {
  // Absolute write at offsetBytes; the current position is not consulted or modified.
  checkValidAndBoundsForWrite(offsetBytes, ARRAY_DOUBLE_INDEX_SCALE);
  unsafe.putDouble(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
@Override
public void putDoubleArray(final double[] srcArray, final int srcOffsetDoubles,
    final int lengthDoubles) {
  // Bulk positional write of lengthDoubles doubles from srcArray[srcOffsetDoubles...].
  final long writePos = getPosition();
  final long bytesToCopy = ((long) lengthDoubles) << DOUBLE_SHIFT;
  incrementAndCheckPositionForWrite(writePos, bytesToCopy);
  // Validate the source window before touching memory.
  ResourceImpl.checkBounds(srcOffsetDoubles, lengthDoubles, srcArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray,
      ARRAY_DOUBLE_BASE_OFFSET + (((long) srcOffsetDoubles) << DOUBLE_SHIFT),
      getUnsafeObject(),
      getCumulativeOffset(writePos),
      bytesToCopy);
}
@Override
public void putFloat(final float value) {
  // Positional write: store the float at the current position, then advance
  // the position by one float width.
  final long writePos = getPosition();
  incrementAndCheckPositionForWrite(writePos, ARRAY_FLOAT_INDEX_SCALE);
  unsafe.putFloat(getUnsafeObject(), getCumulativeOffset(writePos), value);
}

@Override
public void putFloat(final long offsetBytes, final float value) {
  // Absolute write at offsetBytes; the current position is not consulted or modified.
  checkValidAndBoundsForWrite(offsetBytes, ARRAY_FLOAT_INDEX_SCALE);
  unsafe.putFloat(getUnsafeObject(), getCumulativeOffset(offsetBytes), value);
}
@Override
public void putFloatArray(final float[] srcArray, final int srcOffsetFloats,
    final int lengthFloats) {
  // Bulk positional write of lengthFloats floats from srcArray[srcOffsetFloats...].
  final long writePos = getPosition();
  final long bytesToCopy = ((long) lengthFloats) << FLOAT_SHIFT;
  incrementAndCheckPositionForWrite(writePos, bytesToCopy);
  // Validate the source window before touching memory.
  ResourceImpl.checkBounds(srcOffsetFloats, lengthFloats, srcArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray,
      ARRAY_FLOAT_BASE_OFFSET + (((long) srcOffsetFloats) << FLOAT_SHIFT),
      getUnsafeObject(),
      getCumulativeOffset(writePos),
      bytesToCopy);
}
@Override
public void putInt(final int value) {
  // Positional write: delegates to the native-byte-order int write, which
  // presumably also advances the current position (see putNativeOrderedInt) -- TODO confirm.
  putNativeOrderedInt(value);
}

@Override
public void putInt(final long offsetBytes, final int value) {
  // Absolute write at offsetBytes in native byte order; position is untouched.
  putNativeOrderedInt(offsetBytes, value);
}
@Override
public void putIntArray(final int[] srcArray, final int srcOffsetInts, final int lengthInts) {
  // Bulk positional write of lengthInts ints from srcArray[srcOffsetInts...].
  final long writePos = getPosition();
  final long bytesToCopy = ((long) lengthInts) << INT_SHIFT;
  incrementAndCheckPositionForWrite(writePos, bytesToCopy);
  // Validate the source window before touching memory.
  ResourceImpl.checkBounds(srcOffsetInts, lengthInts, srcArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray,
      ARRAY_INT_BASE_OFFSET + (((long) srcOffsetInts) << INT_SHIFT),
      getUnsafeObject(),
      getCumulativeOffset(writePos),
      bytesToCopy);
}
@Override
public void putLong(final long value) {
  // Positional write: delegates to the native-byte-order long write, which
  // presumably also advances the current position (see putNativeOrderedLong) -- TODO confirm.
  putNativeOrderedLong(value);
}

@Override
public void putLong(final long offsetBytes, final long value) {
  // Absolute write at offsetBytes in native byte order; position is untouched.
  putNativeOrderedLong(offsetBytes, value);
}
@Override
public void putLongArray(final long[] srcArray, final int srcOffsetLongs, final int lengthLongs) {
  // Bulk positional write of lengthLongs longs from srcArray[srcOffsetLongs...].
  final long writePos = getPosition();
  final long bytesToCopy = ((long) lengthLongs) << LONG_SHIFT;
  incrementAndCheckPositionForWrite(writePos, bytesToCopy);
  // Validate the source window before touching memory.
  ResourceImpl.checkBounds(srcOffsetLongs, lengthLongs, srcArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray,
      ARRAY_LONG_BASE_OFFSET + (((long) srcOffsetLongs) << LONG_SHIFT),
      getUnsafeObject(),
      getCumulativeOffset(writePos),
      bytesToCopy);
}
@Override
public void putShort(final short value) {
  // Positional write: delegates to the native-byte-order short write, which
  // presumably also advances the current position (see putNativeOrderedShort) -- TODO confirm.
  putNativeOrderedShort(value);
}

@Override
public void putShort(final long offsetBytes, final short value) {
  // Absolute write at offsetBytes in native byte order; position is untouched.
  putNativeOrderedShort(offsetBytes, value);
}
@Override
public void putShortArray(final short[] srcArray, final int srcOffsetShorts,
    final int lengthShorts) {
  // Bulk positional write of lengthShorts shorts from srcArray[srcOffsetShorts...].
  final long writePos = getPosition();
  final long bytesToCopy = ((long) lengthShorts) << SHORT_SHIFT;
  incrementAndCheckPositionForWrite(writePos, bytesToCopy);
  // Validate the source window before touching memory.
  ResourceImpl.checkBounds(srcOffsetShorts, lengthShorts, srcArray.length);
  CompareAndCopy.copyMemoryCheckingDifferentObject(
      srcArray,
      ARRAY_SHORT_BASE_OFFSET + (((long) srcOffsetShorts) << SHORT_SHIFT),
      getUnsafeObject(),
      getCumulativeOffset(writePos),
      bytesToCopy);
}
}
| 2,376 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/AllocateDirectWritableMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.datasketches.memory.internal;
import static org.apache.datasketches.memory.internal.UnsafeUtil.unsafe;
import java.io.File;
import java.io.FileDescriptor;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.logging.Logger;
import sun.nio.ch.FileChannelImpl;
/**
* Allocates direct memory used to memory map files for read or write operations.
* (including those > 2GB).
*
* <p>To understand how it works, reference native code for map0, unmap0:
* <a href="http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/f940e7a48b72/src/solaris/native/sun/nio/ch/FileChannelImpl.c">
* FileChannelImpl.c</a></p>
*
* <p>To understand how it works, reference native code for load0(), isLoaded0(), and force0():
* <a href="http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/f940e7a48b72/src/solaris/native/java/nio/MappedByteBuffer.c">
* MappedByteBuffer.c</a></p>
*
* @author Roman Leventov
* @author Lee Rhodes
* @author Praveenkumar Venkatesan
*/
@SuppressWarnings("restriction")
class AllocateDirectWritableMap {
static final Logger LOG = Logger.getLogger(AllocateDirectWritableMap.class.getCanonicalName());
private static final int MAP_RO = 0;
private static final int MAP_RW = 1;
private static final Method FILE_CHANNEL_IMPL_MAP0_METHOD;
static final Method FILE_CHANNEL_IMPL_UNMAP0_METHOD;
private static final Method MAPPED_BYTE_BUFFER_LOAD0_METHOD;
private static final Method MAPPED_BYTE_BUFFER_ISLOADED0_METHOD;
static final Method MAPPED_BYTE_BUFFER_FORCE0_METHOD;
private static int pageSize = unsafe.pageSize();
static {
try { //The FileChannelImpl methods map0 and unmap0 still exist in 16
FILE_CHANNEL_IMPL_MAP0_METHOD = FileChannelImpl.class
.getDeclaredMethod("map0", int.class, long.class, long.class); //JDK14 add boolean.class
FILE_CHANNEL_IMPL_MAP0_METHOD.setAccessible(true);
FILE_CHANNEL_IMPL_UNMAP0_METHOD = FileChannelImpl.class
.getDeclaredMethod("unmap0", long.class, long.class); //OK through jDK16
FILE_CHANNEL_IMPL_UNMAP0_METHOD.setAccessible(true);
//The MappedByteBuffer methods load0, isLoaded0 and force0 are removed in 15
MAPPED_BYTE_BUFFER_LOAD0_METHOD = MappedByteBuffer.class
.getDeclaredMethod("load0", long.class, long.class); //JDK15 removed
MAPPED_BYTE_BUFFER_LOAD0_METHOD.setAccessible(true);
MAPPED_BYTE_BUFFER_ISLOADED0_METHOD = MappedByteBuffer.class
.getDeclaredMethod("isLoaded0", long.class, long.class, int.class); //JDK15 removed
MAPPED_BYTE_BUFFER_ISLOADED0_METHOD.setAccessible(true);
MAPPED_BYTE_BUFFER_FORCE0_METHOD = MappedByteBuffer.class
.getDeclaredMethod("force0", FileDescriptor.class, long.class, long.class); //JDK15 removed
MAPPED_BYTE_BUFFER_FORCE0_METHOD.setAccessible(true);
} catch (final SecurityException | NoSuchMethodException e) {
throw new RuntimeException("Could not reflect static methods: " + e);
}
}
private final Deallocator deallocator;
private final MemoryCleaner cleaner;
private final File file;
final long capacityBytes;
final RandomAccessFile raf;
final long nativeBaseOffset;
final boolean resourceReadOnly;
/**
 * Opens {@code file} and memory-maps {@code capacityBytes} bytes starting at
 * {@code fileOffsetBytes}. Read-only mode is forced when the file itself is not
 * writable, regardless of {@code localReadOnly}. A cleaner is registered so the
 * mapping is released even if close() is never called.
 *
 * @param file the file to map
 * @param fileOffsetBytes byte offset into the file where the mapped region starts
 * @param capacityBytes number of bytes to map
 * @param localReadOnly true when the caller requests a read-only view
 * @throws IllegalArgumentException in read-only mode when the requested region
 *         extends past the current end of file (the file cannot be grown)
 */
AllocateDirectWritableMap(final File file, final long fileOffsetBytes, final long capacityBytes,
    final boolean localReadOnly) {
  this.file = file;
  this.capacityBytes = capacityBytes;
  resourceReadOnly = isFileReadOnly(file);
  final long fileLength = file.length();
  // Writable maps may grow the file (see mapper); read-only maps must fit entirely.
  if ((localReadOnly || resourceReadOnly) && fileOffsetBytes + capacityBytes > fileLength) {
    throw new IllegalArgumentException(
        "Read-only mode and requested map length is greater than current file length: "
            + "Requested Length = " + (fileOffsetBytes + capacityBytes)
            + ", Current File Length = " + fileLength);
  }
  raf = mapper(file, fileOffsetBytes, capacityBytes, resourceReadOnly);
  nativeBaseOffset = map(raf.getChannel(), resourceReadOnly, fileOffsetBytes, capacityBytes);
  // Deallocator owns the unmap/close; the cleaner is the GC-driven safety net.
  deallocator = new Deallocator(nativeBaseOffset, capacityBytes, raf);
  cleaner = new MemoryCleaner(this, deallocator);
}
/**
 * Deterministically unmaps the region and closes the backing file. Idempotent:
 * a second call is a no-op because the deallocator's valid flag has already flipped.
 *
 * @throws IllegalStateException if the unmap/close fails; the underlying exception
 *         is preserved as the cause
 */
public void close() {
  try {
    if (deallocator.deallocate(false)) {
      // This Cleaner.clean() call effectively just removes the Cleaner from the internal linked
      // list of all cleaners. It will delegate to Deallocator.deallocate() which will be a no-op
      // because the valid state is already changed.
      cleaner.clean();
    }
  } catch (final Exception e) {
    // Fix: chain the underlying exception as the cause instead of flattening it
    // into the message string, so the full stack trace is not lost.
    throw new IllegalStateException(
        "Attempted close of Memory-Mapped File: " + file.getName(), e);
  } finally {
    // Keep 'this' reachable until cleanup completes so the cleaner cannot run early.
    ResourceImpl.reachabilityFence(this);
  }
}
/**
 * Flushes changes made to the mapped region back to the file via the native
 * {@code force0} (invoked reflectively so regions larger than 2GB are supported).
 *
 * @throws RuntimeException wrapping any reflection or I/O failure
 */
public void force() {
  try {
    MAPPED_BYTE_BUFFER_FORCE0_METHOD
        //force0 is effectively static, so ZERO_READ_ONLY_DIRECT_BYTE_BUFFER is not modified
        .invoke(AccessByteBuffer.ZERO_READ_ONLY_DIRECT_BYTE_BUFFER,
            raf.getFD(),
            nativeBaseOffset,
            capacityBytes);
  } catch (final IOException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
    // Fix: the original used String.format with a "%s" placeholder but no argument,
    // which itself threw MissingFormatArgumentException and masked the real failure.
    // Build the message directly and keep the original exception as the cause.
    throw new RuntimeException("Encountered " + e.getClass() + " exception in force.", e);
  }
}
/** Exposes the deallocator's one-way valid flag for lifecycle checks. */
public StepBoolean getValid() {
  return deallocator.getValid();
}

/** A file the process cannot write is treated as read-only. */
public static boolean isFileReadOnly(final File file) {
  return !file.canWrite();
}
/**
 * Reports whether the mapped region is resident in physical memory, via the native
 * {@code isLoaded0} (invoked reflectively so regions larger than 2GB are supported).
 *
 * @return true if the OS reports all pages of the region resident
 * @throws RuntimeException wrapping any reflection failure
 */
public boolean isLoaded() {
  try {
    return (boolean) MAPPED_BYTE_BUFFER_ISLOADED0_METHOD
        //isLoaded0 is effectively static, so ZERO_READ_ONLY_DIRECT_BYTE_BUFFER is not modified
        .invoke(AccessByteBuffer.ZERO_READ_ONLY_DIRECT_BYTE_BUFFER,
            nativeBaseOffset,
            capacityBytes,
            pageCount(capacityBytes));
  } catch (final IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
    // Fix: the message said "while loading" (copy-paste from madvise) even though this
    // method checks residency; also keep the underlying exception as the cause.
    throw new RuntimeException(
        String.format("Encountered %s exception while checking isLoaded", e.getClass()), e);
  }
}
/**
 * Advises the OS to load the mapped region, then touches one byte per page to
 * encourage the pages into physical memory.
 */
public void load() {
  madvise();
  // Performance optimization. Read a byte from each page to bring it into memory.
  final int count = pageCount(capacityBytes);
  long offset = nativeBaseOffset;
  for (int i = 0; i < count; i++) {
    unsafe.getByte(offset);
    offset += pageSize;
  }
}
// Private methods
/**
 * Called by load(). Calls the native method load0 in MappedByteBuffer.java, implemented
 * in MappedByteBuffer.c. See reference at top of class. load0 allows setting a mapping length
 * of greater than 2GB, unlike the public MappedByteBuffer.load().
 */
private void madvise() {
  try {
    MAPPED_BYTE_BUFFER_LOAD0_METHOD
        //load0 is effectively static, so ZERO_READ_ONLY_DIRECT_BYTE_BUFFER is not modified
        .invoke(AccessByteBuffer.ZERO_READ_ONLY_DIRECT_BYTE_BUFFER,
            nativeBaseOffset,
            capacityBytes);
  } catch (final IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
    // NOTE(review): the original exception is dropped here (only its class is reported);
    // consider chaining it as the cause.
    throw new RuntimeException(
        String.format("Encountered %s exception while loading", e.getClass()));
  }
}
/**
 * Computes the number of OS pages needed to span {@code bytes} bytes (ceiling division).
 *
 * @param bytes the number of bytes to cover; expected non-negative
 * @return the page count
 */
private static int pageCount(final long bytes) {
  // Fix: the original was (int)((bytes + pageSize) - 1L) / pageSize, which cast the
  // long numerator to int BEFORE dividing. For mappings over ~2GB (explicitly supported
  // by this class) the truncation produced a wrong, possibly negative, page count.
  // Divide in long arithmetic first, then narrow the (small) quotient.
  return (int) (((bytes + pageSize) - 1L) / pageSize);
}
/**
 * Opens the file backing the mapping; {@code resourceReadOnly} must already be determined.
 * In writable ("rw") mode the file is grown via setLength when the requested region
 * extends past the current end of file.
 *
 * @param file the file to open
 * @param fileOffset byte offset of the region start within the file
 * @param capacityBytes size in bytes of the region
 * @param resourceReadOnly true opens the file with mode "r", false with "rw"
 * @return the opened RandomAccessFile
 * @throws RuntimeException wrapping any IOException from opening or growing the file
 */
private static RandomAccessFile mapper(final File file, final long fileOffset,
    final long capacityBytes, final boolean resourceReadOnly) {
  final String mode = resourceReadOnly ? "r" : "rw";
  final RandomAccessFile raf;
  try {
    raf = new RandomAccessFile(file, mode);
    if (fileOffset + capacityBytes > raf.length()) {
      raf.setLength(fileOffset + capacityBytes);
    }
  } catch (final IOException e) {
    throw new RuntimeException(e);
  }
  return raf;
}
/**
 * Creates a mapping of the FileChannel starting at position and of size length to pages
 * in the OS. This may throw OutOfMemory error if you have exhausted memory.
 * You can try to force garbage collection and re-attempt.
 *
 * <p>map0 is a native method of FileChannelImpl.java implemented in FileChannelImpl.c.
 * See reference at top of class.</p>
 *
 * @param fileChannel the FileChannel
 * @param resourceReadOnly true maps the region read-only (MAP_RO), false read-write (MAP_RW)
 * @param position the offset in bytes into the FileChannel
 * @param lengthBytes the length in bytes
 * @return the native base offset address
 * @throws RuntimeException Encountered an exception while mapping
 */
private static long map(final FileChannel fileChannel, final boolean resourceReadOnly,
    final long position, final long lengthBytes) {
  // map0 requires a page-aligned start: back up to the page boundary and enlarge the size.
  final int pagePosition = (int) (position % unsafe.pageSize());
  final long mapPosition = position - pagePosition;
  final long mapSize = lengthBytes + pagePosition;
  final int mapMode = resourceReadOnly ? MAP_RO : MAP_RW;
  //final boolean isSync = true; //required as of JDK14, but it is more complex
  try {
    // NOTE(review): the returned address corresponds to mapPosition, not position;
    // callers appear to assume page-aligned offsets -- confirm pagePosition handling upstream.
    final long nativeBaseOffset = //JDK14 add isSync
        (long) FILE_CHANNEL_IMPL_MAP0_METHOD.invoke(fileChannel, mapMode, mapPosition, mapSize);
    return nativeBaseOffset;
  } catch (final InvocationTargetException e) {
    throw new RuntimeException("Exception while mapping", e.getTargetException());
  } catch (final IllegalAccessException e) {
    throw new RuntimeException("Exception while mapping", e);
  }
}
/**
 * Runs exactly once to unmap the native region and close the backing file,
 * either deterministically via close() or non-deterministically via the cleaner.
 */
private static final class Deallocator implements Runnable {
  private final RandomAccessFile myRaf;
  private final FileChannel myFc;
  //This is the only place the actual native offset is kept for use by unsafe.freeMemory();
  private final long actualNativeBaseOffset;
  private final long myCapacity;
  private final StepBoolean valid = new StepBoolean(true); //only place for this

  Deallocator(final long nativeBaseOffset, final long capacityBytes,
      final RandomAccessFile raf) {
    myRaf = raf;
    assert myRaf != null;
    myFc = myRaf.getChannel();
    actualNativeBaseOffset = nativeBaseOffset;
    assert actualNativeBaseOffset != 0;
    myCapacity = capacityBytes;
    assert myCapacity != 0;
  }

  StepBoolean getValid() {
    return valid;
  }

  @Override
  public void run() throws IllegalStateException {
    deallocate(true);
  }

  /**
   * Performs the unmap/close exactly once.
   *
   * @param calledFromCleaner true when invoked by the GC-driven cleaner rather than close()
   * @return true if this call performed the deallocation; false if it already happened
   */
  boolean deallocate(final boolean calledFromCleaner) throws IllegalStateException {
    if (valid.change()) {
      if (calledFromCleaner) {
        // Warn about non-deterministic resource cleanup.
        LOG.warning("A direct mapped resource was not closed explicitly");
      }
      unmap();
      return true;
    }
    return false;
  }

  /**
   * Removes existing mapping. <i>unmap0</i> is a native method in FileChannelImpl.c. See
   * reference at top of class.
   */
  private void unmap() throws IllegalStateException {
    try {
      FILE_CHANNEL_IMPL_UNMAP0_METHOD.invoke(myFc, actualNativeBaseOffset, myCapacity);
      myRaf.close();
    } catch (final IllegalAccessException | IllegalArgumentException | InvocationTargetException | IOException e) {
      // Fix: chain the underlying exception as the cause instead of discarding it.
      throw new IllegalStateException(
          String.format("Encountered %s exception while freeing memory", e.getClass()), e);
    }
  }
} //End of class Deallocator
}
| 2,377 |
0 | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory | Create_ds/datasketches-memory/datasketches-memory-java8/src/main/java/org/apache/datasketches/memory/internal/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* @author Lee Rhodes
*/
package org.apache.datasketches.memory.internal;
| 2,378 |
0 | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/utils/FileTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
/**
* A test utility for creating temporary Path resources for tests that will clean themselves up after execution.
*/
/**
 * Test helper that hands out temporary {@link Path} resources which delete
 * themselves (best-effort, via deleteOnExit) when the JVM terminates.
 */
public abstract class FileTestUtility {

    /**
     * Creates a temporary directory with the prefix "temp" marked with deleteOnExit.
     *
     * @return the new directory's Path
     * @throws IOException if the directory cannot be created
     */
    public static Path createTempDir() throws IOException {
        final Path dir = Files.createTempDirectory("temp");
        dir.toFile().deleteOnExit();
        return dir;
    }

    /**
     * Creates a temporary file named "testFile*.tmp" marked with deleteOnExit.
     *
     * @return the new file's Path
     * @throws IOException if the file cannot be created
     */
    public static Path createTempFile() throws IOException {
        return createTempFile("testFile", ".tmp");
    }

    /**
     * Creates a temporary file with the given prefix and suffix, inside a fresh
     * temporary directory, both marked with deleteOnExit.
     *
     * @param prefix file-name prefix
     * @param suffix file-name suffix
     * @return the new file's Path
     * @throws IOException if the file cannot be created
     */
    public static Path createTempFile(final String prefix, final String suffix) throws IOException {
        final Path parent = createTempDir();
        final Path file = Files.createTempFile(parent, prefix, suffix);
        file.toFile().deleteOnExit();
        return file;
    }

    /**
     * Resolves (but does not create) a file name inside a fresh temporary
     * directory; the resolved Path is marked with deleteOnExit.
     *
     * @param fileName name of the entry to resolve
     * @return the resolved Path
     * @throws IOException if the parent directory cannot be created
     */
    public static Path resolve(final String fileName) throws IOException {
        return resolve(fileName, createTempDir());
    }

    // Shared helper: resolve fileName against tempDir and register it for cleanup.
    private static Path resolve(final String fileName, final Path tempDir) {
        final Path resolved = tempDir.resolve(fileName);
        resolved.toFile().deleteOnExit();
        return resolved;
    }
}
| 2,379 |
0 | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/csv/CsvExampleTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.csv;
import com.amazonaws.c3r.examples.utils.FileTestUtility;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import static org.junit.jupiter.api.Assertions.assertTrue;
/** Round-trip smoke test for the CSV example: encrypt then decrypt a sample file. */
public class CsvExampleTest {
    @Test
    public void roundTripTest() throws IOException {
        // Sample data is addressed relative to the module root; assumes the test
        // runs with the c3r-sdk-examples working directory -- TODO confirm in CI.
        final Path inputCsv = Path.of("../samples/csv/data_sample_without_quotes.csv");
        final Path encryptedCsv = FileTestUtility.createTempFile("encrypted", ".csv");
        final Path decryptedCsv = FileTestUtility.createTempFile("decrypted", ".csv");
        // Only sanity-checked: each stage must produce non-empty output.
        CsvExample.encrypt(inputCsv.toString(), encryptedCsv.toString());
        assertTrue(Files.size(encryptedCsv) > 0);
        CsvExample.decrypt(encryptedCsv.toString(), decryptedCsv.toString());
        assertTrue(Files.size(decryptedCsv) > 0);
    }
}
| 2,380 |
0 | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/csv/CsvNoHeaderExampleTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.csv;
import com.amazonaws.c3r.examples.utils.FileTestUtility;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import static org.junit.jupiter.api.Assertions.assertTrue;
/** Round-trip smoke test for the headerless-CSV example: encrypt then decrypt. */
public class CsvNoHeaderExampleTest {
    @Test
    public void roundTripTest() throws IOException {
        // Sample data is addressed relative to the module root; assumes the test
        // runs with the c3r-sdk-examples working directory -- TODO confirm in CI.
        final Path inputCsv = Path.of("../samples/csv/data_sample_no_headers.csv");
        final Path encryptedCsv = FileTestUtility.createTempFile("encrypted", ".csv");
        final Path decryptedCsv = FileTestUtility.createTempFile("decrypted", ".csv");
        // Only sanity-checked: each stage must produce non-empty output.
        CsvNoHeaderExample.encrypt(inputCsv.toString(), encryptedCsv.toString());
        assertTrue(Files.size(encryptedCsv) > 0);
        CsvNoHeaderExample.decrypt(encryptedCsv.toString(), decryptedCsv.toString());
        assertTrue(Files.size(decryptedCsv) > 0);
    }
}
| 2,381 |
0 | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/parquet/ParquetExampleTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.parquet;
import com.amazonaws.c3r.examples.utils.FileTestUtility;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import static org.junit.jupiter.api.Assertions.assertTrue;
/** Round-trip smoke test for the Parquet example: encrypt then decrypt a sample file. */
public class ParquetExampleTest {
    @Test
    public void roundTripTest() throws IOException {
        // Sample data is addressed relative to the module root; assumes the test
        // runs with the c3r-sdk-examples working directory -- TODO confirm in CI.
        final Path inputParquet = Path.of("../samples/parquet/data_sample.parquet");
        final Path encryptedParquet = FileTestUtility.createTempFile("encrypted", ".parquet");
        final Path decryptedParquet = FileTestUtility.createTempFile("decrypted", ".parquet");
        // Only sanity-checked: each stage must produce non-empty output.
        ParquetExample.encrypt(inputParquet.toString(), encryptedParquet.toString());
        assertTrue(Files.size(encryptedParquet) > 0);
        ParquetExample.decrypt(encryptedParquet.toString(), decryptedParquet.toString());
        assertTrue(Files.size(decryptedParquet) > 0);
    }
}
| 2,382 |
0 | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/spark/SparkExampleTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.spark;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.examples.utils.FileTestUtility;
import com.amazonaws.c3r.json.GsonUtil;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/** Round-trip smoke test for the Spark example: encrypt then decrypt Spark part files. */
public class SparkExampleTest {
    @Test
    public void roundTripTest() throws IOException {
        // Sample data is addressed relative to the module root; assumes the test
        // runs with the c3r-sdk-examples working directory -- TODO confirm in CI.
        final Path source = Path.of("../samples/csv/data_sample_with_quotes.csv");
        final Path encryptTarget = FileTestUtility.createTempDir();
        final Path schemaFile = Path.of("../samples/schema/config_sample.json");
        final TableSchema schema = GsonUtil.fromJson(Files.readString(schemaFile), TableSchema.class);

        SparkExample.encrypt(source.toString(), encryptTarget.toString(), schema);
        // Spark writes one or more part files; keep only the CSV outputs.
        final List<File> encryptedCsvs = Arrays.stream(Objects.requireNonNull(encryptTarget.toFile().listFiles()))
                .filter(file -> file.getAbsolutePath().endsWith(".csv"))
                .collect(Collectors.toList());
        // Fix: guard against an empty output set, which previously let the loops pass vacuously.
        assertTrue(!encryptedCsvs.isEmpty());
        for (File encryptedCsv : encryptedCsvs) {
            assertNotNull(encryptedCsv);
            assertTrue(encryptedCsv.exists());
            assertTrue(Files.size(encryptedCsv.toPath()) > 0);
        }

        final Path decryptTarget = FileTestUtility.createTempDir();
        for (File encryptedCsv : encryptedCsvs) {
            // Fix: the original loop re-asserted the *encrypted* inputs here (copy-paste);
            // the decrypted outputs are verified below instead.
            SparkExample.decrypt(encryptedCsv.getAbsolutePath(), decryptTarget.toString());
        }
        final List<File> decryptedCsvs = Arrays.stream(Objects.requireNonNull(decryptTarget.toFile().listFiles()))
                .filter(file -> file.getAbsolutePath().endsWith(".csv"))
                .collect(Collectors.toList());
        assertTrue(!decryptedCsvs.isEmpty());
        for (File decryptedCsv : decryptedCsvs) {
            assertNotNull(decryptedCsv);
            assertTrue(decryptedCsv.exists());
            assertTrue(Files.size(decryptedCsv.toPath()) > 0);
        }
    }
}
| 2,383 |
0 | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/csv/CsvExample.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.csv;
import com.amazonaws.c3r.action.CsvRowMarshaller;
import com.amazonaws.c3r.action.CsvRowUnmarshaller;
import com.amazonaws.c3r.action.RowMarshaller;
import com.amazonaws.c3r.action.RowUnmarshaller;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.DecryptConfig;
import com.amazonaws.c3r.config.EncryptConfig;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.io.FileFormat;
import java.util.List;
/**
* Examples of encrypting and decrypting CSV files.
*/
public final class CsvExample {
/**
* An example 32-byte key used for testing.
*/
private static final String EXAMPLE_SHARED_SECRET_KEY = "AAECAwQFBgcICQoLDA0ODxAREhMUFrEXAMPLESECRET=";
/**
* Example collaboration ID, i.e., the value used by all participating parties as a salt for encryption.
*/
private static final String EXAMPLE_SALT = "00000000-1111-2222-3333-444444444444";
/**
* Table schema for an input file with a header row which contains (at least) the following columns
* (case-insensitive, leading and trailing whitespace are ignored).
* <ul>
* <li>firstname</li>
* <li>lastname</li>
* <li>address</li>
* <li>city</li>
* <li>state</li>
* <li>phonenumber</li>
* <li>title</li>
* <li>level</li>
* <li>notes</li>
* </ul>
*/
private static final TableSchema EXAMPLE_TABLE_SCHEMA = new MappedTableSchema(List.of(
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("firstname"))
.targetHeader(new ColumnHeader("fname"))
.type(ColumnType.CLEARTEXT)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("lastname"))
.targetHeader(new ColumnHeader("lname"))
.type(ColumnType.CLEARTEXT)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("address"))
.targetHeader(new ColumnHeader("address"))
.pad(Pad.builder().type(PadType.MAX).length(32).build())
.type(ColumnType.SEALED)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("city"))
.targetHeader(new ColumnHeader("city"))
.pad(Pad.builder().type(PadType.MAX).length(16).build())
.type(ColumnType.SEALED)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("state"))
.targetHeader(new ColumnHeader("state"))
.type(ColumnType.FINGERPRINT)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("phonenumber"))
.targetHeader(new ColumnHeader("phonenumber_cleartext"))
.pad(null)
.type(ColumnType.CLEARTEXT)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("phonenumber"))
.targetHeader(new ColumnHeader("phonenumber_sealed"))
.pad(Pad.DEFAULT)
.type(ColumnType.SEALED)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("phonenumber"))
.targetHeader(new ColumnHeader("phonenumber_fingerprint"))
.type(ColumnType.FINGERPRINT)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("title"))
.targetHeader(new ColumnHeader("title"))
.pad(Pad.builder().type(PadType.FIXED).length(128).build())
.type(ColumnType.SEALED)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("level"))
.targetHeader(new ColumnHeader("level"))
.pad(null)
.type(ColumnType.CLEARTEXT)
.build(),
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("notes"))
.targetHeader(new ColumnHeader("notes"))
.pad(Pad.builder().type(PadType.MAX).length(100).build())
.type(ColumnType.SEALED)
.build()
));
/**
* Hidden demo class constructor.
*/
private CsvExample() {
}
/**
* Encrypt a file with the following columns with a predetermined schema, shared secret key, and collaboration ID.
* <ul>
* <li>firstname</li>
* <li>lastname</li>
* <li>address</li>
* <li>city</li>
* <li>state</li>
* <li>phonenumber</li>
* <li>title</li>
* <li>level</li>
* <li>notes</li>
* </ul>
*
* @param sourceFile Source CSV file matching aforementioned schema
* @param targetFile Destination for encrypted table
*/
public static void encrypt(final String sourceFile,
        final String targetFile) {
    final var encryptionConfig = EncryptConfig.builder()
            .sourceFile(sourceFile)
            .targetFile(targetFile)
            .fileFormat(FileFormat.CSV)
            // NOTE(review): demo-only hard-coded key and salt -- never reuse in production.
            .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY))
            .salt(EXAMPLE_SALT)
            .tempDir(".")
            // lowAssuranceMode presumably relaxes the client's privacy settings for
            // example purposes -- see ClientSettings docs to confirm.
            .settings(ClientSettings.lowAssuranceMode())
            .tableSchema(EXAMPLE_TABLE_SCHEMA)
            .overwrite(true)
            .build();
    final RowMarshaller<CsvValue> csvRowMarshaller =
            CsvRowMarshaller.newInstance(encryptionConfig);
    // Fix: close the marshaller even if marshal() throws, so the underlying
    // file handles are not leaked on failure.
    try {
        csvRowMarshaller.marshal();
    } finally {
        csvRowMarshaller.close();
    }
}
/**
* Decrypt an encrypted table for a predetermined shared secret key, and salt.
*
* @param sourceFile Encrypted table to decrypt
* @param targetFile Where to store decrypted results
*/
public static void decrypt(final String sourceFile,
final String targetFile) {
final var decryptConfig = DecryptConfig.builder()
.sourceFile(sourceFile)
.targetFile(targetFile)
.fileFormat(FileFormat.CSV)
.secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY))
.salt(EXAMPLE_SALT)
.overwrite(true)
.build();
final RowUnmarshaller<CsvValue> csvRowUnmarshaller =
CsvRowUnmarshaller.newInstance(decryptConfig);
csvRowUnmarshaller.unmarshal();
csvRowUnmarshaller.close();
}
} | 2,384 |
0 | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/csv/CsvNoHeaderExample.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.csv;
import com.amazonaws.c3r.action.CsvRowMarshaller;
import com.amazonaws.c3r.action.CsvRowUnmarshaller;
import com.amazonaws.c3r.action.RowMarshaller;
import com.amazonaws.c3r.action.RowUnmarshaller;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.DecryptConfig;
import com.amazonaws.c3r.config.EncryptConfig;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.PositionalTableSchema;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.io.FileFormat;
import java.util.List;
/**
 * Examples of encrypting and decrypting a CSV file that has no header row.
 *
 * <p>
 * Because the input carries no column names, a {@link PositionalTableSchema} maps each input
 * column, by position, to zero or more output columns.
 */
public final class CsvNoHeaderExample {
    /**
     * An example 32-byte key used for testing.
     */
    private static final String EXAMPLE_SHARED_SECRET_KEY = "AAECAwQFBgcICQoLDA0ODxAREhMUFrEXAMPLESECRET=";

    /**
     * Example collaboration ID, i.e., the value used by all participating parties as a salt for encryption.
     */
    private static final String EXAMPLE_SALT = "00000000-1111-2222-3333-444444444444";

    /**
     * Table schema for an input CSV file with no header row and exactly 9 columns. The list of
     * {@link ColumnSchema} at each position determines how many output columns that positional
     * input column is mapped to.
     */
    private static final TableSchema EXAMPLE_TABLE_SCHEMA = new PositionalTableSchema(List.of(
            List.of(cleartextColumn("fname")),
            List.of(cleartextColumn("lname")),
            List.of(sealedColumn("address", Pad.builder().type(PadType.MAX).length(32).build())),
            List.of(sealedColumn("city", Pad.builder().type(PadType.MAX).length(16).build())),
            List.of(fingerprintColumn("state")),
            // A single input column fans out to multiple output columns when its position
            // holds more than one ColumnSchema.
            List.of(cleartextColumn("phonenumber_cleartext"),
                    sealedColumn("phonenumber_sealed", Pad.DEFAULT),
                    fingerprintColumn("phonenumber_fingerprint")),
            List.of(sealedColumn("title", Pad.builder().type(PadType.FIXED).length(128).build())),
            List.of(cleartextColumn("level")),
            // An empty list at a position omits that input column from the encrypted output.
            List.of()
    ));

    /**
     * Hidden demo class constructor.
     */
    private CsvNoHeaderExample() {
    }

    /**
     * Builds a cleartext (pass-through) column schema for the given output column name.
     *
     * @param target output column name
     * @return cleartext column schema
     */
    private static ColumnSchema cleartextColumn(final String target) {
        return ColumnSchema.builder()
                .targetHeader(new ColumnHeader(target))
                .type(ColumnType.CLEARTEXT)
                .build();
    }

    /**
     * Builds a sealed (encrypted) column schema with the given padding for the given output column name.
     *
     * @param target output column name
     * @param pad    padding applied before encryption
     * @return sealed column schema
     */
    private static ColumnSchema sealedColumn(final String target, final Pad pad) {
        return ColumnSchema.builder()
                .targetHeader(new ColumnHeader(target))
                .pad(pad)
                .type(ColumnType.SEALED)
                .build();
    }

    /**
     * Builds a fingerprint (HMAC) column schema for the given output column name.
     *
     * @param target output column name
     * @return fingerprint column schema
     */
    private static ColumnSchema fingerprintColumn(final String target) {
        return ColumnSchema.builder()
                .targetHeader(new ColumnHeader(target))
                .type(ColumnType.FINGERPRINT)
                .build();
    }

    /**
     * Encrypts a CSV file with no header row and exactly 9 columns according to a predetermined schema.
     *
     * @param sourceFile Source CSV file with no header row and exactly 9 columns
     * @param targetFile Destination for encrypted table
     */
    public static void encrypt(final String sourceFile, final String targetFile) {
        // Assemble the settings for this encryption run.
        final EncryptConfig config = EncryptConfig.builder()
                .sourceFile(sourceFile)
                .targetFile(targetFile)
                .fileFormat(FileFormat.CSV)
                .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY))
                .salt(EXAMPLE_SALT)
                .tempDir(".")
                .settings(ClientSettings.lowAssuranceMode())
                .tableSchema(EXAMPLE_TABLE_SCHEMA)
                .overwrite(true)
                .build();
        // Marshal (encrypt) every row, then release the underlying resources.
        final RowMarshaller<CsvValue> marshaller = CsvRowMarshaller.newInstance(config);
        marshaller.marshal();
        marshaller.close();
    }

    /**
     * Decrypt an encrypted table using the predetermined shared secret key and salt.
     *
     * @param sourceFile Encrypted table to decrypt
     * @param targetFile Where to store decrypted results
     */
    public static void decrypt(final String sourceFile, final String targetFile) {
        // Assemble the settings for this decryption run.
        final DecryptConfig config = DecryptConfig.builder()
                .sourceFile(sourceFile)
                .targetFile(targetFile)
                .fileFormat(FileFormat.CSV)
                .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY))
                .salt(EXAMPLE_SALT)
                .overwrite(true)
                .build();
        // Unmarshal (decrypt) every row, then release the underlying resources.
        final RowUnmarshaller<CsvValue> unmarshaller = CsvRowUnmarshaller.newInstance(config);
        unmarshaller.unmarshal();
        unmarshaller.close();
    }
}
| 2,385 |
0 | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/csv/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Sample code showing how to use the SDK with CSV data.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.examples.csv; | 2,386 |
0 | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/parquet/ParquetExample.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.parquet;
import com.amazonaws.c3r.action.ParquetRowMarshaller;
import com.amazonaws.c3r.action.ParquetRowUnmarshaller;
import com.amazonaws.c3r.action.RowMarshaller;
import com.amazonaws.c3r.action.RowUnmarshaller;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.DecryptConfig;
import com.amazonaws.c3r.config.EncryptConfig;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.io.FileFormat;
import java.util.List;
/**
 * Example code for creating a schema, encrypting and decrypting Parquet data.
 */
public final class ParquetExample {
    /**
     * An example 32-byte key used for testing.
     */
    private static final String EXAMPLE_SHARED_SECRET_KEY = "AAECAwQFBgcICQoLDA0ODxAREhMUFrEXAMPLESECRET=";
    /**
     * An example salt for testing.
     */
    private static final String EXAMPLE_SALT = "00000000-1111-2222-3333-444444444444";
    /**
     * Table schema mapping source columns to target columns. The Parquet input file contains
     * (at least) the following columns (case-insensitive, leading and trailing whitespace are ignored):
     * <ul>
     * <li>firstname</li>
     * <li>lastname</li>
     * <li>address</li>
     * <li>city</li>
     * <li>state</li>
     * <li>phonenumber</li>
     * <li>title</li>
     * <li>level</li>
     * <li>notes</li>
     * </ul>
     *
     * <p>
     * Note that {@code phonenumber} is mapped to three target columns, one per column type.
     */
    private static final TableSchema EXAMPLE_TABLE_SCHEMA = new MappedTableSchema(List.of(
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("firstname"))
                    .targetHeader(new ColumnHeader("fname"))
                    .type(ColumnType.CLEARTEXT)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("lastname"))
                    .targetHeader(new ColumnHeader("lname"))
                    .type(ColumnType.CLEARTEXT)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("address"))
                    .targetHeader(new ColumnHeader("address"))
                    .pad(Pad.builder().type(PadType.MAX).length(32).build())
                    .type(ColumnType.SEALED)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("city"))
                    .targetHeader(new ColumnHeader("city"))
                    .pad(Pad.builder().type(PadType.MAX).length(16).build())
                    .type(ColumnType.SEALED)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("state"))
                    .targetHeader(new ColumnHeader("state"))
                    .type(ColumnType.FINGERPRINT)
                    .build(),
            // The same source column ("phonenumber") appears three times below, producing a
            // cleartext, a sealed, and a fingerprint target column from one input column.
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("phonenumber"))
                    .targetHeader(new ColumnHeader("phonenumber_cleartext"))
                    .pad(null)
                    .type(ColumnType.CLEARTEXT)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("phonenumber"))
                    .targetHeader(new ColumnHeader("phonenumber_sealed"))
                    .pad(Pad.DEFAULT)
                    .type(ColumnType.SEALED)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("phonenumber"))
                    .targetHeader(new ColumnHeader("phonenumber_fingerprint"))
                    .type(ColumnType.FINGERPRINT)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("title"))
                    .targetHeader(new ColumnHeader("title"))
                    .pad(Pad.builder().type(PadType.FIXED).length(128).build())
                    .type(ColumnType.SEALED)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("level"))
                    .targetHeader(new ColumnHeader("level"))
                    .pad(null)
                    .type(ColumnType.CLEARTEXT)
                    .build(),
            ColumnSchema.builder()
                    .sourceHeader(new ColumnHeader("notes"))
                    .targetHeader(new ColumnHeader("notes"))
                    .pad(Pad.builder().type(PadType.MAX).length(100).build())
                    .type(ColumnType.SEALED)
                    .build()
    ));
    /**
     * Hidden example class constructor.
     */
    private ParquetExample() {
    }
    /**
     * Demonstrates encrypting a file. Uses the following columns with a predetermined schema, shared secret key, and collaboration ID:
     * <ul>
     * <li>firstname</li>
     * <li>lastname</li>
     * <li>address</li>
     * <li>city</li>
     * <li>state</li>
     * <li>phonenumber</li>
     * <li>title</li>
     * <li>level</li>
     * <li>notes</li>
     * </ul>
     *
     * @param sourceFile Source Parquet file matching aforementioned schema
     * @param targetFile Destination for encrypted table
     */
    public static void encrypt(final String sourceFile,
                               final String targetFile) {
        final var encryptionConfig = EncryptConfig.builder()
                .sourceFile(sourceFile)
                .targetFile(targetFile)
                .fileFormat(FileFormat.PARQUET)
                .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY))
                .salt(EXAMPLE_SALT)
                .tempDir(".")
                .settings(ClientSettings.lowAssuranceMode())
                .tableSchema(EXAMPLE_TABLE_SCHEMA)
                .overwrite(true)
                .build();
        // Encrypt every row and then release the marshaller's resources.
        final RowMarshaller<ParquetValue> parquetRowMarshaller =
                ParquetRowMarshaller.newInstance(encryptionConfig, ParquetConfig.DEFAULT);
        parquetRowMarshaller.marshal();
        parquetRowMarshaller.close();
    }
    /**
     * Decrypt an encrypted table for a predetermined shared secret key, and collaboration ID.
     *
     * @param sourceFile Encrypted table to decrypt
     * @param targetFile Where to store decrypted results
     */
    public static void decrypt(final String sourceFile,
                               final String targetFile) {
        final var decryptConfig = DecryptConfig.builder()
                .sourceFile(sourceFile)
                .targetFile(targetFile)
                .fileFormat(FileFormat.PARQUET)
                .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY))
                .salt(EXAMPLE_SALT)
                .overwrite(true)
                .build();
        // Decrypt every row and then release the unmarshaller's resources.
        final RowUnmarshaller<ParquetValue> parquetRowUnmarshaller =
                ParquetRowUnmarshaller.newInstance(decryptConfig);
        parquetRowUnmarshaller.unmarshal();
        parquetRowUnmarshaller.close();
    }
}
| 2,387 |
0 | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/parquet/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Sample code showing how to use the SDK with Parquet data.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.examples.parquet; | 2,388 |
0 | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/spark/SparkExample.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.examples.spark;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnInsight;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.encryption.EncryptionContext;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Nonce;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.functions;
import scala.jdk.CollectionConverters;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Example code for running Spark.
 *
 * <p>
 * Note that there are a few differences between C3R's pre-packaged offerings and orchestrating with Spark.
 *
 * <p>
 * The most important difference is the change in trust boundaries. When using the C3R normally, files exist on the same machine running
 * C3R. C3R never writes any data to disk unencrypted unless it is meant to be unencrypted in the output. With Spark, as an input file is
 * read, Spark is partitioning that data in memory and/or on disk before C3R ever gets an opportunity to encrypt it. This means that
 * cleartext forms of data that will eventually be encrypted may be written to disk and/or distributed to Spark Workers before it is
 * encrypted. Further, Spark Workers may exist on other machines or networks. If a Spark job fails, there could be
 * cleartext copies of the input file leftover across your Spark infrastructure. It is up to you to understand if this is permissible
 * for your threat model and to configure your Spark server according to your needs.
 *
 * <p>
 * Second, this Spark example is not managing file permissions for the output file. C3R normally sets this file to be RW by the Owner
 * only. Files written by Spark will inherit the permissions of where they are written.
 *
 * <p>
 * Third, Spark partitions and distributes the cleartext data before C3R drops columns that will not be included in the output. When
 * using the C3R SDK or CLI, these columns are dropped during the data load step before they're ever written to disk. If these columns
 * should never leave the initial location, they should be removed from the data before it is handed to this Spark example.
 *
 * <p>
 * Fourth, Spark may partition the data and thus the output files. You may need to take additional steps to merge the data if downstream
 * steps require it be one file. Note that when using S3 and Glue with AWS Clean Rooms, this should not be necessary.
 *
 * <p>
 * Finally, certain functionality like shuffling rows, dropping columns, finding max length of values in a column, and finding duplicate
 * values in a column are all revised in this example to take advantage of Spark. These are normally handled by C3R's
 * {@link com.amazonaws.c3r.action.RowMarshaller}. All of these functions will behave the same as they do with C3R except shuffling rows.
 * Instead of sorting on Nonces created using Java's {@code SecureRandom}, Spark is using its own {@code rand()} function for the shuffle.
 */
public final class SparkExample {
    /**
     * An example 32-byte key used for testing.
     */
    private static final String EXAMPLE_SHARED_SECRET_KEY = "AAECAwQFBgcICQoLDA0ODxAREhMUFrEXAMPLESECRET=";
    /**
     * Example collaboration ID, i.e., the value used by all participating parties as a salt for encryption.
     */
    private static final String EXAMPLE_SALT = "00000000-1111-2222-3333-444444444444";
    /**
     * Insights for all the target columns that will be written.
     *
     * <p>
     * NOTE(review): this is static mutable state assigned in {@link #encrypt} and read by the
     * other pipeline stages; the class is not safe for concurrent encrypt runs in one JVM.
     */
    private static Collection<ColumnInsight> columnInsights;
    /** Hidden utility constructor. */
    private SparkExample() {
    }
    /**
     * Create a Spark session for running the encrypt/decrypt methods.
     *
     * <p>
     * This method will by default create a local Spark Driver. Modify the URL of the Spark Driver within this function to run
     * this example on another Spark Driver.
     *
     * @return A spark session
     */
    private static SparkSession initSparkSession() {
        // CHECKSTYLE:OFF
        final SparkConf conf = new SparkConf()
                .setAppName("C3RSparkDemo")
                // Update this to point to your own Spark Driver if not running this locally.
                .setMaster("local[*]");
        // CHECKSTYLE:ON
        return SparkSession
                .builder()
                .config(conf)
                .getOrCreate();
    }
    /**
     * Sample of Spark orchestrating the C3R SDK for encryption.
     *
     * <p>
     * This function is currently setup to only process CSV files. It can be modified to instead take a {@code Dataset<Row>}. There is no
     * functionality specific to a CSV after the initial data load.
     *
     * <p>
     * Please note that only {@code String} data types are currently supported.
     *
     * @param source input file
     * @param target output file
     * @param schema schema file
     */
    public static void encrypt(final String source, final String target, final TableSchema schema) {
        final SparkSession spark = initSparkSession();
        final ClientSettings clientSettings = ClientSettings.lowAssuranceMode();
        // Populate the shared static state consumed by the pipeline stages below.
        columnInsights = schema.getColumns().stream().map(ColumnInsight::new)
                .collect(Collectors.toList());
        Dataset<Row> rawInputData = readInput(source, spark);
        // Stage order matters: drop unused columns, gather padding stats, validate,
        // shuffle, remap to target columns, locate positions, then encrypt.
        rawInputData = filterSourceColumnsBySchema(rawInputData);
        updateMaxValuesPerColumn(spark, rawInputData);
        validateDuplicates(clientSettings, rawInputData);
        rawInputData = shuffleData(rawInputData);
        rawInputData = mapSourceToTargetColumns(rawInputData);
        populateColumnPositions(rawInputData);
        rawInputData = marshalData(rawInputData);
        rawInputData.write().mode(SaveMode.Append).option("header", true).csv(target);
        closeSparkSession(spark);
    }
    /**
     * Sample of Spark orchestrating the C3R SDK for decryption.
     *
     * <p>
     * This function is currently setup to only process CSV files. It can be modified to instead take a {@code Dataset<Row>}. There is no
     * functionality specific to a CSV after the initial data load.
     *
     * <p>
     * Please note that only {@code String} data types are currently supported.
     *
     * @param source input file
     * @param target output file
     */
    public static void decrypt(final String source, final String target) {
        final SparkSession spark = initSparkSession();
        Dataset<Row> rawInputData = readInput(source, spark);
        rawInputData = unmarshalData(rawInputData);
        rawInputData.write().mode(SaveMode.Append).option("header", true).csv(target);
        closeSparkSession(spark);
    }
    /**
     * Reads the input file for processing.
     *
     * <p>
     * NOTE: Empty values in CSVs are treated as null by default when Spark parses them. To configure nulls with
     * Spark, see the <a href="https://spark.apache.org/docs/latest/sql-data-sources-csv.html">Spark documentation on CSVs</a>.
     *
     * @param source input file
     * @param spark the SparkSession to read with
     * @return The source data to be processed
     */
    private static Dataset<Row> readInput(final String source, final SparkSession spark) {
        return spark.read()
                .option("header", "true") // Filter out the header row
                .option("inferSchema", "false") // Treat all fields as Strings
                .option("nullValue", null)
                .option("emptyValue", null)
                .csv(source);
    }
    /**
     * Filter source columns not in the schema.
     *
     * <p>
     * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} by dropping columns that won't be in the output
     * during the data load.
     *
     * @param rawInputData the Dataset to filter
     * @return A Dataset containing only source columns defined in the schema
     */
    static Dataset<Row> filterSourceColumnsBySchema(final Dataset<Row> rawInputData) {
        final Set<ColumnHeader> schemaSourceColumns = columnInsights.stream()
                .map(ColumnSchema::getSourceHeader)
                .collect(Collectors.toSet());
        final Set<ColumnHeader> inputColumns = Arrays.stream(rawInputData.columns())
                .map(ColumnHeader::new)
                .collect(Collectors.toSet());
        // After removal, inputColumns holds only the columns NOT named by the schema.
        inputColumns.removeAll(schemaSourceColumns);
        Dataset<Row> toReturn = rawInputData;
        for (ColumnHeader columnHeader : inputColumns) {
            toReturn = toReturn.drop(columnHeader.toString());
        }
        return toReturn;
    }
    /**
     * Updates {@link #columnInsights} with the max value length of their columns. These values are used during encryption whenever
     * {@link com.amazonaws.c3r.config.PadType#MAX} is configured for a sealed column.
     *
     * <p>
     * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} tracking the size of each value being read in
     * during the data load.
     *
     * <p>
     * NOTE(review): the SQL below is built by concatenating the raw column name — this assumes
     * simple, identifier-safe column names; confirm inputs. Also, if a column contains only
     * nulls, {@code max(length(...))} is SQL NULL and {@code getInt(0)} may fail — verify
     * against expected data.
     *
     * @param spark The SparkSession to run the queries in
     * @param rawInputData The Dataset to run the queries against
     */
    static void updateMaxValuesPerColumn(final SparkSession spark, final Dataset<Row> rawInputData) {
        rawInputData.createOrReplaceTempView("rawData");
        final Map<ColumnHeader, List<ColumnInsight>> sourceMappedColumnInsights = columnInsights.stream()
                .collect(Collectors.groupingBy(ColumnInsight::getSourceHeader));
        Arrays.stream(rawInputData.columns()).forEach(col -> {
            final int maxValue = spark.sql("SELECT max(length(" + col + ")) FROM rawData").first().getInt(0);
            final ColumnHeader columnHeader = new ColumnHeader(col);
            // A single source column may feed several target columns; update them all.
            for (ColumnInsight insight : sourceMappedColumnInsights.get(columnHeader)) {
                insight.setMaxValueLength(maxValue);
            }
        });
    }
    /**
     * Validates whether the input data meets the encryption settings for `allowDuplicates`.
     *
     * <p>
     * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} querying the temporary SQL table data is loaded
     * to.
     *
     * @param clientSettings The encryption settings to validate with
     * @param rawInputData The Dataset to be validated
     * @throws C3rRuntimeException If input data is invalid
     */
    static void validateDuplicates(final ClientSettings clientSettings, final Dataset<Row> rawInputData) {
        if (clientSettings.isAllowDuplicates()) {
            return;
        }
        // Check for duplicates when `allowDuplicates` is false
        final String[] fingerprintColumns = columnInsights.stream()
                .filter(columnSchema -> columnSchema.getType() == ColumnType.FINGERPRINT) // enforced on fingerprint columns only
                .map(ColumnSchema::getSourceHeader)
                .map(ColumnHeader::toString)
                .distinct()
                .toArray(String[]::new);
        // Check for duplicate non-null values
        for (String col : fingerprintColumns) {
            final Dataset<Row> filteredData = rawInputData.groupBy(col).count().filter("count > 1");
            if (!filteredData.isEmpty()) {
                throw new C3rRuntimeException("Duplicates were found in column `" + col + "`, but `allowDuplicates` is false.");
            }
        }
        // Check for duplicate null values when `preserveNulls` is false
        if (!clientSettings.isPreserveNulls()) {
            for (String col : fingerprintColumns) {
                final Column column = new Column(col);
                final Dataset<Row> filteredData = rawInputData.select(column)
                        .groupBy(column)
                        .count()
                        .filter(column.isNull())
                        .filter("count > 1");
                if (!filteredData.isEmpty()) {
                    throw new C3rRuntimeException("Duplicates NULLs were found in column `" + col + "`, but `allowDuplicates` and " +
                            "`preserveNulls` are false.");
                }
            }
        }
    }
    /**
     * Map the source columns to their respective target columns.
     *
     * <p>
     * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} by writing input columns of data to the intended
     * target columns during the data load.
     *
     * @param rawInputData the Dataset to map
     * @return A Dataset containing each target column
     */
    static Dataset<Row> mapSourceToTargetColumns(final Dataset<Row> rawInputData) {
        final List<Column> targetColumns = new ArrayList<>();
        // Select each source column aliased to its target name (duplicating sources as needed).
        columnInsights.forEach(target -> targetColumns.add(functions.col(target.getSourceHeader().toString())
                .as(target.getTargetHeader().toString())));
        return rawInputData.select(CollectionConverters.IteratorHasAsScala(targetColumns.iterator()).asScala().toSeq());
    }
    /**
     * Encrypt source data.
     *
     * @param rawInputData The source data to be encrypted
     * @return The encrypted data
     */
    static Dataset<Row> marshalData(final Dataset<Row> rawInputData) {
        final ExpressionEncoder<Row> rowEncoder = RowEncoder.apply(rawInputData.schema());
        return rawInputData.map((MapFunction<Row, Row>) row -> {
            // Grab a nonce for the row
            final Nonce nonce = Nonce.nextNonce();
            // Build a list of transformers for each row, limiting state to keys/salts/settings POJOs
            final Map<ColumnType, Transformer> transformers = Transformer.initTransformers(
                    KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY),
                    EXAMPLE_SALT,
                    ClientSettings.lowAssuranceMode(),
                    false); // Defaulting to false.
            // For each column in the row, transform the data
            return Row.fromSeq(
                    CollectionConverters.IteratorHasAsScala(columnInsights.stream().map(column -> {
                        if (column.getType() == ColumnType.CLEARTEXT) {
                            return row.get(column.getSourceColumnPosition());
                        }
                        final Transformer transformer = transformers.get(column.getType());
                        final String data = row.getString(column.getSourceColumnPosition());
                        final byte[] dataBytes = data == null ? null : data.getBytes(StandardCharsets.UTF_8);
                        final EncryptionContext encryptionContext = new EncryptionContext(column, nonce, ClientDataType.STRING);
                        final byte[] marshalledBytes = transformer.marshal(dataBytes, encryptionContext);
                        return (marshalledBytes == null ? null : new String(marshalledBytes, StandardCharsets.UTF_8));
                    }).iterator()).asScala().toSeq());
        }, rowEncoder);
    }
    /**
     * Decrypt source data.
     *
     * @param rawInputData The source data to be decrypted
     * @return The cleartext data
     */
    static Dataset<Row> unmarshalData(final Dataset<Row> rawInputData) {
        final ExpressionEncoder<Row> rowEncoder = RowEncoder.apply(rawInputData.schema());
        return rawInputData.map((MapFunction<Row, Row>) row -> {
            // Build a list of transformers for each row, limiting state to keys/salts/settings POJOs
            final Map<ColumnType, Transformer> transformers = Transformer.initTransformers(
                    KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY),
                    EXAMPLE_SALT,
                    ClientSettings.lowAssuranceMode(),
                    false); // Defaulting to false.
            // For each column in the row, transform the data
            final List<Object> unmarshalledValues = new ArrayList<>();
            for (int i = 0; i < row.size(); i++) {
                final String data = row.getString(i);
                final byte[] dataBytes = data == null ? null : data.getBytes(StandardCharsets.UTF_8);
                // Pick the transformer by sniffing the value's descriptor; default is pass-through.
                Transformer transformer = transformers.get(ColumnType.CLEARTEXT); // Default to pass through
                if (Transformer.hasDescriptor(transformers.get(ColumnType.SEALED), dataBytes)) {
                    transformer = transformers.get(ColumnType.SEALED);
                } else if (Transformer.hasDescriptor(transformers.get(ColumnType.FINGERPRINT), dataBytes)) {
                    transformer = transformers.get(ColumnType.FINGERPRINT);
                }
                final byte[] unmarshalledBytes = transformer.unmarshal(dataBytes);
                unmarshalledValues.add(unmarshalledBytes == null ? null : new String(unmarshalledBytes, StandardCharsets.UTF_8));
            }
            return Row.fromSeq(
                    CollectionConverters.IteratorHasAsScala(unmarshalledValues.iterator()).asScala().toSeq());
        }, rowEncoder);
    }
    /**
     * Find the positions for each column.
     *
     * <p>
     * NOTE(review): positions are looked up by target header, so this must run after
     * {@link #mapSourceToTargetColumns} has renamed the columns.
     *
     * @param rawInputData The source data to map the columns with
     */
    static void populateColumnPositions(final Dataset<Row> rawInputData) {
        // Gather the positions of all the columns
        final String[] columns = rawInputData.columns();
        final Map<ColumnHeader, Integer> columnPositions = new HashMap<>();
        for (int i = 0; i < columns.length; i++) {
            columnPositions.put(new ColumnHeader(columns[i]), i);
        }
        for (ColumnInsight column : columnInsights) {
            final int position = columnPositions.get(column.getTargetHeader());
            column.setSourceColumnPosition(position);
        }
    }
    /**
     * Shuffles the input data to hide ordering.
     *
     * <p>
     * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} by appending the Nonces used for each row to the
     * data on load and then sorting on those nonces before writing out the data. Instead of sorting on Nonces created using Java's
     * {@code SecureRandom}, Spark is using its own {@code rand()} function for the shuffle.
     *
     * @param rawInputData The Dataset to shuffle
     * @return The shuffled Dataset
     */
    static Dataset<Row> shuffleData(final Dataset<Row> rawInputData) {
        return rawInputData.orderBy(functions.rand());
    }
    /**
     * Shut down the Spark session.
     *
     * @param spark the SparkSession to close
     */
    private static void closeSparkSession(final SparkSession spark) {
        spark.stop();
    }
}
| 2,389 |
0 | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples | Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/spark/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Sample code showing how to use the SDK with Spark.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.examples.spark; | 2,390 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/config/SparkDecryptConfigTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.config;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertThrowsExactly;
public class SparkDecryptConfigTest {
private String output;
    /**
     * Builds a {@code SparkDecryptConfig} builder pre-populated with the minimum required
     * settings (key, source, target directory, salt) so each test only overrides the field
     * it exercises.
     *
     * @param sourceFile encrypted input file for the config
     * @return partially populated builder
     */
    private SparkDecryptConfig.SparkDecryptConfigBuilder minimalConfigBuilder(final String sourceFile) {
        return SparkDecryptConfig.builder()
                .secretKey(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getKey())
                .source(sourceFile)
                .targetDir(output)
                .salt(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getSalt());
    }
    /** Points {@code output} at a fresh, not-yet-existing directory path before each test. */
    @BeforeEach
    public void setup() throws IOException {
        output = FileTestUtility.createTempDir().resolve("outputDir").toString();
    }
    /** Verifies the minimal set of builder fields produces a valid config. */
    @Test
    public void minimumViableConstructionTest() {
        assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()).build());
    }
    /** An empty source path must be rejected at build time. */
    @Test
    public void validateInputEmptyTest() {
        assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .source("").build());
    }
    /** An empty target directory must be rejected at build time. */
    @Test
    public void validateOutputEmptyTest() {
        assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .targetDir("").build());
    }
    /** Building fails when the target already exists and overwrite is false. */
    @Test
    public void validateNoOverwriteTest() throws IOException {
        // Pre-create the target path so the overwrite(false) check trips.
        output = FileTestUtility.createTempFile().toString();
        assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .overwrite(false).build());
    }
@Test
public void validateOverwriteTest() throws IOException {
output = FileTestUtility.createTempDir().toString();
assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
.overwrite(true).build());
}
@Test
public void validateEmptySaltTest() {
assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
.salt("").build());
}
@Test
public void validateFileExtensionWhenInputIsDirectoryTest() {
assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
.source(FileTestUtility.createTempDir().toString())
.overwrite(true)
.fileFormat(FileFormat.PARQUET)
.build());
}
@Test
public void validateNoFileExtensionWhenInputIsDirectoryTest() {
assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
.source(FileTestUtility.createTempDir().toString())
.overwrite(true)
.build());
}
@Test
public void unknownFileExtensionTest() throws IOException {
final String pathWithUnknownExtension = FileTestUtility.createTempFile("input", ".unknown").toString();
// unknown extensions cause failure if no FileFormat is specified
assertThrows(C3rIllegalArgumentException.class, () ->
minimalConfigBuilder(pathWithUnknownExtension).build());
// specifying a FileFormat makes it work
assertDoesNotThrow(() ->
minimalConfigBuilder(pathWithUnknownExtension)
.fileFormat(FileFormat.CSV)
.build());
}
@Test
public void csvOptionsNonCsvFileFormatForFileTest() throws IOException {
final String parquetPath = FileTestUtility.createTempFile("input", ".parquet").toString();
// parquet file is fine
assertDoesNotThrow(() ->
minimalConfigBuilder(parquetPath).build());
// parquet file with csvInputNullValue errors
assertThrows(C3rIllegalArgumentException.class, () ->
minimalConfigBuilder(parquetPath)
.csvInputNullValue("")
.build());
// parquet file with csvOutputNullValue errors
assertThrows(C3rIllegalArgumentException.class, () ->
minimalConfigBuilder(parquetPath)
.csvOutputNullValue("")
.build());
}
@Test
public void csvOptionNonCsvFileFormatForDirectoryTest() throws IOException {
// Use an input directory
final var config = minimalConfigBuilder(FileTestUtility.createTempDir().toString())
.overwrite(true)
.fileFormat(FileFormat.PARQUET);
// Parquet file format by itself is fine
assertDoesNotThrow(() -> config.build());
// Parquet format with an input CSV null value specified is not accepted
assertThrowsExactly(C3rIllegalArgumentException.class, () -> config.csvInputNullValue("NULL").build());
// Parquet format with an output CSV null value specified is not accepted
assertThrowsExactly(C3rIllegalArgumentException.class, () -> config.csvOutputNullValue("NULL").build());
}
}
| 2,391 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/config/SparkEncryptConfigTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.config;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.PositionalTableSchema;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.action.SparkMarshaller;
import com.amazonaws.c3r.spark.io.CsvTestUtility;
import com.amazonaws.c3r.spark.io.csv.SparkCsvReader;
import com.amazonaws.c3r.spark.io.csv.SparkCsvWriter;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.TEST_CONFIG_DATA_SAMPLE;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.cleartextColumn;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertThrowsExactly;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class SparkEncryptConfigTest {
    /** Directory the encrypted output is written to; recreated before each test. */
    private String output;

    /**
     * Builds an encrypt config populated with the minimum set of required values.
     *
     * @param sourceFile Input file to encrypt
     * @return Builder primed with secret key, source, target directory, salt, settings and schema
     */
    private SparkEncryptConfig.SparkEncryptConfigBuilder minimalConfigBuilder(final String sourceFile) {
        return SparkEncryptConfig.builder()
                .secretKey(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getKey())
                .source(sourceFile)
                .targetDir(output)
                .salt(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getSalt())
                .settings(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getSettings())
                .tableSchema(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getSchema());
    }

    /**
     * Helper: reads the config's source, encrypts it with the row marshaller and writes the
     * result to the config's target location.
     *
     * @param config Fully built encryption configuration to execute
     */
    private void runConfig(final SparkEncryptConfig config) {
        final SparkSession session = SparkSessionTestUtility.initSparkSession();
        final Dataset<Row> dataset = SparkCsvReader.readInput(session,
                config.getSourceFile(),
                config.getCsvInputNullValue(),
                config.getTableSchema().getPositionalColumnHeaders());
        final Dataset<Row> marshalledDataset = SparkMarshaller.encrypt(dataset, config);
        SparkCsvWriter.writeOutput(marshalledDataset, config.getTargetFile(), config.getCsvOutputNullValue());
    }

    @BeforeEach
    public void setup() throws IOException {
        // Fresh, not-yet-existing output directory for each test.
        output = FileTestUtility.createTempDir().resolve("outputDir").toString();
    }

    // The minimum viable config builds without error.
    @Test
    public void minimumViableConstructionTest() {
        assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .build());
    }

    // Make sure input file must be specified.
    @Test
    public void validateInputBlankTest() {
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(
                TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .source("").build());
    }

    // An empty target directory is rejected.
    // Note: assertThrowsExactly used throughout for consistency with the rest of this class.
    @Test
    public void validateOutputEmptyTest() {
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .targetDir("").build());
    }

    // An existing output location without overwrite permission is rejected.
    @Test
    public void validateNoOverwriteTest() throws IOException {
        output = FileTestUtility.createTempDir().toString();
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .overwrite(false).build());
    }

    // An existing output location with overwrite permission is accepted.
    @Test
    public void validateOverwriteTest() throws IOException {
        output = FileTestUtility.createTempDir().toString();
        assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .overwrite(true).build());
    }

    // An empty salt is rejected.
    @Test
    public void validateEmptySaltTest() {
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .salt("").build());
    }

    // A directory source is accepted when an explicit file format is supplied.
    @Test
    public void validateFileExtensionWhenInputIsDirectoryTest() {
        assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .source(FileTestUtility.createTempDir().toString())
                .overwrite(true)
                .fileFormat(FileFormat.PARQUET)
                .build());
    }

    // A directory source without an explicit file format is rejected (no extension to infer from).
    @Test
    public void validateNoFileExtensionWhenInputIsDirectoryTest() {
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                .source(FileTestUtility.createTempDir().toString())
                .overwrite(true)
                .build());
    }

    @Test
    public void unknownFileExtensionTest() throws IOException {
        final String pathWithUnknownExtension = FileTestUtility.createTempFile("input", ".unknown").toString();
        // unknown extensions cause failure if no FileFormat is specified
        assertThrowsExactly(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(pathWithUnknownExtension).build());
        // specifying a FileFormat makes it work
        assertDoesNotThrow(() ->
                minimalConfigBuilder(pathWithUnknownExtension)
                        .fileFormat(FileFormat.CSV)
                        .build());
    }

    @Test
    public void csvOptionsNonCsvFileFormatForFileTest() throws IOException {
        final String parquetPath = FileTestUtility.createTempFile("input", ".parquet").toString();
        // parquet file is fine
        assertDoesNotThrow(() ->
                minimalConfigBuilder(parquetPath).build());
        // parquet file with csvInputNullValue errors
        assertThrowsExactly(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(parquetPath)
                        .csvInputNullValue("")
                        .build());
        // parquet file with csvOutputNullValue errors
        assertThrowsExactly(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(parquetPath)
                        .csvOutputNullValue("")
                        .build());
    }

    @Test
    public void csvOptionNonCsvFileFormatForDirectoryTest() throws IOException {
        // Use an input directory
        final var config = minimalConfigBuilder(FileTestUtility.createTempDir().toString())
                .overwrite(true)
                .fileFormat(FileFormat.PARQUET);
        // Parquet file format by itself is fine
        assertDoesNotThrow(() -> config.build());
        // Parquet format with an input CSV null value specified is not accepted
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> config.csvInputNullValue("NULL").build());
        // Parquet format with an output CSV null value specified is not accepted
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> config.csvOutputNullValue("NULL").build());
    }

    // Make sure positional schema and file are equivalent to file and schema with headers.
    @Test
    public void noHeaderFileProducesCorrectResultsTest() throws IOException {
        final String noHeadersFile = "../samples/csv/data_sample_no_headers.csv";
        final TableSchema noHeadersSchema = new PositionalTableSchema(List.of(
                List.of(cleartextColumn(null, "FirstName")),
                List.of(cleartextColumn(null, "LastName")),
                List.of(cleartextColumn(null, "Address")),
                List.of(cleartextColumn(null, "City")),
                List.of(cleartextColumn(null, "State")),
                List.of(cleartextColumn(null, "PhoneNumber")),
                List.of(cleartextColumn(null, "Title")),
                List.of(cleartextColumn(null, "Level")),
                List.of(cleartextColumn(null, "Notes"))
        ));
        final String headersFile = "../samples/csv/data_sample_without_quotes.csv";
        final TableSchema headersSchema = new MappedTableSchema(List.of(
                cleartextColumn("FirstName"),
                cleartextColumn("LastName"),
                cleartextColumn("Address"),
                cleartextColumn("City"),
                cleartextColumn("State"),
                cleartextColumn("PhoneNumber"),
                cleartextColumn("Title"),
                cleartextColumn("Level"),
                cleartextColumn("Notes")
        ));
        final SparkEncryptConfig noHeadersConfig = SparkEncryptConfig.builder()
                .source(noHeadersFile)
                .targetDir(FileTestUtility.createTempDir().resolve("encryptedNoHeaders").toString())
                .overwrite(true)
                .csvInputNullValue(null)
                .csvOutputNullValue(null)
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .settings(TEST_CONFIG_DATA_SAMPLE.getSettings())
                .tableSchema(noHeadersSchema)
                .build();
        runConfig(noHeadersConfig);
        final Path mergedNoHeadersOutput = CsvTestUtility.mergeOutput(Path.of(noHeadersConfig.getTargetFile()));
        final SparkEncryptConfig headersConfig = SparkEncryptConfig.builder()
                .source(headersFile)
                .targetDir(FileTestUtility.createTempDir().resolve("encryptedHeaders").toString())
                .overwrite(true)
                .csvInputNullValue(null)
                .csvOutputNullValue(null)
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .settings(TEST_CONFIG_DATA_SAMPLE.getSettings())
                .tableSchema(headersSchema)
                .build();
        runConfig(headersConfig);
        final Path mergedHeadersOutput = CsvTestUtility.mergeOutput(Path.of(headersConfig.getTargetFile()));
        final List<String> noHeaderLines = Files.readAllLines(mergedNoHeadersOutput);
        final List<String> headerLines = Files.readAllLines(mergedHeadersOutput);
        // Row order across Spark output partitions is not fixed, so compare sorted contents.
        // List equality also covers the size check and gives a better failure message than
        // the original element-by-element compareTo loop.
        noHeaderLines.sort(String::compareTo);
        headerLines.sort(String::compareTo);
        assertEquals(headerLines, noHeaderLines);
    }

    // Make sure custom null values work with positional schemas.
    @Test
    public void customNullValueWithPositionalSchemaTest() throws IOException {
        final String noHeadersFile = "../samples/csv/data_sample_no_headers.csv";
        final TableSchema noHeadersSchema = new PositionalTableSchema(List.of(
                List.of(cleartextColumn(null, "FirstName")),
                List.of(cleartextColumn(null, "LastName")),
                List.of(cleartextColumn(null, "Address")),
                List.of(cleartextColumn(null, "City")),
                List.of(cleartextColumn(null, "State")),
                List.of(cleartextColumn(null, "PhoneNumber")),
                List.of(cleartextColumn(null, "Title")),
                List.of(cleartextColumn(null, "Level")),
                List.of(cleartextColumn(null, "Notes"))
        ));
        final SparkEncryptConfig noHeadersConfig = SparkEncryptConfig.builder()
                .source(noHeadersFile)
                .targetDir(output)
                .overwrite(true)
                .csvInputNullValue("John")
                .csvOutputNullValue("NULLJOHNNULL")
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .settings(TEST_CONFIG_DATA_SAMPLE.getSettings())
                .tableSchema(noHeadersSchema)
                .build();
        runConfig(noHeadersConfig);
        final Path mergedNoHeadersOutput = CsvTestUtility.mergeOutput(Path.of(noHeadersConfig.getTargetFile()));
        final List<String> noHeaderLines = Files.readAllLines(mergedNoHeadersOutput);
        // "John" values were read as null and should be written back out as "NULLJOHNNULL".
        assertTrue(noHeaderLines.stream().anyMatch(row -> row.startsWith("NULLJOHNNULL,Smith")));
    }

    // Check that validation fails because cleartext columns aren't allowed but cleartext columns are in the schema.
    @Test
    void checkAllowCleartextValidationTest() {
        final String noHeadersFile = "../samples/csv/data_sample_no_headers.csv";
        final TableSchema schema = new MappedTableSchema(List.of(cleartextColumn("cleartext")));
        final var config = SparkEncryptConfig.builder()
                .source(noHeadersFile)
                .targetDir(output)
                .overwrite(true)
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .tableSchema(schema);
        final Exception e = assertThrowsExactly(C3rIllegalArgumentException.class,
                () -> config.settings(ClientSettings.highAssuranceMode()).build());
        assertEquals("Cleartext columns found in the schema, but allowCleartext is false. Target column names: [`cleartext`]",
                e.getMessage());
        assertDoesNotThrow(() -> config.settings(ClientSettings.lowAssuranceMode()).build());
    }
}
| 2,392 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/ParquetTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.io.ParquetRowReader;
import com.amazonaws.c3r.io.ParquetRowWriter;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Utility functions for reading and merging Parquet data in test files.
 */
public final class ParquetTestUtility {
    /**
     * Hidden utility class constructor.
     */
    private ParquetTestUtility() {
    }

    /**
     * Takes a row of Parquet values and returns them as an array of string values ordered by column indices.
     *
     * @param row Parquet values looked up by name
     * @param indices Mapping of column index to name
     * @return Ordered Parquet values converted to strings
     */
    private static String[] rowToStringArray(final Row<ParquetValue> row, final Map<Integer, ColumnHeader> indices) {
        final String[] strings = new String[row.size()];
        for (int i = 0; i < row.size(); i++) {
            // NOTE(review): assumes getValue never returns null for a mapped column; the null
            // guard applies only to its toString() result — confirm ParquetValue.toString
            // can actually return null.
            strings[i] = Objects.requireNonNullElse(row.getValue(indices.get(i)).toString(), "");
        }
        return strings;
    }

    /**
     * Reads a Parquet file into a list of ordered string values.
     *
     * @param filePath Location of the file to read
     * @return Contents of the file as a list of rows and the rows are string values
     */
    public static List<String[]> readContentAsStringArrays(final String filePath) {
        final ParquetRowReader reader = ParquetRowReader.builder().sourceName(filePath).build();
        final ParquetSchema parquetSchema = reader.getParquetSchema();
        final Map<Integer, ColumnHeader> columnIndices = parquetSchema.getHeaders().stream()
                .collect(Collectors.toMap(
                        parquetSchema::getColumnIndex,
                        Function.identity()
                ));
        final var mapRows = readAllRows(reader);
        return mapRows.stream().map(row -> rowToStringArray(row, columnIndices)).collect(Collectors.toList());
    }

    /**
     * Reads all the rows from a Parquet file to their Parquet type.
     *
     * @param reader Reads a particular Parquet file
     * @return Contents of the file as a list of rows with Parquet values
     */
    public static List<Row<ParquetValue>> readAllRows(final ParquetRowReader reader) {
        final var rows = new ArrayList<Row<ParquetValue>>();
        while (reader.hasNext()) {
            final var row = reader.next();
            rows.add(row);
        }
        return rows;
    }

    /**
     * Lists the {@code .parquet} part files directly inside the given directory.
     *
     * @param output Directory containing Parquet part files
     * @return Paths of the part files (order unspecified)
     */
    private static List<Path> getOutputPaths(final Path output) {
        return Arrays.stream(Objects.requireNonNull(output.toFile().listFiles()))
                .filter(file -> file.getAbsolutePath().endsWith(".parquet"))
                .map(File::toPath)
                .collect(Collectors.toList());
    }

    /**
     * Merges all the Parquet part files in a directory into a single temporary Parquet file.
     *
     * @param output Directory containing {@code .parquet} part files
     * @return Path to the merged Parquet file
     * @throws IOException If the merged temp file can't be created
     */
    public static Path mergeOutput(final Path output) throws IOException {
        final Path mergedOutput = FileTestUtility.createTempFile("test", ".parquet");
        final List<Path> paths = getOutputPaths(output);
        final List<Row<ParquetValue>> mergedLines = new ArrayList<>();
        ParquetRowReader reader;
        // NOTE(review): assumes all part files share one schema and at least one part file
        // exists (otherwise a null schema reaches the writer builder) — confirm with callers.
        ParquetSchema parquetSchema = null;
        for (Path p : paths) {
            reader = ParquetRowReader.builder().sourceName(p.toString()).build();
            if (parquetSchema == null) {
                parquetSchema = reader.getParquetSchema();
            }
            mergedLines.addAll(readAllRows(reader));
        }
        final ParquetRowWriter writer = ParquetRowWriter.builder()
                .parquetSchema(parquetSchema)
                .targetName(mergedOutput.toString()).build();
        try {
            for (Row<ParquetValue> row : mergedLines) {
                writer.writeRow(row);
            }
            writer.flush();
        } finally {
            // Close even when a write throws so the file handle isn't leaked.
            writer.close();
        }
        return mergedOutput;
    }
}
| 2,393 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/CsvTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.univocity.parsers.csv.CsvParser;
import com.univocity.parsers.csv.CsvParserSettings;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
/**
 * Utility functions for common CSV data manipulation needed during testing.
 */
public final class CsvTestUtility {
    /**
     * Hidden utility class constructor.
     */
    private CsvTestUtility() {
    }

    /**
     * Creates a simple CSV parser for the specified columns that will read out {@code maxColumns}.
     *
     * @param fileName Location of the file to read
     * @param maxColumns Maximum number of columns expected from file
     * @return Parser for getting file contents
     * @throws RuntimeException If the CSV file is not found
     */
    public static CsvParser getCsvParser(final String fileName, final Integer maxColumns) {
        try {
            final CsvParserSettings settings = getBasicParserSettings(maxColumns, false);
            // creates a CSV parser
            final CsvParser parser = new CsvParser(settings);
            final InputStreamReader reader = new InputStreamReader(new FileInputStream(fileName), StandardCharsets.UTF_8);
            parser.beginParsing(reader);
            return parser;
        } catch (FileNotFoundException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Create basic parser settings that don't modify/NULL any values
     * aside from the default whitespace trimming.
     *
     * @param maxColumns Most columns allowed in the CSV file
     * @param keepQuotes If quotes should be kept as part of the string read in or not
     * @return Settings to bring up a simple CSV parser
     */
    private static CsvParserSettings getBasicParserSettings(final Integer maxColumns, final boolean keepQuotes) {
        final CsvParserSettings settings = new CsvParserSettings();
        settings.setLineSeparatorDetectionEnabled(true);
        settings.setNullValue("");
        settings.setEmptyValue("\"\"");
        settings.setKeepQuotes(keepQuotes);
        if (maxColumns != null) {
            settings.setMaxColumns(maxColumns);
        }
        return settings;
    }

    /**
     * Read the contents of the CSV file as rows, mapping column names to content.
     *
     * <p>
     * The column names are normalized per the C3R's normalizing (lower-cased and whitespace trimmed).
     *
     * @param fileName File to read
     * @return Rows read in the order they appear
     * @throws C3rIllegalArgumentException If the file does not have the same number of entries in each row
     */
    public static List<Map<String, String>> readRows(final String fileName) {
        final CsvParserSettings settings = getBasicParserSettings(null, true);
        settings.setHeaderExtractionEnabled(true);
        final CsvParser parser = new CsvParser(settings);
        return parser.parseAllRecords(new File(fileName)).stream().map(r -> r.toFieldMap()).collect(Collectors.toList());
    }

    /**
     * Read the file content with rows as arrays. There is no mapping to column headers, if any, in the file.
     *
     * @param fileName Location of file to read
     * @param keepQuotes If quotes should be kept as part of the string read in or not
     * @return List of rows where each row is an array of values
     * @throws RuntimeException If the file is not found
     */
    public static List<String[]> readContentAsArrays(final String fileName, final boolean keepQuotes) {
        final CsvParserSettings settings = getBasicParserSettings(null, keepQuotes);
        return new CsvParser(settings).parseAll(new File(fileName), StandardCharsets.UTF_8);
    }

    /**
     * Lists the {@code .csv} part files directly inside the given directory.
     *
     * @param output Directory containing CSV part files
     * @return Paths of the part files (order unspecified)
     */
    private static List<Path> getOutputPaths(final Path output) {
        return Arrays.stream(Objects.requireNonNull(output.toFile().listFiles()))
                .filter(file -> file.getAbsolutePath().endsWith(".csv"))
                .map(File::toPath)
                .collect(Collectors.toList());
    }

    /**
     * Merges the CSV part files in a directory into one temporary CSV file, keeping a single
     * header row.
     *
     * @param output Directory containing {@code .csv} part files
     * @return Path to the merged CSV file
     * @throws IOException If a part file can't be read or the merged file can't be written
     */
    public static Path mergeOutput(final Path output) throws IOException {
        final Path mergedOutput = FileTestUtility.createTempFile("test", ".csv");
        final List<String> mergedLines = new ArrayList<>();
        for (Path p : getOutputPaths(output)) {
            final List<String> lines = Files.readAllLines(p, StandardCharsets.UTF_8);
            if (!lines.isEmpty()) {
                if (mergedLines.isEmpty()) {
                    mergedLines.add(lines.get(0)); // add header only once
                }
                // subList is only read once here, so no defensive copy is needed.
                mergedLines.addAll(lines.subList(1, lines.size()));
            }
        }
        Files.write(mergedOutput, mergedLines, StandardCharsets.UTF_8);
        return mergedOutput;
    }
}
| 2,394 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/schema/ParquetSchemaGeneratorTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import org.apache.spark.SparkException;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class ParquetSchemaGeneratorTest {
private final SparkSession sparkSession = SparkSessionTestUtility.initSparkSession();
private ParquetSchemaGenerator getTestSchemaGenerator(final String file) throws IOException {
final String output = FileTestUtility.resolve("schema.json").toString();
return ParquetSchemaGenerator.builder()
.inputParquetFile(file)
.targetJsonFile(output)
.overwrite(true)
.sparkSession(sparkSession)
.build();
}
@Test
public void getSourceHeadersTest() throws IOException {
assertEquals(
GeneralTestUtility.DATA_SAMPLE_HEADERS,
getTestSchemaGenerator("../samples/parquet/data_sample.parquet").getSourceHeaders());
}
@Test
public void getSourceColumnCountTest() throws IOException {
assertEquals(
Collections.nCopies(GeneralTestUtility.DATA_SAMPLE_HEADERS.size(), ClientDataType.STRING),
getTestSchemaGenerator("../samples/parquet/data_sample.parquet").getSourceColumnTypes());
}
@Test
public void getSourceColumnTypesTest() throws IOException {
assertEquals(
List.of(ClientDataType.UNKNOWN,
ClientDataType.STRING,
ClientDataType.UNKNOWN,
ClientDataType.UNKNOWN,
ClientDataType.UNKNOWN,
ClientDataType.UNKNOWN,
ClientDataType.UNKNOWN,
ClientDataType.UNKNOWN,
ClientDataType.UNKNOWN),
getTestSchemaGenerator("../samples/parquet/rows_100_groups_10_prim_data.parquet").getSourceColumnTypes());
}
@Test
public void emptyFileTest() throws IOException {
final String emptyParquetFile = FileTestUtility.createTempFile("empty", ".parquet").toString();
assertThrows(SparkException.class, () ->
getTestSchemaGenerator(emptyParquetFile));
}
} | 2,395 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/schema/InteractiveSchemaGeneratorTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class InteractiveSchemaGeneratorTest {
// Sample source column headers supplied to every generator under test.
private final List<ColumnHeader> headers = Stream.of(
"header1",
"header2",
"header3"
).map(ColumnHeader::new)
.collect(Collectors.toList());
// One data type per header: all STRING, or all UNKNOWN, matching `headers` in length.
private final List<ClientDataType> stringColumnTypes = Collections.nCopies(headers.size(), ClientDataType.STRING);
private final List<ClientDataType> unknownColumnTypes = Collections.nCopies(headers.size(), ClientDataType.UNKNOWN);
// Expected JSON for a header-row (mapped) schema: header2 mapped to sealed, fingerprint and
// cleartext target columns, header3 sealed with MAX padding.
private final String exampleMappedSchemaString =
String.join("\n",
"{",
" \"headerRow\": true,",
" \"columns\": [",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2_sealed\",",
" \"type\": \"sealed\",",
" \"pad\": {",
" \"type\": \"NONE\"",
" }",
" },",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2_fingerprint\",",
" \"type\": \"fingerprint\"",
" },",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2\",",
" \"type\": \"cleartext\"",
" },",
" {",
" \"sourceHeader\": \"header3\",",
" \"targetHeader\": \"header3\",",
" \"type\": \"sealed\",",
" \"pad\": {",
" \"type\": \"MAX\",",
" \"length\": \"0\"",
" }",
" }",
" ]",
"}");
// Same mapped schema but with no cleartext column (the third column becomes FINGERPRINT),
// as produced when cleartext is disallowed; note the upper-case type names.
private final String exampleMappedSchemaNoCleartextString =
String.join("\n",
"{",
" \"headerRow\": true,",
" \"columns\": [",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2_sealed\",",
" \"type\": \"SEALED\",",
" \"pad\": {",
" \"type\": \"NONE\"",
" }",
" },",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2_fingerprint\",",
" \"type\": \"FINGERPRINT\"",
" },",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2\",",
" \"type\": \"FINGERPRINT\"",
" },",
" {",
" \"sourceHeader\": \"header3\",",
" \"targetHeader\": \"header3\",",
" \"type\": \"SEALED\",",
" \"pad\": {",
" \"type\": \"MAX\",",
" \"length\": \"0\"",
" }",
" }",
" ]",
"}");
// Positional (no header row) counterpart of the mapped schema above: column 1 is skipped,
// column 2 maps to three targets, column 3 is sealed with MAX padding.
private final String examplePositionalSchemaString =
String.join("\n",
"{",
" \"headerRow\": false,",
" \"columns\": [",
" [],",
" [",
" {",
" \"type\": \"sealed\",",
" \"pad\": {",
" \"type\": \"NONE\"",
" },",
" \"targetHeader\": \"targetheader2_sealed\"",
" },",
" {",
" \"type\": \"fingerprint\",",
" \"targetHeader\": \"targetheader2_fingerprint\"",
" },",
" {",
" \"type\": \"cleartext\",",
" \"targetHeader\": \"targetheader2\"",
" }",
" ],",
" [",
" {",
" \"type\": \"sealed\",",
" \"pad\": {",
" \"type\": \"MAX\",",
" \"length\": 0",
" },",
" \"targetHeader\": \"targetheader3\"",
" }",
" ]",
" ]",
"}");
// Mapped schema variant where every target column is cleartext.
private final String exampleMappedSchemaAllCleartextString =
String.join("\n",
"{",
" \"headerRow\": true,",
" \"columns\": [",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2_1\",",
" \"type\": \"cleartext\"",
" },",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2_2\",",
" \"type\": \"cleartext\"",
" },",
" {",
" \"sourceHeader\": \"header2\",",
" \"targetHeader\": \"targetheader2_3\",",
" \"type\": \"cleartext\"",
" },",
" {",
" \"sourceHeader\": \"header3\",",
" \"targetHeader\": \"header3\",",
" \"type\": \"cleartext\"",
" }",
" ]",
"}");
// Positional schema variant where every target column is cleartext.
private final String examplePositionalSchemaAllCleartextString =
String.join("\n",
"{",
" \"headerRow\": false,",
" \"columns\": [",
" [],",
" [",
" {",
" \"type\": \"cleartext\",",
" \"targetHeader\": \"targetheader2_1\"",
" },",
" {",
" \"type\": \"cleartext\",",
" \"targetHeader\": \"targetheader2_2\"",
" },",
" {",
" \"type\": \"cleartext\",",
" \"targetHeader\": \"targetheader2_3\"",
" }",
" ],",
" [",
" {",
" \"type\": \"cleartext\",",
" \"targetHeader\": \"targetheader3\"",
" }",
" ]",
" ]",
"}");
// Generator under test, rebuilt by createInteractiveSchemaGenerator for each scenario.
private InteractiveSchemaGenerator schemaGen;
// Temp file the generated schema JSON is written to.
private Path targetSchema;
// Captures everything the generator prints so tests can inspect error output.
private ByteArrayOutputStream consoleOutput;
@BeforeEach
public void setup() throws IOException {
// Each test writes its generated schema to a fresh temp location.
targetSchema = FileTestUtility.resolve("schema.json");
}
/**
 * Initializes {@code schemaGen} with simulated console input (a trailing newline is appended)
 * and wires its console output into {@code consoleOutput} for later inspection.
 */
private void createInteractiveSchemaGenerator(final String simulatedUserInput,
final List<ColumnHeader> headers,
final List<ClientDataType> types,
final ClientSettings clientSettings) {
    final BufferedReader simulatedStdIn = new BufferedReader(new StringReader(simulatedUserInput + "\n"));
    consoleOutput = new ByteArrayOutputStream();
    final PrintStream capturedStdOut = new PrintStream(consoleOutput, true, StandardCharsets.UTF_8);
    schemaGen = InteractiveSchemaGenerator.builder()
            .sourceHeaders(headers)
            .sourceColumnTypes(types)
            .targetJsonFile(targetSchema.toString())
            .consoleInput(simulatedStdIn)
            .consoleOutput(capturedStdOut)
            .clientSettings(clientSettings)
            .build();
}
// Header and type lists of different lengths must be rejected at construction time.
@Test
public void validateErrorWithMismatchedColumnCounts() {
    final List<ClientDataType> noTypes = List.of();
    assertThrows(C3rIllegalArgumentException.class,
            () -> createInteractiveSchemaGenerator("", headers, noTypes, null));
}
    // run() must fail with C3rRuntimeException when console input ends before all
    // prompts are answered; it succeeds once every prompt gets an answer
    // ("0\n0\n0" — presumably one "0" per source header; confirm against `headers`).
    @Test
    public void validateUnexpectedUserInputEndError() {
        final List<String> incompleteUserInputs = List.of("", "0", "0\n", "0\n0", "0\n0\n");
        final Consumer<String> schemaGenRunner = (userInput) ->
                InteractiveSchemaGenerator.builder()
                        .sourceHeaders(headers)
                        .sourceColumnTypes(stringColumnTypes)
                        .targetJsonFile(targetSchema.toString())
                        .consoleInput(new BufferedReader(new StringReader(userInput)))
                        .consoleOutput(new PrintStream(new ByteArrayOutputStream(), true, StandardCharsets.UTF_8))
                        .clientSettings(null)
                        .build()
                        .run();
        for (var input : incompleteUserInputs) {
            assertThrows(C3rRuntimeException.class, () -> schemaGenRunner.accept(input));
        }
        assertDoesNotThrow(() -> schemaGenRunner.accept("0\n0\n0"));
    }
@Test
public void promptNonnegativeIntValidTest() {
final List<String> validInputs = List.of("42", "0", "100");
for (var input : validInputs) {
createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
assertEquals(
Integer.valueOf(input),
schemaGen.promptNonNegativeInt("", null, 100));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
}
}
@Test
public void promptNonnegativeIntInvalidTest() {
final List<String> validInputs = List.of("", "NotANumber", "-1", "101");
for (var input : validInputs) {
createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
assertNull(schemaGen.promptNonNegativeInt("", null, 100));
assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
}
}
    // A blank answer takes the supplied default (2); an explicit answer wins.
    @Test
    public void promptNonNegativeIntValidDefaultTest() {
        final List<String> validInputs = List.of("1", "", "3");
        for (var input : validInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
            assertEquals(
                    input.isBlank() ? 2 : Integer.parseInt(input),
                    schemaGen.promptNonNegativeInt("", 2, 100));
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }
    // Yes/no parsing: any casing of y/yes maps to true and n/no to false,
    // regardless of the default. A blank answer returns the default; only a
    // blank answer with a null default produces an "expected" complaint.
    @Test
    public void promptYesOrNoValidTest() {
        final List<Boolean> defaultBooleanAnswers = Arrays.asList(null, true, false);
        final List<String> validYesStrings = List.of("y", "yes", "Y", "YES");
        for (var input : validYesStrings) {
            for (var answer : defaultBooleanAnswers) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
                assertTrue(schemaGen.promptYesOrNo("", answer));
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
        final List<String> validNoStrings = List.of("n", "no", "N", "NO");
        for (var input : validNoStrings) {
            for (var answer : defaultBooleanAnswers) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
                assertFalse(schemaGen.promptYesOrNo("", answer));
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
        // Blank input: echoes back the default, complaining only when there is none.
        for (var answer : defaultBooleanAnswers) {
            createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
            assertEquals(answer, schemaGen.promptYesOrNo("", answer));
            if (answer == null) {
                assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
            } else {
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
    }
@Test
public void promptYesOrNoInvalidTest() {
createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
assertNull(schemaGen.promptYesOrNo("", null));
assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
createInteractiveSchemaGenerator("ja", headers, stringColumnTypes, null);
assertNull(schemaGen.promptYesOrNo("", null));
assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
createInteractiveSchemaGenerator("nein", headers, stringColumnTypes, null);
assertNull(schemaGen.promptYesOrNo("", null));
assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
}
    // With permissive settings (null or low-assurance), every column type is
    // selectable by its full name or first letter, in any case.
    @Test
    public void promptColumnTypeValidTest() {
        final List<String> validCleartextInputs = List.of("c", "C", "cleartext", "CLEARTEXT");
        final List<ClientSettings> permissiveSettings = new ArrayList<>();
        permissiveSettings.add(null);
        permissiveSettings.add(ClientSettings.lowAssuranceMode());
        for (var settings : permissiveSettings) {
            for (var input : validCleartextInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.CLEARTEXT, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
            final List<String> validFingerprintInputs = List.of("f", "F", "fingerprint", "FINGERPRINT");
            for (var input : validFingerprintInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.FINGERPRINT, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
            final List<String> validSealedInputs = List.of("s", "S", "sealed", "SEALED");
            for (var input : validSealedInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.SEALED, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
    }
    // Under high-assurance settings cleartext is disallowed (prompt returns null
    // with an "expected" message), while fingerprint and sealed remain selectable.
    @Test
    public void promptColumnTypeRestrictiveSettingsTest() {
        final List<String> validCleartextInputs = List.of("c", "C", "cleartext", "CLEARTEXT");
        for (var input : validCleartextInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertNull(schemaGen.promptColumnType());
            assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        }
        final List<String> validFingerprintInputs = List.of("f", "F", "fingerprint", "FINGERPRINT");
        for (var input : validFingerprintInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertEquals(ColumnType.FINGERPRINT, schemaGen.promptColumnType());
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
        final List<String> validSealedInputs = List.of("s", "S", "sealed", "SEALED");
        for (var input : validSealedInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertEquals(ColumnType.SEALED, schemaGen.promptColumnType());
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }
@Test
public void promptColumnTypeInvalidTest() {
final List<String> validCleartextInputs = List.of("", "a", "unrostricted", "solekt", "joyn");
for (var input : validCleartextInputs) {
createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
assertNull(schemaGen.promptColumnType());
assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
}
}
    // Suffix prompting: cleartext never gets a suffix; sealed/fingerprint get
    // their default suffix on "y" (or blank for fingerprint) and none on "n".
    @Test
    public void promptTargetHeaderSuffixTest() {
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("y", headers, stringColumnTypes, null);
        assertEquals("_sealed", schemaGen.promptTargetHeaderSuffix(ColumnType.SEALED));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.SEALED));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        // Blank answer defaults to adding the suffix for fingerprint columns.
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals("_fingerprint", schemaGen.promptTargetHeaderSuffix(ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    // Target header prompting: blank keeps the source header, explicit input is
    // used (and normalized to lower case, with a "normalized" notice), and a
    // header longer than the Glue byte limit is rejected.
    @Test
    public void promptTargetHeaderTest() {
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("a"), schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("b", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertFalse(consoleOutput.toString().toLowerCase().contains("normalized"));
        createInteractiveSchemaGenerator("B", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertTrue(consoleOutput.toString().toLowerCase().contains("normalized"));
        // "+ 1" appends the character '1', pushing the name one byte past the limit.
        createInteractiveSchemaGenerator("b".repeat(Limits.GLUE_MAX_HEADER_UTF8_BYTE_LENGTH) + 1, headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    // Target header prompting when there is no source header to default to.
    @Test
    public void promptTargetHeaderWithoutSourceHeadersTest() {
        // empty input does _not_ give you a default target header when no source headers exist
        createInteractiveSchemaGenerator("", null, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        // providing input for a target header when source headers are null remains unchanged
        // NOTE(review): these two cases pass `headers` (non-null) to the generator
        // while passing a null source header to promptTargetHeader — confirm intended.
        createInteractiveSchemaGenerator("b", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("B", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertTrue(consoleOutput.toString().toLowerCase().contains("normalized"));
    }
    // A target header may be used once; asking for the same header again on the
    // same generator is rejected with an "expected" message.
    @Test
    public void promptTargetHeaderAlreadyUsedHeaderTest() {
        createInteractiveSchemaGenerator("\n", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("header"), schemaGen.promptTargetHeader(new ColumnHeader("header"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("header"), ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    // Fingerprint columns get the default suffix appended, including when the
    // chosen name is exactly at the max length minus the suffix length.
    @Test
    public void promptTargetHeaderWithSuffixTest() {
        final String suffix = ColumnHeader.DEFAULT_FINGERPRINT_SUFFIX;
        createInteractiveSchemaGenerator("\n", headers, stringColumnTypes, null);
        assertEquals(
                new ColumnHeader("a_fingerprint"),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("b".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH - suffix.length())
                + "\n", headers, stringColumnTypes, null);
        assertEquals(
                new ColumnHeader(
                        "b".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH - suffix.length())
                                + suffix),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    // A max-length name leaves no room for the fingerprint suffix: prompt fails
    // with an "unable to add header suffix" message.
    @Test
    public void promptTargetHeaderCannotAddSuffixTest() {
        createInteractiveSchemaGenerator("a".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH)
                + "\n", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("unable to add header suffix"));
    }
    // Pad type parsing: first letter or full name of none/fixed/max; a blank
    // answer takes the default if one is given, otherwise fails; unknown input fails.
    @Test
    public void promptPadTypeTest() {
        final var header = new ColumnHeader("a");
        final PadType nullDefaultType = null;
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptPadType(header, nullDefaultType));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, PadType.MAX));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertEquals(PadType.NONE, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("none", headers, stringColumnTypes, null);
        assertEquals(PadType.NONE, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("f", headers, stringColumnTypes, null);
        assertEquals(PadType.FIXED, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("fixed", headers, stringColumnTypes, null);
        assertEquals(PadType.FIXED, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("m", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("max", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("unknown", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptPadType(header, nullDefaultType));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    // Full pad prompting: "n" yields the default pad; "f"/"m" followed by a
    // length yield fixed/max pads with that length.
    @Test
    public void promptPadTest() {
        final var header = new ColumnHeader("a");
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertEquals(
                Pad.DEFAULT,
                schemaGen.promptPad(header));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("f\n42", headers, stringColumnTypes, null);
        assertEquals(
                Pad.builder().type(PadType.FIXED).length(42).build(),
                schemaGen.promptPad(header));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("m\n42", headers, stringColumnTypes, null);
        assertEquals(
                Pad.builder().type(PadType.MAX).length(42).build(),
                schemaGen.promptPad(header));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    // A full column dialogue (type, target name, suffix choice, padding) builds
    // the expected ColumnSchema when a source header is present.
    @Test
    public void promptColumnInfoWithSourceHeadersTest() {
        final String columnType = "sealed";
        final String targetName = "target";
        final String useSuffix = "no";
        final String paddingType = "none";
        createInteractiveSchemaGenerator(String.join("\n",
                        columnType,
                        targetName,
                        useSuffix,
                        paddingType),
                headers,
                stringColumnTypes, null);
        assertEquals(
                ColumnSchema.builder()
                        .sourceHeader(new ColumnHeader("source"))
                        .targetHeader(new ColumnHeader("target"))
                        .type(ColumnType.SEALED)
                        .pad(Pad.DEFAULT)
                        .build(),
                schemaGen.promptColumnInfo(new ColumnHeader("source"), 1, 2));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    // With an unknown client data type the column is forced to cleartext (no
    // type prompt) and an informational message is printed.
    @Test
    public void promptColumnInfoWithSourceHeadersAndUnknownTypeTest() {
        createInteractiveSchemaGenerator("target", headers, unknownColumnTypes, null);
        assertEquals(
                ColumnSchema.builder()
                        .sourceHeader(new ColumnHeader("source"))
                        .targetHeader(new ColumnHeader("target"))
                        .type(ColumnType.CLEARTEXT)
                        .build(),
                schemaGen.promptColumnInfo(new ColumnHeader("source"), 1, 2));
        assertTrue(consoleOutput.toString().toLowerCase().contains("cryptographic computing is not supported"));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }
@Test
public void promptColumnInfoWithoutSourceHeadersTest() {
createInteractiveSchemaGenerator("", null, stringColumnTypes, null);
final String columnType = "sealed";
final String targetName = "target";
final String useSuffix = "no";
final String paddingType = "none";
createInteractiveSchemaGenerator(String.join("\n",
columnType,
targetName,
useSuffix,
paddingType),
headers,
stringColumnTypes, null);
assertEquals(
ColumnSchema.builder()
.sourceHeader(null)
.targetHeader(new ColumnHeader("target"))
.type(ColumnType.SEALED)
.pad(Pad.builder().type(PadType.NONE).build())
.build(),
schemaGen.promptColumnInfo(null, 1, 2));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
}
    // Null source header plus an unknown client type: cleartext is forced and
    // only the target header is prompted.
    @Test
    public void promptColumnInfoWithoutSourceHeadersAndUnknownTypeTest() {
        createInteractiveSchemaGenerator("target", null, unknownColumnTypes, null);
        assertEquals(
                ColumnSchema.builder()
                        .targetHeader(new ColumnHeader("target"))
                        .type(ColumnType.CLEARTEXT)
                        .build(),
                schemaGen.promptColumnInfo(null, 1, 2));
        assertTrue(consoleOutput.toString().toLowerCase().contains("cryptographic computing is not supported"));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }
@Test
public void runGenerateNoSchemaTest() {
// 0 target columns to generate for each source column
createInteractiveSchemaGenerator("0\n".repeat(headers.size()), headers, stringColumnTypes, null);
schemaGen.run();
assertTrue(consoleOutput.toString().contains("No target columns were specified."));
assertEquals(0, targetSchema.toFile().length());
}
    // End-to-end run with named source headers: the scripted answers create one
    // sealed, one fingerprint and one cleartext column from header2 plus a sealed
    // column from header3; the written JSON must match the expected mapped schema.
    @Test
    public void runGenerateSchemaWithSourceHeadersTest() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        "sealed", // header2, column 1 type
                        "targetHeader2", // header2, column 1 target header
                        "yes", // header2, column 1 use suffix
                        "none", // header2, column 1 padding type
                        // header2, column 2
                        "fingerprint", // header2, column 2 type
                        "targetHeader2", // header2, column 2 target header
                        "yes", // header2, column 2 use suffix
                        // header2, column 3
                        "cleartext", // header2, column 3 type
                        "targetHeader2", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        "sealed",
                        "", // header3, column 1 target header (default)
                        "n", // header3, column 1 use suffix
                        "max", // header3, column 1 padding type
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    // End-to-end run with named headers and unknown client types: every column is
    // forced to cleartext, so the script only supplies counts and target names.
    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesTest() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        // type is cleartext due to unknown client type
                        "targetHeader2_1", // header2, column 1 target header
                        // header2, column 2
                        // type is cleartext due to unknown client type
                        "targetHeader2_2", // header2, column 2 target header
                        // header2, column 3
                        // type is cleartext due to unknown client type
                        "targetHeader2_3", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        // type is cleartext due to unknown client type
                        "" // header3, column 1 target header (default)
                );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    // End-to-end run with positional (headerless) source columns; every target
    // header must be typed explicitly since there is no source name to default to.
    @Test
    public void runGenerateSchemaWithoutSourceHeadersTest() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        "sealed", // header2, column 1 type
                        "targetHeader2", // header2, column 1 target header
                        "yes", // header2, column 1 use suffix
                        "none", // header2, column 1 padding type
                        // header2, column 2
                        "fingerprint", // header2, column 2 type
                        "targetHeader2", // header2, column 2 target header
                        "yes", // header2, column 2 use suffix
                        // header2, column 3
                        "cleartext", // header2, column 3 type
                        "targetHeader2", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        "sealed",
                        "targetHeader3", // header3, column 1 target header
                        "n", // header3, column 1 use suffix
                        "max", // header3, column 1 padding type
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, null, stringColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(examplePositionalSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    // Positional source plus unknown client types: all columns are forced to
    // cleartext; only counts and explicit target names are scripted.
    @Test
    public void runGenerateSchemaWithoutSourceHeadersUnknownTypesTest() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        // type is cleartext due to unknown client type
                        "targetHeader2_1", // header2, column 1 target header
                        // header2, column 2
                        // type is cleartext due to unknown client type
                        "targetHeader2_2", // header2, column 2 target header
                        // header2, column 3
                        // type is cleartext due to unknown client type
                        "targetHeader2_3", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        // type is cleartext due to unknown client type
                        "targetHeader3" // header3, column 1 target header
                );
        createInteractiveSchemaGenerator(userInput, null, unknownColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(examplePositionalSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    // Same scenario as runGenerateSchemaWithSourceHeadersTest, but each prompt is
    // first answered with a bad value; the generator must re-prompt and still
    // produce the same expected schema.
    @Test
    public void runTestWithBadInputsMixedIn() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "zero", // bad number of columns for header1
                        "0", // number of columns for header1
                        // source header2
                        "three", // bad number of columns
                        "3", // number of columns
                        // header 2, column 1
                        "special", // bad column type
                        "sealed", // header 2, column 1 type
                        "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 1 bad target header
                        "targetHeader2", // header 2, column 1 target header
                        "maybe", // header 2, column 1 bad use suffix
                        "yes", // header 2, column 1 use suffix
                        "super", // header 2, column 1 bad padding type
                        "none", // header 2, column 1 padding type
                        // header 2, column 2
                        "goin", // header 2, column 2 bad type
                        "fingerprint", // header 2, column 2 type
                        "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 2 bad target header
                        "targetHeader2", // header 2, column 2 target header
                        "I can't decide", // header 2, column 2 bad use suffix
                        "yes", // header 2, column 2 use suffix
                        // header 2, column 3
                        "plaintext", // header 2, column 3 bad type
                        "cleartext", // header 2, column 3 type
                        "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 3 bad target header
                        "targetHeader2", // header 2, column 3 target header
                        // source header3
                        "one", // bad number of columns for header3
                        "", // number of columns for header3 (default to 1)
                        "sealed",
                        "", // header3, column 1 target header (default)
                        "what", // bad header3, column 1 use suffix
                        "n", // header3, column 1 use suffix
                        "mux", // bad header3, column 1 padding type
                        "max", // header3, column 1 padding type
                        "zero", // bad header3, column 1 padding length
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final TableSchema expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final TableSchema actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson(actualSchema));
    }
    // CsvSchemaGenerator's builder requires headers flag, target file, input file,
    // and overwrite flag: omitting any one must raise NullPointerException.
    @Test
    public void nullValueCsvSchemaGeneratorTest() {
        // no headers
        assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
                .inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
                .targetJsonFile(targetSchema.toString())
                .overwrite(true).build());
        // no target
        assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
                .inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
                .overwrite(true)
                .hasHeaders(true).build());
        // no input
        assertThrows(NullPointerException.class,
                () -> CsvSchemaGenerator.builder()
                        .targetJsonFile(targetSchema.toString())
                        .overwrite(true)
                        .hasHeaders(true).build());
        // no overwrite
        assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
                .inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
                .targetJsonFile(targetSchema.toString())
                .hasHeaders(true).build());
    }
    // Same named-header scenario under explicit low-assurance settings: all three
    // column types remain allowed and the mapped schema is produced unchanged.
    @Test
    public void runGenerateSchemaWithSourceHeadersPermissiveSettingsTest() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        "sealed", // header2, column 1 type
                        "targetHeader2", // header2, column 1 target header
                        "yes", // header2, column 1 use suffix
                        "none", // header2, column 1 padding type
                        // header2, column 2
                        "fingerprint", // header2, column 2 type
                        "targetHeader2", // header2, column 2 target header
                        "yes", // header2, column 2 use suffix
                        // header2, column 3
                        "cleartext", // header2, column 3 type
                        "targetHeader2", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        "sealed",
                        "", // header3, column 1 target header (default)
                        "n", // header3, column 1 use suffix
                        "max", // header3, column 1 padding type
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, ClientSettings.lowAssuranceMode());
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    // Under high-assurance settings a "cleartext" answer is rejected and the user
    // must re-enter a type (fingerprint here); the result matches the
    // no-cleartext expected schema.
    @Test
    public void runGenerateSchemaWithSourceHeadersRestrictiveSettingsTest() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        "sealed", // header2, column 1 type
                        "targetHeader2", // header2, column 1 target header
                        "yes", // header2, column 1 use suffix
                        "none", // header2, column 1 padding type
                        // header2, column 2
                        "fingerprint", // header2, column 2 type
                        "targetHeader2", // header2, column 2 target header
                        "yes", // header2, column 2 use suffix
                        // header2, column 3
                        "cleartext", // header2, column 3 type, NOT ALLOWED
                        "fingerprint", // header2, column 3 retry type (accepted)
                        "targetHeader2", // header2, column 3 target header
                        "n", // header2, column 3 use suffix
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        "sealed",
                        "", // header3, column 1 target header (default)
                        "n", // header3, column 1 use suffix
                        "max", // header3, column 1 padding type
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaNoCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    // Unknown client types under low-assurance settings: cleartext fallback is
    // permitted, so the all-cleartext mapped schema is produced.
    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesPermissiveSettingsTest() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        // type is cleartext due to unknown client type
                        "targetHeader2_1", // header2, column 1 target header
                        // header2, column 2
                        // type is cleartext due to unknown client type
                        "targetHeader2_2", // header2, column 2 target header
                        // header2, column 3
                        // type is cleartext due to unknown client type
                        "targetHeader2_3", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        // type is cleartext due to unknown client type
                        "" // header3, column 1 target header (default)
                );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, ClientSettings.lowAssuranceMode());
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    // Unknown client types under high-assurance settings: cleartext is forbidden,
    // so no column can be emitted and the schema file stays empty.
    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesRestrictiveSettingsTest() {
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        // type is cleartext due to unknown client type
                        "targetHeader2_1", // header2, column 1 target header
                        // header2, column 2
                        // type is cleartext due to unknown client type
                        "targetHeader2_2", // header2, column 2 target header
                        // header2, column 3
                        // type is cleartext due to unknown client type
                        "targetHeader2_3", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        // type is cleartext due to unknown client type
                        "" // header3, column 1 target header (default)
                );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, ClientSettings.highAssuranceMode());
        schemaGen.run();
        assertTrue(consoleOutput.toString().contains("No source columns could be considered for output"));
        assertEquals(0, targetSchema.toFile().length());
    }
}
| 2,396 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/schema/CsvSchemaGeneratorTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Path;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
// Unit tests for CsvSchemaGenerator: header discovery, column counting, and the
// error raised for an empty input CSV file.
public class CsvSchemaGeneratorTest {
    // Build a generator over the given CSV path (headers assumed present),
    // targeting a temporary schema.json that may be overwritten.
    private CsvSchemaGenerator getTestSchemaGenerator(final String file) throws IOException {
        final Path schemaOutput = FileTestUtility.resolve("schema.json");
        return CsvSchemaGenerator.builder()
                .inputCsvFile(file)
                .hasHeaders(true)
                .targetJsonFile(schemaOutput.toString())
                .overwrite(true)
                .build();
    }

    // Headers read from the sample CSV must match the known sample headers.
    @Test
    public void getSourceHeadersTest() throws IOException {
        final String sampleCsv = FileUtil.CURRENT_DIR + "/../samples/csv/data_sample_without_quotes.csv";
        assertEquals(DATA_SAMPLE_HEADERS, getTestSchemaGenerator(sampleCsv).getSourceHeaders());
    }

    // The reported column count equals the number of sample headers.
    @Test
    public void getSourceColumnCountTest() throws IOException {
        final String sampleCsv = FileUtil.CURRENT_DIR + "/../samples/csv/data_sample_without_quotes.csv";
        assertEquals(DATA_SAMPLE_HEADERS.size(), getTestSchemaGenerator(sampleCsv).getSourceColumnCount());
    }

    // An empty CSV file cannot produce a schema: C3rRuntimeException is raised.
    @Test
    public void emptyFileTest() throws IOException {
        final Path emptyCsvFile = FileTestUtility.createTempFile("empty", ".csv");
        assertThrows(C3rRuntimeException.class, () ->
                getTestSchemaGenerator(emptyCsvFile.toString()));
    }
}
| 2,397 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/schema/TemplateSchemaGeneratorTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Tests for {@code TemplateSchemaGenerator} covering header/positional input and the
 * three settings modes (none, permissive/low assurance, restrictive/high assurance).
 *
 * <p>Note: schema files are always written and read as UTF-8; all reads below pass
 * {@link StandardCharsets#UTF_8} explicitly for consistency.
 */
public class TemplateSchemaGeneratorTest {
    // Target path for the generated schema template, recreated for each test.
    private Path tempSchema;

    @BeforeEach
    public void setup() throws IOException {
        tempSchema = FileTestUtility.resolve("schema.json");
    }

    /** The builder must reject source header and column type lists of different lengths. */
    @Test
    public void validateErrorWithMismatchedColumnCounts() {
        assertThrows(C3rIllegalArgumentException.class, () ->
                TemplateSchemaGenerator.builder()
                        .sourceHeaders(List.of(new ColumnHeader("_c0")))
                        .sourceColumnTypes(List.of())
                        .targetJsonFile(tempSchema.toString())
                        .build());
    }

    /**
     * With headers and no client settings: a STRING column offers all three target types,
     * while an UNKNOWN-typed column is restricted to cleartext.
     */
    @Test
    public void testTemplateWithSourceHeadersNoSettingsGeneration() throws IOException {
        final var expectedContent = String.join("\n",
                "{",
                " \"headerRow\": true,",
                " \"columns\": [",
                " {",
                " \"sourceHeader\": \"header1\",",
                " \"targetHeader\": \"header1\",",
                " \"type\": \"[sealed|fingerprint|cleartext]\",",
                " \"pad\": {",
                " \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
                " \"type\": \"[none|fixed|max]\",",
                " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
                " }",
                " },",
                " {",
                " \"sourceHeader\": \"header2\",",
                " \"targetHeader\": \"header2\",",
                " \"type\": \"cleartext\"",
                " }",
                " ]",
                "}"
        );
        final var headers = List.of(
                new ColumnHeader("header1"),
                new ColumnHeader("header2")
        );
        final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN);
        final var generator = TemplateSchemaGenerator.builder()
                .sourceHeaders(headers)
                .sourceColumnTypes(types)
                .targetJsonFile(tempSchema.toString())
                .build();
        generator.run();
        final String content = Files.readString(tempSchema, StandardCharsets.UTF_8);
        assertEquals(expectedContent, content);
    }

    /**
     * Without headers and no client settings: positional columns get generated
     * {@code _cN} target headers with the same type restrictions as the header case.
     */
    @Test
    public void testTemplateWithoutSourceHeadersNoSettingsGeneration() throws IOException {
        final String expectedPositionalSchemaOutput = String.join("\n",
                "{",
                " \"headerRow\": false,",
                " \"columns\": [",
                " [",
                " {",
                " \"targetHeader\": \"_c0\",",
                " \"type\": \"[sealed|fingerprint|cleartext]\",",
                " \"pad\": {",
                " \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
                " \"type\": \"[none|fixed|max]\",",
                " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
                " }",
                " }",
                " ],",
                " [",
                " {",
                " \"targetHeader\": \"_c1\",",
                " \"type\": \"cleartext\"",
                " }",
                " ]",
                " ]",
                "}");
        final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN);
        TemplateSchemaGenerator.builder()
                .sourceHeaders(null)
                .sourceColumnTypes(types)
                .targetJsonFile(tempSchema.toString())
                .build()
                .run();
        // Read explicitly as UTF-8 for consistency with the other tests.
        final String content = Files.readString(tempSchema, StandardCharsets.UTF_8);
        assertEquals(expectedPositionalSchemaOutput, content);
    }

    /**
     * With headers and permissive (low assurance) settings: output matches the
     * no-settings case since cleartext is still allowed.
     */
    @Test
    public void testTemplateWithSourceHeadersPermissiveSettingsGeneration() throws IOException {
        final var expectedContent = String.join("\n",
                "{",
                " \"headerRow\": true,",
                " \"columns\": [",
                " {",
                " \"sourceHeader\": \"header1\",",
                " \"targetHeader\": \"header1\",",
                " \"type\": \"[sealed|fingerprint|cleartext]\",",
                " \"pad\": {",
                " \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
                " \"type\": \"[none|fixed|max]\",",
                " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
                " }",
                " },",
                " {",
                " \"sourceHeader\": \"header2\",",
                " \"targetHeader\": \"header2\",",
                " \"type\": \"cleartext\"",
                " }",
                " ]",
                "}"
        );
        final var headers = List.of(
                new ColumnHeader("header1"),
                new ColumnHeader("header2")
        );
        final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN);
        final var generator = TemplateSchemaGenerator.builder()
                .sourceHeaders(headers)
                .sourceColumnTypes(types)
                .targetJsonFile(tempSchema.toString())
                .clientSettings(ClientSettings.lowAssuranceMode())
                .build();
        generator.run();
        final String content = Files.readString(tempSchema, StandardCharsets.UTF_8);
        assertEquals(expectedContent, content);
    }

    /** Without headers and permissive settings: same positional output as the no-settings case. */
    @Test
    public void testTemplateWithoutSourceHeadersPermissiveSettingsGeneration() throws IOException {
        final String expectedPositionalSchemaOutput = String.join("\n",
                "{",
                " \"headerRow\": false,",
                " \"columns\": [",
                " [",
                " {",
                " \"targetHeader\": \"_c0\",",
                " \"type\": \"[sealed|fingerprint|cleartext]\",",
                " \"pad\": {",
                " \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
                " \"type\": \"[none|fixed|max]\",",
                " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
                " }",
                " }",
                " ],",
                " [",
                " {",
                " \"targetHeader\": \"_c1\",",
                " \"type\": \"cleartext\"",
                " }",
                " ]",
                " ]",
                "}");
        final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN);
        TemplateSchemaGenerator.builder()
                .sourceHeaders(null)
                .sourceColumnTypes(types)
                .targetJsonFile(tempSchema.toString())
                .clientSettings(ClientSettings.lowAssuranceMode())
                .build()
                .run();
        // Read explicitly as UTF-8 for consistency with the other tests.
        final String content = Files.readString(tempSchema, StandardCharsets.UTF_8);
        assertEquals(expectedPositionalSchemaOutput, content);
    }

    /**
     * With headers and restrictive (high assurance) settings: cleartext is disallowed,
     * so the UNKNOWN-typed column is dropped entirely and the STRING column may only
     * be sealed or fingerprint.
     */
    @Test
    public void testTemplateWithSourceHeadersRestrictiveSettingsGeneration() throws IOException {
        final var expectedContent = String.join("\n",
                "{",
                " \"headerRow\": true,",
                " \"columns\": [",
                " {",
                " \"sourceHeader\": \"header1\",",
                " \"targetHeader\": \"header1\",",
                " \"type\": \"[sealed|fingerprint]\",",
                " \"pad\": {",
                " \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
                " \"type\": \"[none|fixed|max]\",",
                " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
                " }",
                " }",
                " ]",
                "}"
        );
        final var headers = List.of(
                new ColumnHeader("header1"),
                new ColumnHeader("header2")
        );
        final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN);
        final var generator = TemplateSchemaGenerator.builder()
                .sourceHeaders(headers)
                .sourceColumnTypes(types)
                .targetJsonFile(tempSchema.toString())
                .clientSettings(ClientSettings.highAssuranceMode())
                .build();
        generator.run();
        final String content = Files.readString(tempSchema, StandardCharsets.UTF_8);
        assertEquals(expectedContent, content);
    }

    /**
     * Without headers and restrictive settings: the UNKNOWN-typed positional column
     * becomes an empty entry rather than being omitted.
     */
    @Test
    public void testTemplateWithoutSourceHeadersRestrictiveSettingsGeneration() throws IOException {
        final String expectedPositionalSchemaOutput = String.join("\n",
                "{",
                " \"headerRow\": false,",
                " \"columns\": [",
                " [",
                " {",
                " \"targetHeader\": \"_c0\",",
                " \"type\": \"[sealed|fingerprint]\",",
                " \"pad\": {",
                " \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
                " \"type\": \"[none|fixed|max]\",",
                " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
                " }",
                " }",
                " ],",
                " []",
                " ]",
                "}");
        final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN);
        TemplateSchemaGenerator.builder()
                .sourceHeaders(null)
                .sourceColumnTypes(types)
                .targetJsonFile(tempSchema.toString())
                .clientSettings(ClientSettings.highAssuranceMode())
                .build()
                .run();
        // Read explicitly as UTF-8 for consistency with the other tests.
        final String content = Files.readString(tempSchema, StandardCharsets.UTF_8);
        assertEquals(expectedPositionalSchemaOutput, content);
    }
}
| 2,398 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/csv/SparkCsvWriterTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.CsvRowWriter;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class SparkCsvWriterTest {

    /** Header names matching the sample data set, wrapped as {@link ColumnHeader} values. */
    private final List<ColumnHeader> dataSampleHeaders =
            List.of("FirstName",
                    "LastName",
                    "Address",
                    "City",
                    "State",
                    "PhoneNumber",
                    "Title",
                    "Level",
                    "Notes")
                    .stream()
                    .map(ColumnHeader::new)
                    .collect(Collectors.toList());

    /** Local Spark session used to read and write CSV data in the round-trip tests. */
    private final SparkSession session = SparkSessionTestUtility.initSparkSession();

    /** CSV input file populated by individual tests. */
    private Path tempInputFile;

    /** Directory that receives written CSV output. */
    private Path tempOutputDir;

    @BeforeEach
    public void setup() throws IOException {
        tempInputFile = FileTestUtility.createTempFile("temp", ".csv");
        tempOutputDir = FileTestUtility.createTempDir();
    }

    /** Joins the sample headers into the comma-separated form the writer properties expect. */
    private String joinedSampleHeaders() {
        return dataSampleHeaders.stream().map(ColumnHeader::toString).collect(Collectors.joining(","));
    }

    /** A writer built from valid properties exposes exactly the configured headers. */
    @Test
    public void initWriterHeadersTest() {
        final Map<String, String> writerProperties = new HashMap<>();
        writerProperties.put("headers", joinedSampleHeaders());
        writerProperties.put("path", tempOutputDir.toString());
        final CsvRowWriter writer = SparkCsvWriter.initWriter(0, writerProperties);
        assertEquals(dataSampleHeaders.size(), writer.getHeaders().size());
        assertTrue(dataSampleHeaders.containsAll(writer.getHeaders()));
    }

    /** Omitting the "path" property must cause writer initialization to fail. */
    @Test
    public void initWriterNoPathTest() {
        final Map<String, String> writerProperties = new HashMap<>();
        writerProperties.put("headers", joinedSampleHeaders());
        assertThrows(C3rRuntimeException.class, () -> SparkCsvWriter.initWriter(0, writerProperties));
    }

    /** The partition id is encoded into the target file name as "part-NNNNN-<uuid>". */
    @Test
    public void initWriterTargetTest() {
        final Map<String, String> writerProperties = new HashMap<>();
        writerProperties.put("headers", joinedSampleHeaders());
        writerProperties.put("path", tempOutputDir.toString());
        writerProperties.put("sessionUuid", UUID.randomUUID().toString());
        final CsvRowWriter writer = SparkCsvWriter.initWriter(1, writerProperties);
        // Strip the directory prefix (and its trailing separator) to get the bare file name.
        final String fileName = writer.getTargetName().substring(tempOutputDir.toString().length() + 1);
        final String[] parts = fileName.split("-");
        assertEquals(7, parts.length); // UUID hyphens plus the initial.
        assertEquals("part", parts[0]);
        assertEquals("00001", parts[1]);
    }

    /** A quoted single-space value survives a read/write/read round trip unchanged. */
    @Test
    public void quotedSpaceTest() throws IOException {
        Files.writeString(tempInputFile, "column\n\" \"");
        final Dataset<Row> readBack = SparkCsvReader.readInput(session,
                tempInputFile.toString(),
                null,
                null);
        SparkCsvWriter.writeOutput(readBack, tempOutputDir.toString(), null);
        final Dataset<Row> roundTripped = SparkCsvReader.readInput(session,
                tempOutputDir.toString(),
                null,
                null);
        final List<Row> before = readBack.collectAsList();
        final List<Row> after = roundTripped.collectAsList();
        // ensure data read in doesn't change when written out
        assertEquals(before.get(0).getString(0), after.get(0).get(0));
        assertEquals(" ", after.get(0).get(0));
    }

    /** Unquoted blank values read as null and stay null through a round trip. */
    @Test
    public void unquotedBlankTest() throws IOException {
        Files.writeString(tempInputFile, "column, column2\n ,");
        final Dataset<Row> readBack = SparkCsvReader.readInput(session,
                tempInputFile.toString(),
                null,
                null);
        SparkCsvWriter.writeOutput(readBack, tempOutputDir.toString(), null);
        final Dataset<Row> roundTripped = SparkCsvReader.readInput(session,
                tempOutputDir.toString(),
                null,
                null);
        final List<Row> before = readBack.collectAsList();
        final List<Row> after = roundTripped.collectAsList();
        // ensure data read in doesn't change when written out
        assertNull(before.get(0).get(0));
        assertNull(before.get(0).get(1));
        assertNull(after.get(0).get(0));
        assertNull(after.get(0).get(1));
    }

    /** A custom null marker applies to data values but never to column headers. */
    @Test
    public void customNullTest() throws IOException {
        Files.writeString(tempInputFile, "column, column2\ncolumn,");
        final Dataset<Row> readBack = SparkCsvReader.readInput(session,
                tempInputFile.toString(),
                "column",
                null);
        SparkCsvWriter.writeOutput(readBack, tempOutputDir.toString(), "column");
        final Dataset<Row> roundTripped = SparkCsvReader.readInput(session,
                tempOutputDir.toString(),
                null,
                null);
        final List<Row> before = readBack.collectAsList();
        final List<Row> after = roundTripped.collectAsList();
        // ensure a column with a header that equals the custom null value is not dropped.
        assertEquals("column", readBack.columns()[0]);
        assertEquals("column", roundTripped.columns()[0]);
        // ensure custom null respected
        assertNull(before.get(0).get(0));
        assertEquals("column", after.get(0).get(0));
    }
}
| 2,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.