// File: Create_ds/spark/core/src/test/java/org/apache/spark/io/GenericFileInputStreamSuite.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.io;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import static org.junit.Assert.assertEquals;
/**
 * Base test suite for file-backed {@link InputStream} implementations such as
 * {@link NioBufferedFileInputStream}.
*/
public abstract class GenericFileInputStreamSuite {
private byte[] randomBytes;
protected File inputFile;
protected InputStream[] inputStreams;
@Before
public void setUp() throws IOException {
// Create a byte array of size 2 MB with random bytes
randomBytes = RandomUtils.nextBytes(2 * 1024 * 1024);
inputFile = File.createTempFile("temp-file", ".tmp");
FileUtils.writeByteArrayToFile(inputFile, randomBytes);
}
@After
public void tearDown() {
inputFile.delete();
}
@Test
public void testReadOneByte() throws IOException {
for (InputStream inputStream: inputStreams) {
for (int i = 0; i < randomBytes.length; i++) {
assertEquals(randomBytes[i], (byte) inputStream.read());
}
}
}
@Test
public void testReadMultipleBytes() throws IOException {
for (InputStream inputStream: inputStreams) {
byte[] readBytes = new byte[8 * 1024];
int i = 0;
while (i < randomBytes.length) {
int read = inputStream.read(readBytes, 0, 8 * 1024);
for (int j = 0; j < read; j++) {
assertEquals(randomBytes[i], readBytes[j]);
i++;
}
}
}
}
@Test
public void testBytesSkipped() throws IOException {
for (InputStream inputStream: inputStreams) {
assertEquals(1024, inputStream.skip(1024));
for (int i = 1024; i < randomBytes.length; i++) {
assertEquals(randomBytes[i], (byte) inputStream.read());
}
}
}
@Test
public void testBytesSkippedAfterRead() throws IOException {
for (InputStream inputStream: inputStreams) {
for (int i = 0; i < 1024; i++) {
assertEquals(randomBytes[i], (byte) inputStream.read());
}
assertEquals(1024, inputStream.skip(1024));
for (int i = 2048; i < randomBytes.length; i++) {
assertEquals(randomBytes[i], (byte) inputStream.read());
}
}
}
@Test
public void testNegativeBytesSkippedAfterRead() throws IOException {
for (InputStream inputStream: inputStreams) {
for (int i = 0; i < 1024; i++) {
assertEquals(randomBytes[i], (byte) inputStream.read());
}
// Skipping a negative number of bytes should essentially be a no-op
assertEquals(0, inputStream.skip(-1));
assertEquals(0, inputStream.skip(-1024));
assertEquals(0, inputStream.skip(Long.MIN_VALUE));
assertEquals(1024, inputStream.skip(1024));
for (int i = 2048; i < randomBytes.length; i++) {
assertEquals(randomBytes[i], (byte) inputStream.read());
}
}
}
@Test
public void testSkipFromFileChannel() throws IOException {
for (InputStream inputStream: inputStreams) {
// Since the buffer is smaller than the number of bytes to skip, this guarantees
// that we skip from the underlying file channel.
assertEquals(1024, inputStream.skip(1024));
for (int i = 1024; i < 2048; i++) {
assertEquals(randomBytes[i], (byte) inputStream.read());
}
assertEquals(256, inputStream.skip(256));
assertEquals(256, inputStream.skip(256));
assertEquals(512, inputStream.skip(512));
for (int i = 3072; i < randomBytes.length; i++) {
assertEquals(randomBytes[i], (byte) inputStream.read());
}
}
}
@Test
public void testBytesSkippedAfterEOF() throws IOException {
for (InputStream inputStream: inputStreams) {
assertEquals(randomBytes.length, inputStream.skip(randomBytes.length + 1));
assertEquals(-1, inputStream.read());
}
}
}
// File: Create_ds/spark/core/src/test/java/org/apache/spark/io/NioBufferedInputStreamSuite.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.io;
import org.junit.Before;
import java.io.InputStream;
import java.io.IOException;
/**
* Tests functionality of {@link NioBufferedFileInputStream}
*/
public class NioBufferedInputStreamSuite extends GenericFileInputStreamSuite {
@Before
public void setUp() throws IOException {
super.setUp();
inputStreams = new InputStream[] {
new NioBufferedFileInputStream(inputFile), // default
new NioBufferedFileInputStream(inputFile, 123) // small, unaligned buffer
};
}
}
// File: Create_ds/spark/core/src/test/java/org/apache/spark/io/ReadAheadInputStreamSuite.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.io;
import org.junit.Before;
import java.io.IOException;
import java.io.InputStream;
/**
 * Tests functionality of {@link ReadAheadInputStream}
*/
public class ReadAheadInputStreamSuite extends GenericFileInputStreamSuite {
@Before
public void setUp() throws IOException {
super.setUp();
inputStreams = new InputStream[] {
// Tests equal and aligned buffers for the wrapped and outer streams.
new ReadAheadInputStream(new NioBufferedFileInputStream(inputFile, 8 * 1024), 8 * 1024),
// Tests aligned buffers, wrapped bigger than outer.
new ReadAheadInputStream(new NioBufferedFileInputStream(inputFile, 3 * 1024), 2 * 1024),
// Tests aligned buffers, wrapped smaller than outer.
new ReadAheadInputStream(new NioBufferedFileInputStream(inputFile, 2 * 1024), 3 * 1024),
// Tests unaligned buffers, wrapped bigger than outer.
new ReadAheadInputStream(new NioBufferedFileInputStream(inputFile, 321), 123),
// Tests unaligned buffers, wrapped smaller than outer.
new ReadAheadInputStream(new NioBufferedFileInputStream(inputFile, 123), 321)
};
}
}
// File: Create_ds/spark/core/src/test/java/org/apache/spark/unsafe/map/BytesToBytesMapOffHeapSuite.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.map;
public class BytesToBytesMapOffHeapSuite extends AbstractBytesToBytesMapSuite {
@Override
protected boolean useOffHeapMemoryAllocator() {
return true;
}
}
// File: Create_ds/spark/core/src/test/java/org/apache/spark/unsafe/map/BytesToBytesMapOnHeapSuite.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.map;
public class BytesToBytesMapOnHeapSuite extends AbstractBytesToBytesMapSuite {
@Override
protected boolean useOffHeapMemoryAllocator() {
return false;
}
}
// File: Create_ds/spark/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.map;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import scala.Tuple2$;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.apache.spark.SparkConf;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.memory.MemoryMode;
import org.apache.spark.memory.TestMemoryConsumer;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.memory.TestMemoryManager;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.serializer.JavaSerializer;
import org.apache.spark.serializer.SerializerInstance;
import org.apache.spark.serializer.SerializerManager;
import org.apache.spark.storage.*;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.array.ByteArrayMethods;
import org.apache.spark.util.Utils;
import static org.hamcrest.Matchers.greaterThan;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.mockito.Answers.RETURNS_SMART_NULLS;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.when;
public abstract class AbstractBytesToBytesMapSuite {
private final Random rand = new Random(42);
private TestMemoryManager memoryManager;
private TaskMemoryManager taskMemoryManager;
private SerializerManager serializerManager = new SerializerManager(
new JavaSerializer(new SparkConf()),
new SparkConf().set("spark.shuffle.spill.compress", "false"));
private static final long PAGE_SIZE_BYTES = 1L << 26; // 64 megabytes
final LinkedList<File> spillFilesCreated = new LinkedList<>();
File tempDir;
@Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager;
@Mock(answer = RETURNS_SMART_NULLS) DiskBlockManager diskBlockManager;
@Before
public void setup() {
memoryManager =
new TestMemoryManager(
new SparkConf()
.set("spark.memory.offHeap.enabled", "" + useOffHeapMemoryAllocator())
.set("spark.memory.offHeap.size", "256mb")
.set("spark.shuffle.spill.compress", "false")
.set("spark.shuffle.compress", "false"));
taskMemoryManager = new TaskMemoryManager(memoryManager, 0);
tempDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "unsafe-test");
spillFilesCreated.clear();
MockitoAnnotations.initMocks(this);
when(blockManager.diskBlockManager()).thenReturn(diskBlockManager);
when(diskBlockManager.createTempLocalBlock()).thenAnswer(invocationOnMock -> {
TempLocalBlockId blockId = new TempLocalBlockId(UUID.randomUUID());
File file = File.createTempFile("spillFile", ".spill", tempDir);
spillFilesCreated.add(file);
return Tuple2$.MODULE$.apply(blockId, file);
});
when(blockManager.getDiskWriter(
any(BlockId.class),
any(File.class),
any(SerializerInstance.class),
anyInt(),
any(ShuffleWriteMetrics.class))).thenAnswer(invocationOnMock -> {
Object[] args = invocationOnMock.getArguments();
return new DiskBlockObjectWriter(
(File) args[1],
serializerManager,
(SerializerInstance) args[2],
(Integer) args[3],
false,
(ShuffleWriteMetrics) args[4],
(BlockId) args[0]
);
});
}
@After
public void tearDown() {
Utils.deleteRecursively(tempDir);
tempDir = null;
if (taskMemoryManager != null) {
Assert.assertEquals(0L, taskMemoryManager.cleanUpAllAllocatedMemory());
long leakedMemory = taskMemoryManager.getMemoryConsumptionForThisTask();
taskMemoryManager = null;
Assert.assertEquals(0L, leakedMemory);
}
}
protected abstract boolean useOffHeapMemoryAllocator();
private static byte[] getByteArray(Object base, long offset, int size) {
final byte[] arr = new byte[size];
Platform.copyMemory(base, offset, arr, Platform.BYTE_ARRAY_OFFSET, size);
return arr;
}
private byte[] getRandomByteArray(int numWords) {
Assert.assertTrue(numWords >= 0);
final int lengthInBytes = numWords * 8;
final byte[] bytes = new byte[lengthInBytes];
rand.nextBytes(bytes);
return bytes;
}
/**
* Fast equality checking for byte arrays, since these comparisons are a bottleneck
* in our stress tests.
*/
private static boolean arrayEquals(
byte[] expected,
Object base,
long offset,
long actualLengthBytes) {
return (actualLengthBytes == expected.length) && ByteArrayMethods.arrayEquals(
expected,
Platform.BYTE_ARRAY_OFFSET,
base,
offset,
expected.length
);
}
@Test
public void emptyMap() {
BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, PAGE_SIZE_BYTES);
try {
Assert.assertEquals(0, map.numKeys());
final int keyLengthInWords = 10;
final int keyLengthInBytes = keyLengthInWords * 8;
final byte[] key = getRandomByteArray(keyLengthInWords);
Assert.assertFalse(map.lookup(key, Platform.BYTE_ARRAY_OFFSET, keyLengthInBytes).isDefined());
Assert.assertFalse(map.iterator().hasNext());
} finally {
map.free();
}
}
@Test
public void setAndRetrieveAKey() {
BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, PAGE_SIZE_BYTES);
final int recordLengthWords = 10;
final int recordLengthBytes = recordLengthWords * 8;
final byte[] keyData = getRandomByteArray(recordLengthWords);
final byte[] valueData = getRandomByteArray(recordLengthWords);
try {
final BytesToBytesMap.Location loc =
map.lookup(keyData, Platform.BYTE_ARRAY_OFFSET, recordLengthBytes);
Assert.assertFalse(loc.isDefined());
Assert.assertTrue(loc.append(
keyData,
Platform.BYTE_ARRAY_OFFSET,
recordLengthBytes,
valueData,
Platform.BYTE_ARRAY_OFFSET,
recordLengthBytes
));
// After storing the key and value, the other location methods should return results that
// reflect the result of this store without us having to call lookup() again on the same key.
Assert.assertEquals(recordLengthBytes, loc.getKeyLength());
Assert.assertEquals(recordLengthBytes, loc.getValueLength());
Assert.assertArrayEquals(keyData,
getByteArray(loc.getKeyBase(), loc.getKeyOffset(), recordLengthBytes));
Assert.assertArrayEquals(valueData,
getByteArray(loc.getValueBase(), loc.getValueOffset(), recordLengthBytes));
// After calling lookup() the location should still point to the correct data.
Assert.assertTrue(
map.lookup(keyData, Platform.BYTE_ARRAY_OFFSET, recordLengthBytes).isDefined());
Assert.assertEquals(recordLengthBytes, loc.getKeyLength());
Assert.assertEquals(recordLengthBytes, loc.getValueLength());
Assert.assertArrayEquals(keyData,
getByteArray(loc.getKeyBase(), loc.getKeyOffset(), recordLengthBytes));
Assert.assertArrayEquals(valueData,
getByteArray(loc.getValueBase(), loc.getValueOffset(), recordLengthBytes));
try {
Assert.assertTrue(loc.append(
keyData,
Platform.BYTE_ARRAY_OFFSET,
recordLengthBytes,
valueData,
Platform.BYTE_ARRAY_OFFSET,
recordLengthBytes
));
Assert.fail("Should not be able to set a new value for a key");
} catch (AssertionError e) {
// Expected exception; do nothing.
}
} finally {
map.free();
}
}
private void iteratorTestBase(boolean destructive) throws Exception {
final int size = 4096;
BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, size / 2, PAGE_SIZE_BYTES);
try {
for (long i = 0; i < size; i++) {
final long[] value = new long[] { i };
final BytesToBytesMap.Location loc =
map.lookup(value, Platform.LONG_ARRAY_OFFSET, 8);
Assert.assertFalse(loc.isDefined());
// Ensure that we store some zero-length keys
if (i % 5 == 0) {
Assert.assertTrue(loc.append(
null,
Platform.LONG_ARRAY_OFFSET,
0,
value,
Platform.LONG_ARRAY_OFFSET,
8
));
} else {
Assert.assertTrue(loc.append(
value,
Platform.LONG_ARRAY_OFFSET,
8,
value,
Platform.LONG_ARRAY_OFFSET,
8
));
}
}
final java.util.BitSet valuesSeen = new java.util.BitSet(size);
final Iterator<BytesToBytesMap.Location> iter;
if (destructive) {
iter = map.destructiveIterator();
} else {
iter = map.iterator();
}
int numPages = map.getNumDataPages();
int countFreedPages = 0;
while (iter.hasNext()) {
final BytesToBytesMap.Location loc = iter.next();
Assert.assertTrue(loc.isDefined());
final long value = Platform.getLong(loc.getValueBase(), loc.getValueOffset());
final long keyLength = loc.getKeyLength();
if (keyLength == 0) {
Assert.assertTrue("value " + value + " was not divisible by 5", value % 5 == 0);
} else {
final long key = Platform.getLong(loc.getKeyBase(), loc.getKeyOffset());
Assert.assertEquals(value, key);
}
valuesSeen.set((int) value);
if (destructive) {
// The iterator moves onto next page and frees previous page
if (map.getNumDataPages() < numPages) {
numPages = map.getNumDataPages();
countFreedPages++;
}
}
}
if (destructive) {
// Latest page is not freed by iterator but by map itself
Assert.assertEquals(countFreedPages, numPages - 1);
}
Assert.assertEquals(size, valuesSeen.cardinality());
} finally {
map.free();
}
}
@Test
public void iteratorTest() throws Exception {
iteratorTestBase(false);
}
@Test
public void destructiveIteratorTest() throws Exception {
iteratorTestBase(true);
}
@Test
public void iteratingOverDataPagesWithWastedSpace() throws Exception {
final int NUM_ENTRIES = 1000 * 1000;
final int KEY_LENGTH = 24;
final int VALUE_LENGTH = 40;
final BytesToBytesMap map =
new BytesToBytesMap(taskMemoryManager, NUM_ENTRIES, PAGE_SIZE_BYTES);
// Each record will take 8 + 24 + 40 = 72 bytes of space in the data page. Our 64-megabyte
// pages won't be evenly-divisible by records of this size, which will cause us to waste some
// space at the end of the page. This is necessary in order for us to take the end-of-record
// handling branch in iterator().
try {
for (int i = 0; i < NUM_ENTRIES; i++) {
final long[] key = new long[] { i, i, i }; // 3 * 8 = 24 bytes
final long[] value = new long[] { i, i, i, i, i }; // 5 * 8 = 40 bytes
final BytesToBytesMap.Location loc = map.lookup(
key,
Platform.LONG_ARRAY_OFFSET,
KEY_LENGTH
);
Assert.assertFalse(loc.isDefined());
Assert.assertTrue(loc.append(
key,
Platform.LONG_ARRAY_OFFSET,
KEY_LENGTH,
value,
Platform.LONG_ARRAY_OFFSET,
VALUE_LENGTH
));
}
Assert.assertEquals(2, map.getNumDataPages());
final java.util.BitSet valuesSeen = new java.util.BitSet(NUM_ENTRIES);
final Iterator<BytesToBytesMap.Location> iter = map.iterator();
final long[] key = new long[KEY_LENGTH / 8];
final long[] value = new long[VALUE_LENGTH / 8];
while (iter.hasNext()) {
final BytesToBytesMap.Location loc = iter.next();
Assert.assertTrue(loc.isDefined());
Assert.assertEquals(KEY_LENGTH, loc.getKeyLength());
Assert.assertEquals(VALUE_LENGTH, loc.getValueLength());
Platform.copyMemory(
loc.getKeyBase(),
loc.getKeyOffset(),
key,
Platform.LONG_ARRAY_OFFSET,
KEY_LENGTH
);
Platform.copyMemory(
loc.getValueBase(),
loc.getValueOffset(),
value,
Platform.LONG_ARRAY_OFFSET,
VALUE_LENGTH
);
for (long j : key) {
Assert.assertEquals(key[0], j);
}
for (long j : value) {
Assert.assertEquals(key[0], j);
}
valuesSeen.set((int) key[0]);
}
Assert.assertEquals(NUM_ENTRIES, valuesSeen.cardinality());
} finally {
map.free();
}
}
@Test
public void randomizedStressTest() {
final int size = 32768;
// Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
// into ByteBuffers in order to use them as keys here.
final Map<ByteBuffer, byte[]> expected = new HashMap<>();
final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, size, PAGE_SIZE_BYTES);
try {
// Fill the map to 90% full so that we can trigger probing
for (int i = 0; i < size * 0.9; i++) {
final byte[] key = getRandomByteArray(rand.nextInt(256) + 1);
final byte[] value = getRandomByteArray(rand.nextInt(256) + 1);
if (!expected.containsKey(ByteBuffer.wrap(key))) {
expected.put(ByteBuffer.wrap(key), value);
final BytesToBytesMap.Location loc = map.lookup(
key,
Platform.BYTE_ARRAY_OFFSET,
key.length
);
Assert.assertFalse(loc.isDefined());
Assert.assertTrue(loc.append(
key,
Platform.BYTE_ARRAY_OFFSET,
key.length,
value,
Platform.BYTE_ARRAY_OFFSET,
value.length
));
// After calling append(), the following should be true, even before calling
// lookup() again:
Assert.assertTrue(loc.isDefined());
Assert.assertEquals(key.length, loc.getKeyLength());
Assert.assertEquals(value.length, loc.getValueLength());
Assert.assertTrue(arrayEquals(key, loc.getKeyBase(), loc.getKeyOffset(), key.length));
Assert.assertTrue(
arrayEquals(value, loc.getValueBase(), loc.getValueOffset(), value.length));
}
}
for (Map.Entry<ByteBuffer, byte[]> entry : expected.entrySet()) {
final byte[] key = JavaUtils.bufferToArray(entry.getKey());
final byte[] value = entry.getValue();
final BytesToBytesMap.Location loc =
map.lookup(key, Platform.BYTE_ARRAY_OFFSET, key.length);
Assert.assertTrue(loc.isDefined());
Assert.assertTrue(
arrayEquals(key, loc.getKeyBase(), loc.getKeyOffset(), loc.getKeyLength()));
Assert.assertTrue(
arrayEquals(value, loc.getValueBase(), loc.getValueOffset(), loc.getValueLength()));
}
} finally {
map.free();
}
}
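
  /**
   * Illustrative sketch (not invoked by any test above) of why the randomized tests wrap
   * byte[] keys in ByteBuffers: array equals() and hashCode() are identity-based, whereas
   * ByteBuffer compares contents, so only the wrapped form behaves correctly as a HashMap key.
   */
  private static void byteBufferKeyRationaleExample() {
    byte[] a = new byte[] { 1, 2, 3 };
    byte[] b = new byte[] { 1, 2, 3 };
    // Distinct arrays with identical contents are not equal as map keys.
    Assert.assertFalse(a.equals(b));
    // Wrapping in ByteBuffer gives content-based equals() and hashCode().
    Assert.assertTrue(ByteBuffer.wrap(a).equals(ByteBuffer.wrap(b)));
    Assert.assertEquals(ByteBuffer.wrap(a).hashCode(), ByteBuffer.wrap(b).hashCode());
  }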
@Test
public void randomizedTestWithRecordsLargerThanPageSize() {
final long pageSizeBytes = 128;
final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, pageSizeBytes);
// Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
// into ByteBuffers in order to use them as keys here.
final Map<ByteBuffer, byte[]> expected = new HashMap<>();
try {
for (int i = 0; i < 1000; i++) {
final byte[] key = getRandomByteArray(rand.nextInt(128));
final byte[] value = getRandomByteArray(rand.nextInt(128));
if (!expected.containsKey(ByteBuffer.wrap(key))) {
expected.put(ByteBuffer.wrap(key), value);
final BytesToBytesMap.Location loc = map.lookup(
key,
Platform.BYTE_ARRAY_OFFSET,
key.length
);
Assert.assertFalse(loc.isDefined());
Assert.assertTrue(loc.append(
key,
Platform.BYTE_ARRAY_OFFSET,
key.length,
value,
Platform.BYTE_ARRAY_OFFSET,
value.length
));
// After calling append(), the following should be true, even before calling
// lookup() again:
Assert.assertTrue(loc.isDefined());
Assert.assertEquals(key.length, loc.getKeyLength());
Assert.assertEquals(value.length, loc.getValueLength());
Assert.assertTrue(arrayEquals(key, loc.getKeyBase(), loc.getKeyOffset(), key.length));
Assert.assertTrue(
arrayEquals(value, loc.getValueBase(), loc.getValueOffset(), value.length));
}
}
for (Map.Entry<ByteBuffer, byte[]> entry : expected.entrySet()) {
final byte[] key = JavaUtils.bufferToArray(entry.getKey());
final byte[] value = entry.getValue();
final BytesToBytesMap.Location loc =
map.lookup(key, Platform.BYTE_ARRAY_OFFSET, key.length);
Assert.assertTrue(loc.isDefined());
Assert.assertTrue(
arrayEquals(key, loc.getKeyBase(), loc.getKeyOffset(), loc.getKeyLength()));
Assert.assertTrue(
arrayEquals(value, loc.getValueBase(), loc.getValueOffset(), loc.getValueLength()));
}
} finally {
map.free();
}
}
@Test
public void failureToAllocateFirstPage() {
memoryManager.limit(1024); // longArray
BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 1, PAGE_SIZE_BYTES);
try {
final long[] emptyArray = new long[0];
final BytesToBytesMap.Location loc =
map.lookup(emptyArray, Platform.LONG_ARRAY_OFFSET, 0);
Assert.assertFalse(loc.isDefined());
Assert.assertFalse(loc.append(
emptyArray, Platform.LONG_ARRAY_OFFSET, 0, emptyArray, Platform.LONG_ARRAY_OFFSET, 0));
} finally {
map.free();
}
}
@Test
public void failureToGrow() {
BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 1, 1024);
try {
boolean success = true;
int i;
for (i = 0; i < 127; i++) {
if (i > 0) {
memoryManager.limit(0);
}
final long[] arr = new long[]{i};
final BytesToBytesMap.Location loc = map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8);
success =
loc.append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8);
if (!success) {
break;
}
}
Assert.assertThat(i, greaterThan(0));
Assert.assertFalse(success);
} finally {
map.free();
}
}
@Test
public void spillInIterator() throws IOException {
BytesToBytesMap map = new BytesToBytesMap(
taskMemoryManager, blockManager, serializerManager, 1, 0.75, 1024, false);
try {
int i;
for (i = 0; i < 1024; i++) {
final long[] arr = new long[]{i};
final BytesToBytesMap.Location loc = map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8);
loc.append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8);
}
BytesToBytesMap.MapIterator iter = map.iterator();
for (i = 0; i < 100; i++) {
iter.next();
}
// Non-destructive iterator is not spillable
Assert.assertEquals(0, iter.spill(1024L * 10));
for (i = 100; i < 1024; i++) {
iter.next();
}
BytesToBytesMap.MapIterator iter2 = map.destructiveIterator();
for (i = 0; i < 100; i++) {
iter2.next();
}
Assert.assertTrue(iter2.spill(1024) >= 1024);
for (i = 100; i < 1024; i++) {
iter2.next();
}
assertFalse(iter2.hasNext());
} finally {
map.free();
for (File spillFile : spillFilesCreated) {
assertFalse("Spill file " + spillFile.getPath() + " was not cleaned up",
spillFile.exists());
}
}
}
@Test
public void multipleValuesForSameKey() {
BytesToBytesMap map =
new BytesToBytesMap(taskMemoryManager, blockManager, serializerManager, 1, 0.5, 1024, false);
try {
int i;
for (i = 0; i < 1024; i++) {
final long[] arr = new long[]{i};
map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8)
.append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8);
}
assert map.numKeys() == 1024;
assert map.numValues() == 1024;
for (i = 0; i < 1024; i++) {
final long[] arr = new long[]{i};
map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8)
.append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8);
}
assert map.numKeys() == 1024;
assert map.numValues() == 2048;
for (i = 0; i < 1024; i++) {
final long[] arr = new long[]{i};
final BytesToBytesMap.Location loc = map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8);
assert loc.isDefined();
assert loc.nextValue();
assert !loc.nextValue();
}
BytesToBytesMap.MapIterator iter = map.iterator();
for (i = 0; i < 2048; i++) {
assert iter.hasNext();
final BytesToBytesMap.Location loc = iter.next();
assert loc.isDefined();
}
} finally {
map.free();
}
}
@Test
public void initialCapacityBoundsChecking() {
try {
new BytesToBytesMap(taskMemoryManager, 0, PAGE_SIZE_BYTES);
Assert.fail("Expected IllegalArgumentException to be thrown");
} catch (IllegalArgumentException e) {
// expected exception
}
try {
new BytesToBytesMap(
taskMemoryManager,
BytesToBytesMap.MAX_CAPACITY + 1,
PAGE_SIZE_BYTES);
Assert.fail("Expected IllegalArgumentException to be thrown");
} catch (IllegalArgumentException e) {
// expected exception
}
}
@Test
public void testPeakMemoryUsed() {
final long recordLengthBytes = 32;
final long pageSizeBytes = 256 + 8; // 8 bytes for end-of-page marker
final long numRecordsPerPage = (pageSizeBytes - 8) / recordLengthBytes;
final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 1024, pageSizeBytes);
// Since BytesToBytesMap is append-only, we expect the total memory consumption to be
// monotonically increasing. More specifically, every time we allocate a new page it
// should increase by exactly the size of the page. In this regard, the memory usage
// at any given time is also the peak memory used.
long previousPeakMemory = map.getPeakMemoryUsedBytes();
long newPeakMemory;
try {
for (long i = 0; i < numRecordsPerPage * 10; i++) {
final long[] value = new long[]{i};
map.lookup(value, Platform.LONG_ARRAY_OFFSET, 8).append(
value,
Platform.LONG_ARRAY_OFFSET,
8,
value,
Platform.LONG_ARRAY_OFFSET,
8);
newPeakMemory = map.getPeakMemoryUsedBytes();
if (i % numRecordsPerPage == 0) {
// We allocated a new page for this record, so peak memory should change
assertEquals(previousPeakMemory + pageSizeBytes, newPeakMemory);
} else {
assertEquals(previousPeakMemory, newPeakMemory);
}
previousPeakMemory = newPeakMemory;
}
// Freeing the map should not change the peak memory
map.free();
newPeakMemory = map.getPeakMemoryUsedBytes();
assertEquals(previousPeakMemory, newPeakMemory);
} finally {
map.free();
}
}
@Test
public void avoidDeadlock() throws InterruptedException {
memoryManager.limit(PAGE_SIZE_BYTES);
MemoryMode mode = useOffHeapMemoryAllocator() ? MemoryMode.OFF_HEAP : MemoryMode.ON_HEAP;
TestMemoryConsumer c1 = new TestMemoryConsumer(taskMemoryManager, mode);
BytesToBytesMap map =
new BytesToBytesMap(taskMemoryManager, blockManager, serializerManager, 1, 0.5, 1024, false);
Thread thread = new Thread(() -> {
int i = 0;
long used = 0;
while (i < 10) {
c1.use(10000000);
used += 10000000;
i++;
}
c1.free(used);
});
try {
int i;
for (i = 0; i < 1024; i++) {
final long[] arr = new long[]{i};
final BytesToBytesMap.Location loc = map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8);
loc.append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8);
}
// Starts to require memory at another memory consumer.
thread.start();
BytesToBytesMap.MapIterator iter = map.destructiveIterator();
for (i = 0; i < 1024; i++) {
iter.next();
}
assertFalse(iter.hasNext());
} finally {
map.free();
thread.join();
for (File spillFile : spillFilesCreated) {
assertFalse("Spill file " + spillFile.getPath() + " was not cleaned up",
spillFile.exists());
}
}
}
}
// File: Create_ds/spark/core/src/test/java/org/apache/spark/api/java/OptionalSuite.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java;
import org.junit.Assert;
import org.junit.Test;
/**
* Tests {@link Optional}.
*/
public class OptionalSuite {
@Test
public void testEmpty() {
Assert.assertFalse(Optional.empty().isPresent());
Assert.assertNull(Optional.empty().orNull());
Assert.assertEquals("foo", Optional.empty().or("foo"));
Assert.assertEquals("foo", Optional.empty().orElse("foo"));
}
@Test(expected = NullPointerException.class)
public void testEmptyGet() {
Optional.empty().get();
}
@Test
public void testAbsent() {
Assert.assertFalse(Optional.absent().isPresent());
Assert.assertNull(Optional.absent().orNull());
Assert.assertEquals("foo", Optional.absent().or("foo"));
Assert.assertEquals("foo", Optional.absent().orElse("foo"));
}
@Test(expected = NullPointerException.class)
public void testAbsentGet() {
Optional.absent().get();
}
@Test
public void testOf() {
Assert.assertTrue(Optional.of(1).isPresent());
Assert.assertNotNull(Optional.of(1).orNull());
Assert.assertEquals(Integer.valueOf(1), Optional.of(1).get());
Assert.assertEquals(Integer.valueOf(1), Optional.of(1).or(2));
Assert.assertEquals(Integer.valueOf(1), Optional.of(1).orElse(2));
}
@Test(expected = NullPointerException.class)
public void testOfWithNull() {
Optional.of(null);
}
@Test
public void testOfNullable() {
Assert.assertTrue(Optional.ofNullable(1).isPresent());
Assert.assertNotNull(Optional.ofNullable(1).orNull());
Assert.assertEquals(Integer.valueOf(1), Optional.ofNullable(1).get());
Assert.assertEquals(Integer.valueOf(1), Optional.ofNullable(1).or(2));
Assert.assertEquals(Integer.valueOf(1), Optional.ofNullable(1).orElse(2));
Assert.assertFalse(Optional.ofNullable(null).isPresent());
Assert.assertNull(Optional.ofNullable(null).orNull());
Assert.assertEquals(Integer.valueOf(2), Optional.<Integer>ofNullable(null).or(2));
Assert.assertEquals(Integer.valueOf(2), Optional.<Integer>ofNullable(null).orElse(2));
}
@Test
public void testFromNullable() {
Assert.assertTrue(Optional.fromNullable(1).isPresent());
Assert.assertNotNull(Optional.fromNullable(1).orNull());
Assert.assertEquals(Integer.valueOf(1), Optional.fromNullable(1).get());
Assert.assertEquals(Integer.valueOf(1), Optional.fromNullable(1).or(2));
Assert.assertEquals(Integer.valueOf(1), Optional.fromNullable(1).orElse(2));
Assert.assertFalse(Optional.fromNullable(null).isPresent());
Assert.assertNull(Optional.fromNullable(null).orNull());
Assert.assertEquals(Integer.valueOf(2), Optional.<Integer>fromNullable(null).or(2));
Assert.assertEquals(Integer.valueOf(2), Optional.<Integer>fromNullable(null).orElse(2));
}
}
// File: Create_ds/spark/core/src/main/java/org/apache/spark/ExecutorPlugin.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import org.apache.spark.annotation.DeveloperApi;
/**
* A plugin which can be automatically instantiated within each Spark executor. Users can specify
* plugins which should be created with the "spark.executor.plugins" configuration. An instance
* of each plugin will be created for every executor, including those created by dynamic allocation,
* before the executor starts running any tasks.
*
 * The specific API exposed to the end users is still considered to be very unstable. We will
* hopefully be able to keep compatibility by providing default implementations for any methods
* added, but make no guarantees this will always be possible across all Spark releases.
*
* Spark does nothing to verify the plugin is doing legitimate things, or to manage the resources
* it uses. A plugin acquires the same privileges as the user running the task. A bad plugin
* could also interfere with task execution and make the executor fail in unexpected ways.
*/
@DeveloperApi
public interface ExecutorPlugin {
/**
* Initialize the executor plugin.
*
* <p>Each executor will, during its initialization, invoke this method on each
* plugin provided in the spark.executor.plugins configuration.</p>
*
* <p>Plugins should create threads in their implementation of this method for
* any polling, blocking, or intensive computation.</p>
*/
default void init() {}
/**
* Clean up and terminate this plugin.
*
* <p>This function is called during the executor shutdown phase. The executor
* will wait for the plugin to terminate before continuing its own shutdown.</p>
*/
default void shutdown() {}
}
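
/**
 * A minimal sketch (not from Spark's codebase) of what a plugin implementing the lifecycle
 * described above could look like. The class name, thread name and poll interval are
 * hypothetical; such a plugin would be enabled by listing its fully qualified class name in
 * the "spark.executor.plugins" configuration.
 */
class PollingExecutorPluginExample implements ExecutorPlugin {
  private Thread poller;
  private volatile boolean running = true;

  @Override
  public void init() {
    // Polling, blocking, or intensive work belongs in its own thread so that executor
    // initialization is not delayed.
    poller = new Thread(() -> {
      while (running) {
        try {
          Thread.sleep(10_000); // hypothetical 10-second poll interval
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
        // ... collect and report executor-local state here ...
      }
    }, "polling-executor-plugin-example");
    poller.setDaemon(true);
    poller.start();
  }

  @Override
  public void shutdown() {
    // The executor waits for shutdown() to return, so stop the polling thread promptly.
    running = false;
    if (poller != null) {
      poller.interrupt();
    }
  }
}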
// File: Create_ds/spark/core/src/main/java/org/apache/spark/SparkExecutorInfo.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import java.io.Serializable;
/**
* Exposes information about Spark Executors.
*
* This interface is not designed to be implemented outside of Spark. We may add additional methods
* which may break binary compatibility with outside implementations.
*/
public interface SparkExecutorInfo extends Serializable {
String host();
int port();
long cacheSize();
int numRunningTasks();
long usedOnHeapStorageMemory();
long usedOffHeapStorageMemory();
long totalOnHeapStorageMemory();
long totalOffHeapStorageMemory();
}
// File: Create_ds/spark/core/src/main/java/org/apache/spark/SparkStageInfo.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import java.io.Serializable;
/**
* Exposes information about Spark Stages.
*
* This interface is not designed to be implemented outside of Spark. We may add additional methods
* which may break binary compatibility with outside implementations.
*/
public interface SparkStageInfo extends Serializable {
int stageId();
int currentAttemptId();
long submissionTime();
String name();
int numTasks();
int numActiveTasks();
int numCompletedTasks();
int numFailedTasks();
}
// File: Create_ds/spark/core/src/main/java/org/apache/spark/SparkJobInfo.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import java.io.Serializable;
/**
* Exposes information about Spark Jobs.
*
* This interface is not designed to be implemented outside of Spark. We may add additional methods
* which may break binary compatibility with outside implementations.
*/
public interface SparkJobInfo extends Serializable {
int jobId();
int[] stageIds();
JobExecutionStatus status();
}
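
/**
 * A usage sketch (illustrative only, not part of this file in Spark) showing how the status
 * interfaces above are typically consumed. It assumes a {@code JavaSparkContext} named
 * {@code sc} is passed in by the caller, and that the status tracker may return null for
 * ids that are unknown or have been garbage collected.
 */
class SparkJobInfoUsageExample {
  static void printJobProgress(org.apache.spark.api.java.JavaSparkContext sc, int jobId) {
    SparkJobInfo job = sc.statusTracker().getJobInfo(jobId);
    if (job == null) {
      return; // unknown or garbage-collected job id
    }
    System.out.println("Job " + job.jobId() + " status: " + job.status());
    for (int stageId : job.stageIds()) {
      SparkStageInfo stage = sc.statusTracker().getStageInfo(stageId);
      if (stage != null) {
        System.out.println("  stage " + stage.stageId() + ": "
            + stage.numCompletedTasks() + "/" + stage.numTasks() + " tasks complete");
      }
    }
  }
}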
// File: Create_ds/spark/core/src/main/java/org/apache/spark/JobExecutionStatus.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import org.apache.spark.util.EnumUtil;
public enum JobExecutionStatus {
RUNNING,
SUCCEEDED,
FAILED,
UNKNOWN;
public static JobExecutionStatus fromString(String str) {
return EnumUtil.parseIgnoreCase(JobExecutionStatus.class, str);
}
}
// File: Create_ds/spark/core/src/main/java/org/apache/spark/SparkFirehoseListener.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import org.apache.spark.scheduler.*;
/**
* Class that allows users to receive all SparkListener events.
* Users should override the onEvent method.
*
* This is a concrete Java class in order to ensure that we don't forget to update it when adding
* new methods to SparkListener: forgetting to add a method will result in a compilation error (if
 * this were a concrete Scala class, default implementations of new event handlers would be inherited
* from the SparkListener trait).
*/
public class SparkFirehoseListener implements SparkListenerInterface {
public void onEvent(SparkListenerEvent event) { }
@Override
public final void onStageCompleted(SparkListenerStageCompleted stageCompleted) {
onEvent(stageCompleted);
}
@Override
public final void onStageSubmitted(SparkListenerStageSubmitted stageSubmitted) {
onEvent(stageSubmitted);
}
@Override
public final void onTaskStart(SparkListenerTaskStart taskStart) {
onEvent(taskStart);
}
@Override
public final void onTaskGettingResult(SparkListenerTaskGettingResult taskGettingResult) {
onEvent(taskGettingResult);
}
@Override
public final void onTaskEnd(SparkListenerTaskEnd taskEnd) {
onEvent(taskEnd);
}
@Override
public final void onJobStart(SparkListenerJobStart jobStart) {
onEvent(jobStart);
}
@Override
public final void onJobEnd(SparkListenerJobEnd jobEnd) {
onEvent(jobEnd);
}
@Override
public final void onEnvironmentUpdate(SparkListenerEnvironmentUpdate environmentUpdate) {
onEvent(environmentUpdate);
}
@Override
public final void onBlockManagerAdded(SparkListenerBlockManagerAdded blockManagerAdded) {
onEvent(blockManagerAdded);
}
@Override
public final void onBlockManagerRemoved(SparkListenerBlockManagerRemoved blockManagerRemoved) {
onEvent(blockManagerRemoved);
}
@Override
public final void onUnpersistRDD(SparkListenerUnpersistRDD unpersistRDD) {
onEvent(unpersistRDD);
}
@Override
public final void onApplicationStart(SparkListenerApplicationStart applicationStart) {
onEvent(applicationStart);
}
@Override
public final void onApplicationEnd(SparkListenerApplicationEnd applicationEnd) {
onEvent(applicationEnd);
}
@Override
public final void onExecutorMetricsUpdate(
SparkListenerExecutorMetricsUpdate executorMetricsUpdate) {
onEvent(executorMetricsUpdate);
}
@Override
public final void onExecutorAdded(SparkListenerExecutorAdded executorAdded) {
onEvent(executorAdded);
}
@Override
public final void onExecutorRemoved(SparkListenerExecutorRemoved executorRemoved) {
onEvent(executorRemoved);
}
@Override
public final void onExecutorBlacklisted(SparkListenerExecutorBlacklisted executorBlacklisted) {
onEvent(executorBlacklisted);
}
@Override
public void onExecutorBlacklistedForStage(
SparkListenerExecutorBlacklistedForStage executorBlacklistedForStage) {
onEvent(executorBlacklistedForStage);
}
@Override
public void onNodeBlacklistedForStage(
SparkListenerNodeBlacklistedForStage nodeBlacklistedForStage) {
onEvent(nodeBlacklistedForStage);
}
@Override
public final void onExecutorUnblacklisted(
SparkListenerExecutorUnblacklisted executorUnblacklisted) {
onEvent(executorUnblacklisted);
}
@Override
public final void onNodeBlacklisted(SparkListenerNodeBlacklisted nodeBlacklisted) {
onEvent(nodeBlacklisted);
}
@Override
public final void onNodeUnblacklisted(SparkListenerNodeUnblacklisted nodeUnblacklisted) {
onEvent(nodeUnblacklisted);
}
@Override
public void onBlockUpdated(SparkListenerBlockUpdated blockUpdated) {
onEvent(blockUpdated);
}
@Override
public void onSpeculativeTaskSubmitted(SparkListenerSpeculativeTaskSubmitted speculativeTask) {
onEvent(speculativeTask);
}
@Override
public void onOtherEvent(SparkListenerEvent event) {
onEvent(event);
}
}
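
/**
 * A minimal usage sketch (not part of Spark) of the pattern described in the class comment:
 * a subclass overrides only onEvent and receives every listener event through it. The class
 * name and counter are illustrative; such a listener can be registered like any other
 * SparkListenerInterface implementation, e.g. via the "spark.extraListeners" configuration.
 */
class EventCountingFirehoseListener extends SparkFirehoseListener {
  private final java.util.concurrent.atomic.AtomicLong eventCount =
      new java.util.concurrent.atomic.AtomicLong();

  @Override
  public void onEvent(SparkListenerEvent event) {
    // Every callback defined in SparkFirehoseListener funnels into this single method.
    eventCount.incrementAndGet();
  }

  public long count() {
    return eventCount.get();
  }
}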
// File: Create_ds/spark/core/src/main/java/org/apache/spark/package-info.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Core Spark classes in Scala. A few classes here, such as {@link org.apache.spark.Accumulator}
* and {@link org.apache.spark.storage.StorageLevel}, are also used in Java, but the
* {@link org.apache.spark.api.java} package contains the main Java API.
*/
package org.apache.spark;
// File: Create_ds/spark/core/src/main/java/org/apache/spark/shuffle/sort/SpillInfo.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import java.io.File;
import org.apache.spark.storage.TempShuffleBlockId;
/**
* Metadata for a block of data written by {@link ShuffleExternalSorter}.
*/
final class SpillInfo {
final long[] partitionLengths;
final File file;
final TempShuffleBlockId blockId;
SpillInfo(int numPartitions, File file, TempShuffleBlockId blockId) {
this.partitionLengths = new long[numPartitions];
this.file = file;
this.blockId = blockId;
}
}
// File: Create_ds/spark/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import javax.annotation.Nullable;
import java.io.*;
import java.nio.channels.FileChannel;
import java.util.Iterator;
import scala.Option;
import scala.Product2;
import scala.collection.JavaConverters;
import scala.reflect.ClassTag;
import scala.reflect.ClassTag$;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.io.ByteStreams;
import com.google.common.io.Closeables;
import com.google.common.io.Files;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.*;
import org.apache.spark.annotation.Private;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.io.CompressionCodec;
import org.apache.spark.io.CompressionCodec$;
import org.apache.spark.io.NioBufferedFileInputStream;
import org.apache.commons.io.output.CloseShieldOutputStream;
import org.apache.commons.io.output.CountingOutputStream;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.network.util.LimitedInputStream;
import org.apache.spark.scheduler.MapStatus;
import org.apache.spark.scheduler.MapStatus$;
import org.apache.spark.serializer.SerializationStream;
import org.apache.spark.serializer.SerializerInstance;
import org.apache.spark.shuffle.IndexShuffleBlockResolver;
import org.apache.spark.shuffle.ShuffleWriter;
import org.apache.spark.storage.BlockManager;
import org.apache.spark.storage.TimeTrackingOutputStream;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.util.Utils;
import org.apache.spark.internal.config.package$;
@Private
public class UnsafeShuffleWriter<K, V> extends ShuffleWriter<K, V> {
private static final Logger logger = LoggerFactory.getLogger(UnsafeShuffleWriter.class);
private static final ClassTag<Object> OBJECT_CLASS_TAG = ClassTag$.MODULE$.Object();
@VisibleForTesting
static final int DEFAULT_INITIAL_SORT_BUFFER_SIZE = 4096;
static final int DEFAULT_INITIAL_SER_BUFFER_SIZE = 1024 * 1024;
private final BlockManager blockManager;
private final IndexShuffleBlockResolver shuffleBlockResolver;
private final TaskMemoryManager memoryManager;
private final SerializerInstance serializer;
private final Partitioner partitioner;
private final ShuffleWriteMetrics writeMetrics;
private final int shuffleId;
private final int mapId;
private final TaskContext taskContext;
private final SparkConf sparkConf;
private final boolean transferToEnabled;
private final int initialSortBufferSize;
private final int inputBufferSizeInBytes;
private final int outputBufferSizeInBytes;
@Nullable private MapStatus mapStatus;
@Nullable private ShuffleExternalSorter sorter;
private long peakMemoryUsedBytes = 0;
/** Subclass of ByteArrayOutputStream that exposes `buf` directly. */
private static final class MyByteArrayOutputStream extends ByteArrayOutputStream {
MyByteArrayOutputStream(int size) { super(size); }
public byte[] getBuf() { return buf; }
}
private MyByteArrayOutputStream serBuffer;
private SerializationStream serOutputStream;
/**
* Are we in the process of stopping? Because map tasks can call stop() with success = true
* and then call stop() with success = false if they get an exception, we want to make sure
* we don't try deleting files, etc twice.
*/
private boolean stopping = false;
private class CloseAndFlushShieldOutputStream extends CloseShieldOutputStream {
CloseAndFlushShieldOutputStream(OutputStream outputStream) {
super(outputStream);
}
@Override
public void flush() {
// do nothing
}
}
public UnsafeShuffleWriter(
BlockManager blockManager,
IndexShuffleBlockResolver shuffleBlockResolver,
TaskMemoryManager memoryManager,
SerializedShuffleHandle<K, V> handle,
int mapId,
TaskContext taskContext,
SparkConf sparkConf) throws IOException {
final int numPartitions = handle.dependency().partitioner().numPartitions();
if (numPartitions > SortShuffleManager.MAX_SHUFFLE_OUTPUT_PARTITIONS_FOR_SERIALIZED_MODE()) {
throw new IllegalArgumentException(
"UnsafeShuffleWriter can only be used for shuffles with at most " +
SortShuffleManager.MAX_SHUFFLE_OUTPUT_PARTITIONS_FOR_SERIALIZED_MODE() +
" reduce partitions");
}
this.blockManager = blockManager;
this.shuffleBlockResolver = shuffleBlockResolver;
this.memoryManager = memoryManager;
this.mapId = mapId;
final ShuffleDependency<K, V, V> dep = handle.dependency();
this.shuffleId = dep.shuffleId();
this.serializer = dep.serializer().newInstance();
this.partitioner = dep.partitioner();
this.writeMetrics = taskContext.taskMetrics().shuffleWriteMetrics();
this.taskContext = taskContext;
this.sparkConf = sparkConf;
this.transferToEnabled = sparkConf.getBoolean("spark.file.transferTo", true);
this.initialSortBufferSize = sparkConf.getInt("spark.shuffle.sort.initialBufferSize",
DEFAULT_INITIAL_SORT_BUFFER_SIZE);
this.inputBufferSizeInBytes =
(int) (long) sparkConf.get(package$.MODULE$.SHUFFLE_FILE_BUFFER_SIZE()) * 1024;
this.outputBufferSizeInBytes =
(int) (long) sparkConf.get(package$.MODULE$.SHUFFLE_UNSAFE_FILE_OUTPUT_BUFFER_SIZE()) * 1024;
open();
}
private void updatePeakMemoryUsed() {
// sorter can be null if this writer is closed
if (sorter != null) {
long mem = sorter.getPeakMemoryUsedBytes();
if (mem > peakMemoryUsedBytes) {
peakMemoryUsedBytes = mem;
}
}
}
/**
* Return the peak memory used so far, in bytes.
*/
public long getPeakMemoryUsedBytes() {
updatePeakMemoryUsed();
return peakMemoryUsedBytes;
}
/**
* This convenience method should only be called in test code.
*/
@VisibleForTesting
public void write(Iterator<Product2<K, V>> records) throws IOException {
write(JavaConverters.asScalaIteratorConverter(records).asScala());
}
@Override
public void write(scala.collection.Iterator<Product2<K, V>> records) throws IOException {
// Keep track of success so we know if we encountered an exception
// We do this rather than a standard try/catch/re-throw to handle
// generic throwables.
boolean success = false;
try {
while (records.hasNext()) {
insertRecordIntoSorter(records.next());
}
closeAndWriteOutput();
success = true;
} finally {
if (sorter != null) {
try {
sorter.cleanupResources();
} catch (Exception e) {
// Only throw this error if we won't be masking another
// error.
if (success) {
throw e;
} else {
logger.error("In addition to a failure during writing, we failed during " +
"cleanup.", e);
}
}
}
}
}
private void open() {
assert (sorter == null);
sorter = new ShuffleExternalSorter(
memoryManager,
blockManager,
taskContext,
initialSortBufferSize,
partitioner.numPartitions(),
sparkConf,
writeMetrics);
serBuffer = new MyByteArrayOutputStream(DEFAULT_INITIAL_SER_BUFFER_SIZE);
serOutputStream = serializer.serializeStream(serBuffer);
}
@VisibleForTesting
void closeAndWriteOutput() throws IOException {
assert(sorter != null);
updatePeakMemoryUsed();
serBuffer = null;
serOutputStream = null;
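    // Nulling out the serialization buffer and stream allows them to be garbage collected before
    // the potentially memory-hungry merge below (descriptive note added here; this rationale is
    // an inference, not part of the original source).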
final SpillInfo[] spills = sorter.closeAndGetSpills();
sorter = null;
final long[] partitionLengths;
final File output = shuffleBlockResolver.getDataFile(shuffleId, mapId);
final File tmp = Utils.tempFileWith(output);
try {
try {
partitionLengths = mergeSpills(spills, tmp);
} finally {
for (SpillInfo spill : spills) {
if (spill.file.exists() && ! spill.file.delete()) {
logger.error("Error while deleting spill file {}", spill.file.getPath());
}
}
}
shuffleBlockResolver.writeIndexFileAndCommit(shuffleId, mapId, partitionLengths, tmp);
} finally {
if (tmp.exists() && !tmp.delete()) {
logger.error("Error while deleting temp file {}", tmp.getAbsolutePath());
}
}
mapStatus = MapStatus$.MODULE$.apply(blockManager.shuffleServerId(), partitionLengths);
}
@VisibleForTesting
void insertRecordIntoSorter(Product2<K, V> record) throws IOException {
assert(sorter != null);
final K key = record._1();
final int partitionId = partitioner.getPartition(key);
serBuffer.reset();
serOutputStream.writeKey(key, OBJECT_CLASS_TAG);
serOutputStream.writeValue(record._2(), OBJECT_CLASS_TAG);
serOutputStream.flush();
final int serializedRecordSize = serBuffer.size();
assert (serializedRecordSize > 0);
sorter.insertRecord(
serBuffer.getBuf(), Platform.BYTE_ARRAY_OFFSET, serializedRecordSize, partitionId);
}
@VisibleForTesting
void forceSorterToSpill() throws IOException {
assert (sorter != null);
sorter.spill();
}
/**
* Merge zero or more spill files together, choosing the fastest merging strategy based on the
* number of spills and the IO compression codec.
*
* @return the partition lengths in the merged file.
*/
private long[] mergeSpills(SpillInfo[] spills, File outputFile) throws IOException {
final boolean compressionEnabled = sparkConf.getBoolean("spark.shuffle.compress", true);
final CompressionCodec compressionCodec = CompressionCodec$.MODULE$.createCodec(sparkConf);
final boolean fastMergeEnabled =
sparkConf.getBoolean("spark.shuffle.unsafe.fastMergeEnabled", true);
final boolean fastMergeIsSupported = !compressionEnabled ||
CompressionCodec$.MODULE$.supportsConcatenationOfSerializedStreams(compressionCodec);
final boolean encryptionEnabled = blockManager.serializerManager().encryptionEnabled();
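    // Illustrative note (added comment, not in the original source): with typical defaults
    // (compression enabled with a codec that supports concatenation of serialized streams,
    // fastMergeEnabled=true, transferTo enabled, and encryption disabled), the transferTo-based
    // fast merge below is chosen.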
try {
if (spills.length == 0) {
new FileOutputStream(outputFile).close(); // Create an empty file
return new long[partitioner.numPartitions()];
} else if (spills.length == 1) {
// Here, we don't need to perform any metrics updates because the bytes written to this
// output file would have already been counted as shuffle bytes written.
Files.move(spills[0].file, outputFile);
return spills[0].partitionLengths;
} else {
final long[] partitionLengths;
// There are multiple spills to merge, so none of these spill files' lengths were counted
// towards our shuffle write count or shuffle write time. If we use the slow merge path,
// then the final output file's size won't necessarily be equal to the sum of the spill
// files' sizes. To guard against this case, we look at the output file's actual size when
// computing shuffle bytes written.
//
// We allow the individual merge methods to report their own IO times since different merge
        // strategies use different IO techniques. We count IO during merge towards the
        // shuffle write time, which appears to be consistent with the "not bypassing merge-sort"
// branch in ExternalSorter.
if (fastMergeEnabled && fastMergeIsSupported) {
// Compression is disabled or we are using an IO compression codec that supports
// decompression of concatenated compressed streams, so we can perform a fast spill merge
// that doesn't need to interpret the spilled bytes.
if (transferToEnabled && !encryptionEnabled) {
logger.debug("Using transferTo-based fast merge");
partitionLengths = mergeSpillsWithTransferTo(spills, outputFile);
} else {
logger.debug("Using fileStream-based fast merge");
partitionLengths = mergeSpillsWithFileStream(spills, outputFile, null);
}
} else {
logger.debug("Using slow merge");
partitionLengths = mergeSpillsWithFileStream(spills, outputFile, compressionCodec);
}
// When closing an UnsafeShuffleExternalSorter that has already spilled once but also has
// in-memory records, we write out the in-memory records to a file but do not count that
// final write as bytes spilled (instead, it's accounted as shuffle write). The merge needs
// to be counted as shuffle write, but this will lead to double-counting of the final
// SpillInfo's bytes.
writeMetrics.decBytesWritten(spills[spills.length - 1].file.length());
writeMetrics.incBytesWritten(outputFile.length());
return partitionLengths;
}
} catch (IOException e) {
if (outputFile.exists() && !outputFile.delete()) {
logger.error("Unable to delete output file {}", outputFile.getPath());
}
throw e;
}
}
/**
* Merges spill files using Java FileStreams. This code path is typically slower than
* the NIO-based merge, {@link UnsafeShuffleWriter#mergeSpillsWithTransferTo(SpillInfo[],
* File)}, and it's mostly used in cases where the IO compression codec does not support
* concatenation of compressed data, when encryption is enabled, or when users have
* explicitly disabled use of {@code transferTo} in order to work around kernel bugs.
   * This code path might also be faster in cases where individual partition sizes in a spill
   * are small and the UnsafeShuffleWriter#mergeSpillsWithTransferTo method would perform many
   * small disk I/Os, which is inefficient. In those cases, using large buffers for the input
   * and output files helps reduce the number of disk I/Os, making the file merging faster.
*
* @param spills the spills to merge.
* @param outputFile the file to write the merged data to.
* @param compressionCodec the IO compression codec, or null if shuffle compression is disabled.
* @return the partition lengths in the merged file.
*/
private long[] mergeSpillsWithFileStream(
SpillInfo[] spills,
File outputFile,
@Nullable CompressionCodec compressionCodec) throws IOException {
assert (spills.length >= 2);
final int numPartitions = partitioner.numPartitions();
final long[] partitionLengths = new long[numPartitions];
final InputStream[] spillInputStreams = new InputStream[spills.length];
final OutputStream bos = new BufferedOutputStream(
new FileOutputStream(outputFile),
outputBufferSizeInBytes);
// Use a counting output stream to avoid having to close the underlying file and ask
// the file system for its size after each partition is written.
final CountingOutputStream mergedFileOutputStream = new CountingOutputStream(bos);
boolean threwException = true;
try {
for (int i = 0; i < spills.length; i++) {
spillInputStreams[i] = new NioBufferedFileInputStream(
spills[i].file,
inputBufferSizeInBytes);
}
for (int partition = 0; partition < numPartitions; partition++) {
final long initialFileLength = mergedFileOutputStream.getByteCount();
// Shield the underlying output stream from close() and flush() calls, so that we can close
// the higher level streams to make sure all data is really flushed and internal state is
// cleaned.
OutputStream partitionOutput = new CloseAndFlushShieldOutputStream(
new TimeTrackingOutputStream(writeMetrics, mergedFileOutputStream));
partitionOutput = blockManager.serializerManager().wrapForEncryption(partitionOutput);
if (compressionCodec != null) {
partitionOutput = compressionCodec.compressedOutputStream(partitionOutput);
}
for (int i = 0; i < spills.length; i++) {
final long partitionLengthInSpill = spills[i].partitionLengths[partition];
if (partitionLengthInSpill > 0) {
InputStream partitionInputStream = new LimitedInputStream(spillInputStreams[i],
partitionLengthInSpill, false);
try {
partitionInputStream = blockManager.serializerManager().wrapForEncryption(
partitionInputStream);
if (compressionCodec != null) {
partitionInputStream = compressionCodec.compressedInputStream(partitionInputStream);
}
ByteStreams.copy(partitionInputStream, partitionOutput);
} finally {
partitionInputStream.close();
}
}
}
partitionOutput.flush();
partitionOutput.close();
partitionLengths[partition] = (mergedFileOutputStream.getByteCount() - initialFileLength);
}
threwException = false;
} finally {
// To avoid masking exceptions that caused us to prematurely enter the finally block, only
// throw exceptions during cleanup if threwException == false.
for (InputStream stream : spillInputStreams) {
Closeables.close(stream, threwException);
}
Closeables.close(mergedFileOutputStream, threwException);
}
return partitionLengths;
}
/**
* Merges spill files by using NIO's transferTo to concatenate spill partitions' bytes.
* This is only safe when the IO compression codec and serializer support concatenation of
* serialized streams.
*
* @return the partition lengths in the merged file.
*/
private long[] mergeSpillsWithTransferTo(SpillInfo[] spills, File outputFile) throws IOException {
assert (spills.length >= 2);
final int numPartitions = partitioner.numPartitions();
final long[] partitionLengths = new long[numPartitions];
final FileChannel[] spillInputChannels = new FileChannel[spills.length];
final long[] spillInputChannelPositions = new long[spills.length];
FileChannel mergedFileOutputChannel = null;
boolean threwException = true;
try {
for (int i = 0; i < spills.length; i++) {
spillInputChannels[i] = new FileInputStream(spills[i].file).getChannel();
}
      // This file needs to be opened in append mode in order to work around a Linux kernel bug
      // that affects transferTo; see SPARK-3948 for more details.
mergedFileOutputChannel = new FileOutputStream(outputFile, true).getChannel();
long bytesWrittenToMergedFile = 0;
for (int partition = 0; partition < numPartitions; partition++) {
for (int i = 0; i < spills.length; i++) {
final long partitionLengthInSpill = spills[i].partitionLengths[partition];
final FileChannel spillInputChannel = spillInputChannels[i];
final long writeStartTime = System.nanoTime();
Utils.copyFileStreamNIO(
spillInputChannel,
mergedFileOutputChannel,
spillInputChannelPositions[i],
partitionLengthInSpill);
spillInputChannelPositions[i] += partitionLengthInSpill;
writeMetrics.incWriteTime(System.nanoTime() - writeStartTime);
bytesWrittenToMergedFile += partitionLengthInSpill;
partitionLengths[partition] += partitionLengthInSpill;
}
}
      // Check the position after the transferTo loop to verify that it matches the expected
      // number of bytes written, and raise an exception if it does not. The position will not be
      // advanced to the expected length after calling transferTo in kernel version 2.6.32. This
      // issue is described at https://bugs.openjdk.java.net/browse/JDK-7052359 and SPARK-3948.
if (mergedFileOutputChannel.position() != bytesWrittenToMergedFile) {
throw new IOException(
"Current position " + mergedFileOutputChannel.position() + " does not equal expected " +
"position " + bytesWrittenToMergedFile + " after transferTo. Please check your kernel" +
" version to see if it is 2.6.32, as there is a kernel bug which will lead to " +
"unexpected behavior when using transferTo. You can set spark.file.transferTo=false " +
"to disable this NIO feature."
);
}
threwException = false;
} finally {
// To avoid masking exceptions that caused us to prematurely enter the finally block, only
// throw exceptions during cleanup if threwException == false.
for (int i = 0; i < spills.length; i++) {
assert(spillInputChannelPositions[i] == spills[i].file.length());
Closeables.close(spillInputChannels[i], threwException);
}
Closeables.close(mergedFileOutputChannel, threwException);
}
return partitionLengths;
}
@Override
public Option<MapStatus> stop(boolean success) {
try {
taskContext.taskMetrics().incPeakExecutionMemory(getPeakMemoryUsedBytes());
if (stopping) {
return Option.apply(null);
} else {
stopping = true;
if (success) {
if (mapStatus == null) {
throw new IllegalStateException("Cannot call stop(true) without having called write()");
}
return Option.apply(mapStatus);
} else {
return Option.apply(null);
}
}
} finally {
if (sorter != null) {
// If sorter is non-null, then this implies that we called stop() in response to an error,
// so we need to clean up memory and spill files created by the sorter
sorter.cleanupResources();
}
}
}
}
| 9,615 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleSortDataFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.util.collection.SortDataFormat;
final class ShuffleSortDataFormat extends SortDataFormat<PackedRecordPointer, LongArray> {
private final LongArray buffer;
ShuffleSortDataFormat(LongArray buffer) {
this.buffer = buffer;
}
@Override
public PackedRecordPointer getKey(LongArray data, int pos) {
// Since we re-use keys, this method shouldn't be called.
throw new UnsupportedOperationException();
}
@Override
public PackedRecordPointer newKey() {
return new PackedRecordPointer();
}
@Override
public PackedRecordPointer getKey(LongArray data, int pos, PackedRecordPointer reuse) {
reuse.set(data.get(pos));
return reuse;
}
@Override
public void swap(LongArray data, int pos0, int pos1) {
final long temp = data.get(pos0);
data.set(pos0, data.get(pos1));
data.set(pos1, temp);
}
@Override
public void copyElement(LongArray src, int srcPos, LongArray dst, int dstPos) {
dst.set(dstPos, src.get(srcPos));
}
@Override
public void copyRange(LongArray src, int srcPos, LongArray dst, int dstPos, int length) {
Platform.copyMemory(
src.getBaseObject(),
src.getBaseOffset() + srcPos * 8L,
dst.getBaseObject(),
dst.getBaseOffset() + dstPos * 8L,
length * 8L
);
}
@Override
public LongArray allocate(int length) {
assert (length <= buffer.size()) :
"the buffer is smaller than required: " + buffer.size() + " < " + length;
return buffer;
}
}
| 9,616 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import javax.annotation.Nullable;
import scala.None$;
import scala.Option;
import scala.Product2;
import scala.Tuple2;
import scala.collection.Iterator;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.io.Closeables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.Partitioner;
import org.apache.spark.ShuffleDependency;
import org.apache.spark.SparkConf;
import org.apache.spark.TaskContext;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.scheduler.MapStatus;
import org.apache.spark.scheduler.MapStatus$;
import org.apache.spark.serializer.Serializer;
import org.apache.spark.serializer.SerializerInstance;
import org.apache.spark.shuffle.IndexShuffleBlockResolver;
import org.apache.spark.shuffle.ShuffleWriter;
import org.apache.spark.storage.*;
import org.apache.spark.util.Utils;
/**
* This class implements sort-based shuffle's hash-style shuffle fallback path. This write path
* writes incoming records to separate files, one file per reduce partition, then concatenates these
* per-partition files to form a single output file, regions of which are served to reducers.
* Records are not buffered in memory. It writes output in a format
* that can be served / consumed via {@link org.apache.spark.shuffle.IndexShuffleBlockResolver}.
* <p>
* This write path is inefficient for shuffles with large numbers of reduce partitions because it
* simultaneously opens separate serializers and file streams for all partitions. As a result,
* {@link SortShuffleManager} only selects this write path when
* <ul>
* <li>no Ordering is specified,</li>
* <li>no Aggregator is specified, and</li>
* <li>the number of partitions is less than
* <code>spark.shuffle.sort.bypassMergeThreshold</code>.</li>
* </ul>
*
* This code used to be part of {@link org.apache.spark.util.collection.ExternalSorter} but was
* refactored into its own class in order to reduce code complexity; see SPARK-7855 for details.
* <p>
* There have been proposals to completely remove this code path; see SPARK-6026 for details.
*/
final class BypassMergeSortShuffleWriter<K, V> extends ShuffleWriter<K, V> {
private static final Logger logger = LoggerFactory.getLogger(BypassMergeSortShuffleWriter.class);
private final int fileBufferSize;
private final boolean transferToEnabled;
private final int numPartitions;
private final BlockManager blockManager;
private final Partitioner partitioner;
private final ShuffleWriteMetrics writeMetrics;
private final int shuffleId;
private final int mapId;
private final Serializer serializer;
private final IndexShuffleBlockResolver shuffleBlockResolver;
/** Array of file writers, one for each partition */
private DiskBlockObjectWriter[] partitionWriters;
private FileSegment[] partitionWriterSegments;
@Nullable private MapStatus mapStatus;
private long[] partitionLengths;
/**
* Are we in the process of stopping? Because map tasks can call stop() with success = true
* and then call stop() with success = false if they get an exception, we want to make sure
   * we don't try deleting files, etc. twice.
*/
private boolean stopping = false;
BypassMergeSortShuffleWriter(
BlockManager blockManager,
IndexShuffleBlockResolver shuffleBlockResolver,
BypassMergeSortShuffleHandle<K, V> handle,
int mapId,
TaskContext taskContext,
SparkConf conf) {
// Use getSizeAsKb (not bytes) to maintain backwards compatibility if no units are provided
this.fileBufferSize = (int) conf.getSizeAsKb("spark.shuffle.file.buffer", "32k") * 1024;
this.transferToEnabled = conf.getBoolean("spark.file.transferTo", true);
this.blockManager = blockManager;
final ShuffleDependency<K, V, V> dep = handle.dependency();
this.mapId = mapId;
this.shuffleId = dep.shuffleId();
this.partitioner = dep.partitioner();
this.numPartitions = partitioner.numPartitions();
this.writeMetrics = taskContext.taskMetrics().shuffleWriteMetrics();
this.serializer = dep.serializer();
this.shuffleBlockResolver = shuffleBlockResolver;
}
@Override
public void write(Iterator<Product2<K, V>> records) throws IOException {
assert (partitionWriters == null);
if (!records.hasNext()) {
partitionLengths = new long[numPartitions];
shuffleBlockResolver.writeIndexFileAndCommit(shuffleId, mapId, partitionLengths, null);
mapStatus = MapStatus$.MODULE$.apply(blockManager.shuffleServerId(), partitionLengths);
return;
}
final SerializerInstance serInstance = serializer.newInstance();
final long openStartTime = System.nanoTime();
partitionWriters = new DiskBlockObjectWriter[numPartitions];
partitionWriterSegments = new FileSegment[numPartitions];
for (int i = 0; i < numPartitions; i++) {
final Tuple2<TempShuffleBlockId, File> tempShuffleBlockIdPlusFile =
blockManager.diskBlockManager().createTempShuffleBlock();
final File file = tempShuffleBlockIdPlusFile._2();
final BlockId blockId = tempShuffleBlockIdPlusFile._1();
partitionWriters[i] =
blockManager.getDiskWriter(blockId, file, serInstance, fileBufferSize, writeMetrics);
}
// Creating the file to write to and creating a disk writer both involve interacting with
// the disk, and can take a long time in aggregate when we open many files, so should be
// included in the shuffle write time.
writeMetrics.incWriteTime(System.nanoTime() - openStartTime);
while (records.hasNext()) {
final Product2<K, V> record = records.next();
final K key = record._1();
partitionWriters[partitioner.getPartition(key)].write(key, record._2());
}
for (int i = 0; i < numPartitions; i++) {
final DiskBlockObjectWriter writer = partitionWriters[i];
partitionWriterSegments[i] = writer.commitAndGet();
writer.close();
}
File output = shuffleBlockResolver.getDataFile(shuffleId, mapId);
File tmp = Utils.tempFileWith(output);
try {
partitionLengths = writePartitionedFile(tmp);
shuffleBlockResolver.writeIndexFileAndCommit(shuffleId, mapId, partitionLengths, tmp);
} finally {
if (tmp.exists() && !tmp.delete()) {
logger.error("Error while deleting temp file {}", tmp.getAbsolutePath());
}
}
mapStatus = MapStatus$.MODULE$.apply(blockManager.shuffleServerId(), partitionLengths);
}
@VisibleForTesting
long[] getPartitionLengths() {
return partitionLengths;
}
/**
* Concatenate all of the per-partition files into a single combined file.
*
* @return array of lengths, in bytes, of each partition of the file (used by map output tracker).
*/
private long[] writePartitionedFile(File outputFile) throws IOException {
// Track location of the partition starts in the output file
final long[] lengths = new long[numPartitions];
if (partitionWriters == null) {
// We were passed an empty iterator
return lengths;
}
final FileOutputStream out = new FileOutputStream(outputFile, true);
final long writeStartTime = System.nanoTime();
boolean threwException = true;
try {
for (int i = 0; i < numPartitions; i++) {
final File file = partitionWriterSegments[i].file();
if (file.exists()) {
final FileInputStream in = new FileInputStream(file);
boolean copyThrewException = true;
try {
lengths[i] = Utils.copyStream(in, out, false, transferToEnabled);
copyThrewException = false;
} finally {
Closeables.close(in, copyThrewException);
}
if (!file.delete()) {
logger.error("Unable to delete file for partition {}", i);
}
}
}
threwException = false;
} finally {
Closeables.close(out, threwException);
writeMetrics.incWriteTime(System.nanoTime() - writeStartTime);
}
partitionWriters = null;
return lengths;
}
@Override
public Option<MapStatus> stop(boolean success) {
if (stopping) {
return None$.empty();
} else {
stopping = true;
if (success) {
if (mapStatus == null) {
throw new IllegalStateException("Cannot call stop(true) without having called write()");
}
return Option.apply(mapStatus);
} else {
// The map task failed, so delete our output data.
if (partitionWriters != null) {
try {
for (DiskBlockObjectWriter writer : partitionWriters) {
// This method explicitly does _not_ throw exceptions:
File file = writer.revertPartialWritesAndClose();
if (!file.delete()) {
logger.error("Error while deleting file {}", file.getAbsolutePath());
}
}
} finally {
partitionWriters = null;
}
}
return None$.empty();
}
}
}
}
| 9,617 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.util.LinkedList;
import scala.Tuple2;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.SparkConf;
import org.apache.spark.TaskContext;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.internal.config.package$;
import org.apache.spark.memory.MemoryConsumer;
import org.apache.spark.memory.SparkOutOfMemoryError;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.memory.TooLargePageException;
import org.apache.spark.serializer.DummySerializerInstance;
import org.apache.spark.serializer.SerializerInstance;
import org.apache.spark.storage.BlockManager;
import org.apache.spark.storage.DiskBlockObjectWriter;
import org.apache.spark.storage.FileSegment;
import org.apache.spark.storage.TempShuffleBlockId;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.UnsafeAlignedOffset;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.Utils;
/**
* An external sorter that is specialized for sort-based shuffle.
* <p>
* Incoming records are appended to data pages. When all records have been inserted (or when the
* current thread's shuffle memory limit is reached), the in-memory records are sorted according to
* their partition ids (using a {@link ShuffleInMemorySorter}). The sorted records are then
* written to a single output file (or multiple files, if we've spilled). The format of the output
* files is the same as the format of the final output file written by
* {@link org.apache.spark.shuffle.sort.SortShuffleWriter}: each output partition's records are
* written as a single serialized, compressed stream that can be read with a new decompression and
* deserialization stream.
* <p>
* Unlike {@link org.apache.spark.util.collection.ExternalSorter}, this sorter does not merge its
* spill files. Instead, this merging is performed in {@link UnsafeShuffleWriter}, which uses a
* specialized merge procedure that avoids extra serialization/deserialization.
*/
final class ShuffleExternalSorter extends MemoryConsumer {
private static final Logger logger = LoggerFactory.getLogger(ShuffleExternalSorter.class);
@VisibleForTesting
static final int DISK_WRITE_BUFFER_SIZE = 1024 * 1024;
private final int numPartitions;
private final TaskMemoryManager taskMemoryManager;
private final BlockManager blockManager;
private final TaskContext taskContext;
private final ShuffleWriteMetrics writeMetrics;
/**
* Force this sorter to spill when there are this many elements in memory.
*/
private final int numElementsForSpillThreshold;
/** The buffer size to use when writing spills using DiskBlockObjectWriter */
private final int fileBufferSizeBytes;
/** The buffer size to use when writing the sorted records to an on-disk file */
private final int diskWriteBufferSize;
/**
* Memory pages that hold the records being sorted. The pages in this list are freed when
* spilling, although in principle we could recycle these pages across spills (on the other hand,
* this might not be necessary if we maintained a pool of re-usable pages in the TaskMemoryManager
* itself).
*/
private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<>();
private final LinkedList<SpillInfo> spills = new LinkedList<>();
/** Peak memory used by this sorter so far, in bytes. **/
private long peakMemoryUsedBytes;
// These variables are reset after spilling:
@Nullable private ShuffleInMemorySorter inMemSorter;
@Nullable private MemoryBlock currentPage = null;
private long pageCursor = -1;
ShuffleExternalSorter(
TaskMemoryManager memoryManager,
BlockManager blockManager,
TaskContext taskContext,
int initialSize,
int numPartitions,
SparkConf conf,
ShuffleWriteMetrics writeMetrics) {
super(memoryManager,
(int) Math.min(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES, memoryManager.pageSizeBytes()),
memoryManager.getTungstenMemoryMode());
this.taskMemoryManager = memoryManager;
this.blockManager = blockManager;
this.taskContext = taskContext;
this.numPartitions = numPartitions;
// Use getSizeAsKb (not bytes) to maintain backwards compatibility if no units are provided
this.fileBufferSizeBytes =
(int) (long) conf.get(package$.MODULE$.SHUFFLE_FILE_BUFFER_SIZE()) * 1024;
this.numElementsForSpillThreshold =
(int) conf.get(package$.MODULE$.SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD());
this.writeMetrics = writeMetrics;
this.inMemSorter = new ShuffleInMemorySorter(
this, initialSize, conf.getBoolean("spark.shuffle.sort.useRadixSort", true));
this.peakMemoryUsedBytes = getMemoryUsage();
this.diskWriteBufferSize =
(int) (long) conf.get(package$.MODULE$.SHUFFLE_DISK_WRITE_BUFFER_SIZE());
}
/**
* Sorts the in-memory records and writes the sorted records to an on-disk file.
* This method does not free the sort data structures.
*
* @param isLastFile if true, this indicates that we're writing the final output file and that the
* bytes written should be counted towards shuffle spill metrics rather than
* shuffle write metrics.
*/
private void writeSortedFile(boolean isLastFile) {
final ShuffleWriteMetrics writeMetricsToUse;
if (isLastFile) {
// We're writing the final non-spill file, so we _do_ want to count this as shuffle bytes.
writeMetricsToUse = writeMetrics;
} else {
// We're spilling, so bytes written should be counted towards spill rather than write.
// Create a dummy WriteMetrics object to absorb these metrics, since we don't want to count
// them towards shuffle bytes written.
writeMetricsToUse = new ShuffleWriteMetrics();
}
// This call performs the actual sort.
final ShuffleInMemorySorter.ShuffleSorterIterator sortedRecords =
inMemSorter.getSortedIterator();
// Small writes to DiskBlockObjectWriter will be fairly inefficient. Since there doesn't seem to
// be an API to directly transfer bytes from managed memory to the disk writer, we buffer
    // data through a byte array. This array does not need to be large enough to hold a single
    // record, since each record is copied to disk in chunks of at most diskWriteBufferSize bytes.
final byte[] writeBuffer = new byte[diskWriteBufferSize];
// Because this output will be read during shuffle, its compression codec must be controlled by
// spark.shuffle.compress instead of spark.shuffle.spill.compress, so we need to use
// createTempShuffleBlock here; see SPARK-3426 for more details.
final Tuple2<TempShuffleBlockId, File> spilledFileInfo =
blockManager.diskBlockManager().createTempShuffleBlock();
final File file = spilledFileInfo._2();
final TempShuffleBlockId blockId = spilledFileInfo._1();
final SpillInfo spillInfo = new SpillInfo(numPartitions, file, blockId);
// Unfortunately, we need a serializer instance in order to construct a DiskBlockObjectWriter.
// Our write path doesn't actually use this serializer (since we end up calling the `write()`
// OutputStream methods), but DiskBlockObjectWriter still calls some methods on it. To work
// around this, we pass a dummy no-op serializer.
final SerializerInstance ser = DummySerializerInstance.INSTANCE;
final DiskBlockObjectWriter writer =
blockManager.getDiskWriter(blockId, file, ser, fileBufferSizeBytes, writeMetricsToUse);
int currentPartition = -1;
final int uaoSize = UnsafeAlignedOffset.getUaoSize();
while (sortedRecords.hasNext()) {
sortedRecords.loadNext();
final int partition = sortedRecords.packedRecordPointer.getPartitionId();
assert (partition >= currentPartition);
if (partition != currentPartition) {
// Switch to the new partition
if (currentPartition != -1) {
final FileSegment fileSegment = writer.commitAndGet();
spillInfo.partitionLengths[currentPartition] = fileSegment.length();
}
currentPartition = partition;
}
final long recordPointer = sortedRecords.packedRecordPointer.getRecordPointer();
final Object recordPage = taskMemoryManager.getPage(recordPointer);
final long recordOffsetInPage = taskMemoryManager.getOffsetInPage(recordPointer);
int dataRemaining = UnsafeAlignedOffset.getSize(recordPage, recordOffsetInPage);
long recordReadPosition = recordOffsetInPage + uaoSize; // skip over record length
while (dataRemaining > 0) {
final int toTransfer = Math.min(diskWriteBufferSize, dataRemaining);
Platform.copyMemory(
recordPage, recordReadPosition, writeBuffer, Platform.BYTE_ARRAY_OFFSET, toTransfer);
writer.write(writeBuffer, 0, toTransfer);
recordReadPosition += toTransfer;
dataRemaining -= toTransfer;
}
writer.recordWritten();
}
final FileSegment committedSegment = writer.commitAndGet();
writer.close();
// If `writeSortedFile()` was called from `closeAndGetSpills()` and no records were inserted,
// then the file might be empty. Note that it might be better to avoid calling
// writeSortedFile() in that case.
if (currentPartition != -1) {
spillInfo.partitionLengths[currentPartition] = committedSegment.length();
spills.add(spillInfo);
}
if (!isLastFile) { // i.e. this is a spill file
// The current semantics of `shuffleRecordsWritten` seem to be that it's updated when records
// are written to disk, not when they enter the shuffle sorting code. DiskBlockObjectWriter
// relies on its `recordWritten()` method being called in order to trigger periodic updates to
// `shuffleBytesWritten`. If we were to remove the `recordWritten()` call and increment that
// counter at a higher-level, then the in-progress metrics for records written and bytes
// written would get out of sync.
//
// When writing the last file, we pass `writeMetrics` directly to the DiskBlockObjectWriter;
// in all other cases, we pass in a dummy write metrics to capture metrics, then copy those
// metrics to the true write metrics here. The reason for performing this copying is so that
// we can avoid reporting spilled bytes as shuffle write bytes.
//
// Note that we intentionally ignore the value of `writeMetricsToUse.shuffleWriteTime()`.
// Consistent with ExternalSorter, we do not count this IO towards shuffle write time.
// This means that this IO time is not accounted for anywhere; SPARK-3577 will fix this.
writeMetrics.incRecordsWritten(writeMetricsToUse.recordsWritten());
taskContext.taskMetrics().incDiskBytesSpilled(writeMetricsToUse.bytesWritten());
}
}
/**
* Sort and spill the current records in response to memory pressure.
*/
@Override
public long spill(long size, MemoryConsumer trigger) throws IOException {
if (trigger != this || inMemSorter == null || inMemSorter.numRecords() == 0) {
return 0L;
}
logger.info("Thread {} spilling sort data of {} to disk ({} {} so far)",
Thread.currentThread().getId(),
Utils.bytesToString(getMemoryUsage()),
spills.size(),
      spills.size() > 1 ? "times" : "time");
writeSortedFile(false);
final long spillSize = freeMemory();
inMemSorter.reset();
// Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
// records. Otherwise, if the task is over allocated memory, then without freeing the memory
// pages, we might not be able to get memory for the pointer array.
taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
return spillSize;
}
private long getMemoryUsage() {
long totalPageSize = 0;
for (MemoryBlock page : allocatedPages) {
totalPageSize += page.size();
}
return ((inMemSorter == null) ? 0 : inMemSorter.getMemoryUsage()) + totalPageSize;
}
private void updatePeakMemoryUsed() {
long mem = getMemoryUsage();
if (mem > peakMemoryUsedBytes) {
peakMemoryUsedBytes = mem;
}
}
/**
* Return the peak memory used so far, in bytes.
*/
long getPeakMemoryUsedBytes() {
updatePeakMemoryUsed();
return peakMemoryUsedBytes;
}
private long freeMemory() {
updatePeakMemoryUsed();
long memoryFreed = 0;
for (MemoryBlock block : allocatedPages) {
memoryFreed += block.size();
freePage(block);
}
allocatedPages.clear();
currentPage = null;
pageCursor = 0;
return memoryFreed;
}
/**
* Force all memory and spill files to be deleted; called by shuffle error-handling code.
*/
public void cleanupResources() {
freeMemory();
if (inMemSorter != null) {
inMemSorter.free();
inMemSorter = null;
}
for (SpillInfo spill : spills) {
if (spill.file.exists() && !spill.file.delete()) {
logger.error("Unable to delete spill file {}", spill.file.getPath());
}
}
}
/**
   * Checks whether there is enough space to insert an additional record into the sort pointer
* array and grows the array if additional space is required. If the required space cannot be
* obtained, then the in-memory data will be spilled to disk.
*/
private void growPointerArrayIfNecessary() throws IOException {
assert(inMemSorter != null);
if (!inMemSorter.hasSpaceForAnotherRecord()) {
long used = inMemSorter.getMemoryUsage();
LongArray array;
try {
// could trigger spilling
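        // Note (descriptive comment added here, not in the original source): `used` is in bytes
        // and allocateArray() takes a length in 8-byte words, so this requests an array with
        // double the current capacity.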
array = allocateArray(used / 8 * 2);
} catch (TooLargePageException e) {
        // The pointer array is too big to fit in a single page, spill.
spill();
return;
} catch (SparkOutOfMemoryError e) {
        // should have triggered spilling
if (!inMemSorter.hasSpaceForAnotherRecord()) {
logger.error("Unable to grow the pointer array");
throw e;
}
return;
}
// check if spilling is triggered or not
if (inMemSorter.hasSpaceForAnotherRecord()) {
freeArray(array);
} else {
inMemSorter.expandPointerArray(array);
}
}
}
/**
* Allocates more memory in order to insert an additional record. This will request additional
* memory from the memory manager and spill if the requested memory can not be obtained.
*
* @param required the required space in the data page, in bytes, including space for storing
* the record size. This must be less than or equal to the page size (records
* that exceed the page size are handled via a different code path which uses
* special overflow pages).
*/
private void acquireNewPageIfNecessary(int required) {
if (currentPage == null ||
pageCursor + required > currentPage.getBaseOffset() + currentPage.size() ) {
// TODO: try to find space in previous pages
currentPage = allocatePage(required);
pageCursor = currentPage.getBaseOffset();
allocatedPages.add(currentPage);
}
}
/**
* Write a record to the shuffle sorter.
*/
public void insertRecord(Object recordBase, long recordOffset, int length, int partitionId)
throws IOException {
// for tests
assert(inMemSorter != null);
if (inMemSorter.numRecords() >= numElementsForSpillThreshold) {
logger.info("Spilling data because number of spilledRecords crossed the threshold " +
numElementsForSpillThreshold);
spill();
}
growPointerArrayIfNecessary();
final int uaoSize = UnsafeAlignedOffset.getUaoSize();
// Need 4 or 8 bytes to store the record length.
final int required = length + uaoSize;
acquireNewPageIfNecessary(required);
assert(currentPage != null);
final Object base = currentPage.getBaseObject();
final long recordAddress = taskMemoryManager.encodePageNumberAndOffset(currentPage, pageCursor);
UnsafeAlignedOffset.putSize(base, pageCursor, length);
pageCursor += uaoSize;
Platform.copyMemory(recordBase, recordOffset, base, pageCursor, length);
pageCursor += length;
inMemSorter.insertRecord(recordAddress, partitionId);
}
/**
* Close the sorter, causing any buffered data to be sorted and written out to disk.
*
* @return metadata for the spill files written by this sorter. If no records were ever inserted
* into this sorter, then this will return an empty array.
* @throws IOException
*/
public SpillInfo[] closeAndGetSpills() throws IOException {
if (inMemSorter != null) {
// Do not count the final file towards the spill count.
writeSortedFile(true);
freeMemory();
inMemSorter.free();
inMemSorter = null;
}
return spills.toArray(new SpillInfo[spills.size()]);
}
}
| 9,618 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import java.util.Comparator;
import org.apache.spark.memory.MemoryConsumer;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.collection.Sorter;
import org.apache.spark.util.collection.unsafe.sort.RadixSort;
final class ShuffleInMemorySorter {
private static final class SortComparator implements Comparator<PackedRecordPointer> {
@Override
public int compare(PackedRecordPointer left, PackedRecordPointer right) {
int leftId = left.getPartitionId();
int rightId = right.getPartitionId();
return leftId < rightId ? -1 : (leftId > rightId ? 1 : 0);
}
}
private static final SortComparator SORT_COMPARATOR = new SortComparator();
private final MemoryConsumer consumer;
/**
* An array of record pointers and partition ids that have been encoded by
* {@link PackedRecordPointer}. The sort operates on this array instead of directly manipulating
* records.
*
   * Only part of the array will be used to store the pointers; the rest is reserved as a
   * temporary buffer for sorting.
*/
private LongArray array;
/**
* Whether to use radix sort for sorting in-memory partition ids. Radix sort is much faster
   * but requires additional memory to be reserved as pointers are added.
*/
private final boolean useRadixSort;
/**
* The position in the pointer array where new records can be inserted.
*/
private int pos = 0;
/**
   * The maximum number of records that can be inserted, since part of the array must be left
   * free as a buffer for sorting.
*/
private int usableCapacity = 0;
private final int initialSize;
ShuffleInMemorySorter(MemoryConsumer consumer, int initialSize, boolean useRadixSort) {
this.consumer = consumer;
assert (initialSize > 0);
this.initialSize = initialSize;
this.useRadixSort = useRadixSort;
this.array = consumer.allocateArray(initialSize);
this.usableCapacity = getUsableCapacity();
}
private int getUsableCapacity() {
    // Radix sort requires a scratch buffer as large as the portion of the array used for
    // pointers, while Tim sort only requires a buffer half that size.
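    // For example (illustrative note added here, not in the original source): with an array of
    // 4096 entries, radix sort leaves 4096 / 2 = 2048 usable slots, while Tim sort leaves
    // (int) (4096 / 1.5) = 2730.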
return (int) (array.size() / (useRadixSort ? 2 : 1.5));
}
public void free() {
if (array != null) {
consumer.freeArray(array);
array = null;
}
}
public int numRecords() {
return pos;
}
public void reset() {
// Reset `pos` here so that `spill` triggered by the below `allocateArray` will be no-op.
pos = 0;
if (consumer != null) {
consumer.freeArray(array);
// As `array` has been released, we should set it to `null` to avoid accessing it before
// `allocateArray` returns. `usableCapacity` is also set to `0` to avoid any codes writing
// data to `ShuffleInMemorySorter` when `array` is `null` (e.g., in
// ShuffleExternalSorter.growPointerArrayIfNecessary, we may try to access
// `ShuffleInMemorySorter` when `allocateArray` throws SparkOutOfMemoryError).
array = null;
usableCapacity = 0;
array = consumer.allocateArray(initialSize);
usableCapacity = getUsableCapacity();
}
}
public void expandPointerArray(LongArray newArray) {
assert(newArray.size() > array.size());
Platform.copyMemory(
array.getBaseObject(),
array.getBaseOffset(),
newArray.getBaseObject(),
newArray.getBaseOffset(),
pos * 8L
);
consumer.freeArray(array);
array = newArray;
usableCapacity = getUsableCapacity();
}
public boolean hasSpaceForAnotherRecord() {
return pos < usableCapacity;
}
public long getMemoryUsage() {
return array.size() * 8;
}
/**
* Inserts a record to be sorted.
*
* @param recordPointer a pointer to the record, encoded by the task memory manager. Due to
* certain pointer compression techniques used by the sorter, the sort can
* only operate on pointers that point to locations in the first
* {@link PackedRecordPointer#MAXIMUM_PAGE_SIZE_BYTES} bytes of a data page.
* @param partitionId the partition id, which must be less than or equal to
* {@link PackedRecordPointer#MAXIMUM_PARTITION_ID}.
*/
public void insertRecord(long recordPointer, int partitionId) {
if (!hasSpaceForAnotherRecord()) {
throw new IllegalStateException("There is no space for new record");
}
array.set(pos, PackedRecordPointer.packPointer(recordPointer, partitionId));
pos++;
}
/**
* An iterator-like class that's used instead of Java's Iterator in order to facilitate inlining.
*/
public static final class ShuffleSorterIterator {
private final LongArray pointerArray;
private final int limit;
final PackedRecordPointer packedRecordPointer = new PackedRecordPointer();
private int position = 0;
ShuffleSorterIterator(int numRecords, LongArray pointerArray, int startingPosition) {
this.limit = numRecords + startingPosition;
this.pointerArray = pointerArray;
this.position = startingPosition;
}
public boolean hasNext() {
return position < limit;
}
public void loadNext() {
packedRecordPointer.set(pointerArray.get(position));
position++;
}
}
/**
* Return an iterator over record pointers in sorted order.
*/
public ShuffleSorterIterator getSortedIterator() {
int offset = 0;
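    // Note (descriptive comment added here, not in the original source): the radix sort branch
    // returns the offset within `array` at which the sorted data begins, while the Tim sort
    // branch sorts the first `pos` entries in place, leaving the offset at 0.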
if (useRadixSort) {
offset = RadixSort.sort(
array, pos,
PackedRecordPointer.PARTITION_ID_START_BYTE_INDEX,
PackedRecordPointer.PARTITION_ID_END_BYTE_INDEX, false, false);
} else {
MemoryBlock unused = new MemoryBlock(
array.getBaseObject(),
array.getBaseOffset() + pos * 8L,
(array.size() - pos) * 8L);
LongArray buffer = new LongArray(unused);
Sorter<PackedRecordPointer, LongArray> sorter =
new Sorter<>(new ShuffleSortDataFormat(buffer));
sorter.sort(array, 0, pos, SORT_COMPARATOR);
}
return new ShuffleSorterIterator(pos, array, offset);
}
}
| 9,619 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle | Create_ds/spark/core/src/main/java/org/apache/spark/shuffle/sort/PackedRecordPointer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
/**
* Wrapper around an 8-byte word that holds a 24-bit partition number and 40-bit record pointer.
* <p>
* Within the long, the data is laid out as follows:
* <pre>
* [24 bit partition number][13 bit memory page number][27 bit offset in page]
* </pre>
 * This implies that the maximum addressable page size is 2^27 bytes = 128 megabytes, assuming that
* our offsets in pages are not 8-byte-word-aligned. Since we have 2^13 pages (based off the
* 13-bit page numbers assigned by {@link org.apache.spark.memory.TaskMemoryManager}), this
* implies that we can address 2^13 * 128 megabytes = 1 terabyte of RAM per task.
* <p>
* Assuming word-alignment would allow for a 1 gigabyte maximum page size, but we leave this
* optimization to future work as it will require more careful design to ensure that addresses are
* properly aligned (e.g. by padding records).
*/
final class PackedRecordPointer {
static final int MAXIMUM_PAGE_SIZE_BYTES = 1 << 27; // 128 megabytes
/**
* The maximum partition identifier that can be encoded. Note that partition ids start from 0.
*/
static final int MAXIMUM_PARTITION_ID = (1 << 24) - 1; // 16777215
/**
* The index of the first byte of the partition id, counting from the least significant byte.
*/
static final int PARTITION_ID_START_BYTE_INDEX = 5;
/**
* The index of the last byte of the partition id, counting from the least significant byte.
*/
static final int PARTITION_ID_END_BYTE_INDEX = 7;
/** Bit mask for the lower 40 bits of a long. */
private static final long MASK_LONG_LOWER_40_BITS = (1L << 40) - 1;
/** Bit mask for the upper 24 bits of a long */
private static final long MASK_LONG_UPPER_24_BITS = ~MASK_LONG_LOWER_40_BITS;
/** Bit mask for the lower 27 bits of a long. */
private static final long MASK_LONG_LOWER_27_BITS = (1L << 27) - 1;
/** Bit mask for the lower 51 bits of a long. */
private static final long MASK_LONG_LOWER_51_BITS = (1L << 51) - 1;
/** Bit mask for the upper 13 bits of a long */
private static final long MASK_LONG_UPPER_13_BITS = ~MASK_LONG_LOWER_51_BITS;
/**
* Pack a record address and partition id into a single word.
*
* @param recordPointer a record pointer encoded by TaskMemoryManager.
   * @param partitionId a shuffle partition id (maximum value of 2^24 - 1).
* @return a packed pointer that can be decoded using the {@link PackedRecordPointer} class.
*/
public static long packPointer(long recordPointer, int partitionId) {
assert (partitionId <= MAXIMUM_PARTITION_ID);
// Note that without word alignment we can address 2^27 bytes = 128 megabytes per page.
// Also note that this relies on some internals of how TaskMemoryManager encodes its addresses.
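    // Worked example (illustrative comment added here, not in the original source): for
    // partitionId = 3 and a record pointer with page number 1 and in-page offset 64, i.e.
    // recordPointer = (1L << 51) | 64, the packed value is (3L << 40) | (1L << 27) | 64.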
final long pageNumber = (recordPointer & MASK_LONG_UPPER_13_BITS) >>> 24;
final long compressedAddress = pageNumber | (recordPointer & MASK_LONG_LOWER_27_BITS);
return (((long) partitionId) << 40) | compressedAddress;
}
private long packedRecordPointer;
public void set(long packedRecordPointer) {
this.packedRecordPointer = packedRecordPointer;
}
public int getPartitionId() {
return (int) ((packedRecordPointer & MASK_LONG_UPPER_24_BITS) >>> 40);
}
public long getRecordPointer() {
final long pageNumber = (packedRecordPointer << 24) & MASK_LONG_UPPER_13_BITS;
final long offsetInPage = packedRecordPointer & MASK_LONG_LOWER_27_BITS;
return pageNumber | offsetInPage;
}
}
| 9,620 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/serializer/DummySerializerInstance.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import scala.reflect.ClassTag;
import org.apache.spark.annotation.Private;
import org.apache.spark.unsafe.Platform;
/**
* Unfortunately, we need a serializer instance in order to construct a DiskBlockObjectWriter.
* Our shuffle write path doesn't actually use this serializer (since we end up calling the
 * `write()` OutputStream methods), but DiskBlockObjectWriter still calls some methods on it. To work
* around this, we pass a dummy no-op serializer.
*/
@Private
public final class DummySerializerInstance extends SerializerInstance {
public static final DummySerializerInstance INSTANCE = new DummySerializerInstance();
private DummySerializerInstance() { }
@Override
public SerializationStream serializeStream(final OutputStream s) {
return new SerializationStream() {
@Override
public void flush() {
// Need to implement this because DiskObjectWriter uses it to flush the compression stream
try {
s.flush();
} catch (IOException e) {
Platform.throwException(e);
}
}
@Override
public <T> SerializationStream writeObject(T t, ClassTag<T> ev1) {
throw new UnsupportedOperationException();
}
@Override
public void close() {
// Need to implement this because DiskObjectWriter uses it to close the compression stream
try {
s.close();
} catch (IOException e) {
Platform.throwException(e);
}
}
};
}
@Override
public <T> ByteBuffer serialize(T t, ClassTag<T> ev1) {
throw new UnsupportedOperationException();
}
@Override
public DeserializationStream deserializeStream(InputStream s) {
throw new UnsupportedOperationException();
}
@Override
public <T> T deserialize(ByteBuffer bytes, ClassLoader loader, ClassTag<T> ev1) {
throw new UnsupportedOperationException();
}
@Override
public <T> T deserialize(ByteBuffer bytes, ClassTag<T> ev1) {
throw new UnsupportedOperationException();
}
}
| 9,621 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/memory/TooLargePageException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory;
public class TooLargePageException extends RuntimeException {
TooLargePageException(long size) {
super("Cannot allocate a page of " + size + " bytes.");
}
}
| 9,622 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/memory/TaskMemoryManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory;
import javax.annotation.concurrent.GuardedBy;
import java.io.IOException;
import java.nio.channels.ClosedByInterruptException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.Utils;
/**
* Manages the memory allocated by an individual task.
* <p>
* Most of the complexity in this class deals with encoding of off-heap addresses into 64-bit longs.
* In off-heap mode, memory can be directly addressed with 64-bit longs. In on-heap mode, memory is
* addressed by the combination of a base Object reference and a 64-bit offset within that object.
* This is a problem when we want to store pointers to data structures inside of other structures,
* such as record pointers inside hashmaps or sorting buffers. Even if we decided to use 128 bits
* to address memory, we can't just store the address of the base object since it's not guaranteed
* to remain stable as the heap gets reorganized due to GC.
* <p>
* Instead, we use the following approach to encode record pointers in 64-bit longs: for off-heap
* mode, just store the raw address, and for on-heap mode use the upper 13 bits of the address to
* store a "page number" and the lower 51 bits to store an offset within this page. These page
* numbers are used to index into a "page table" array inside of the MemoryManager in order to
* retrieve the base object.
* <p>
* This allows us to address 8192 pages. In on-heap mode, the maximum page size is limited by the
* maximum size of a long[] array, allowing us to address 8192 * (2^31 - 1) * 8 bytes, which is
* approximately 140 terabytes of memory.
*/
public class TaskMemoryManager {
private static final Logger logger = LoggerFactory.getLogger(TaskMemoryManager.class);
/** The number of bits used to address the page table. */
private static final int PAGE_NUMBER_BITS = 13;
/** The number of bits used to encode offsets in data pages. */
@VisibleForTesting
static final int OFFSET_BITS = 64 - PAGE_NUMBER_BITS; // 51
/** The number of entries in the page table. */
private static final int PAGE_TABLE_SIZE = 1 << PAGE_NUMBER_BITS;
/**
* Maximum supported data page size (in bytes). In principle, the maximum addressable page size is
* (1L << OFFSET_BITS) bytes, which is 2+ petabytes. However, the on-heap allocator's
* maximum page size is limited by the maximum amount of data that can be stored in a long[]
* array, which is (2^31 - 1) * 8 bytes (or about 17 gigabytes). Therefore, we cap this at 17
* gigabytes.
*/
public static final long MAXIMUM_PAGE_SIZE_BYTES = ((1L << 31) - 1) * 8L;
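  // Illustrative arithmetic (not part of the original source): ((1L << 31) - 1) * 8 is
  // 17,179,869,176 bytes, i.e. roughly 17 GB, which matches the "about 17 gigabytes" above.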
/** Bit mask for the lower 51 bits of a long. */
private static final long MASK_LONG_LOWER_51_BITS = 0x7FFFFFFFFFFFFL;
/**
* Similar to an operating system's page table, this array maps page numbers into base object
* pointers, allowing us to translate between the hashtable's internal 64-bit address
* representation and the baseObject+offset representation which we use to support both in- and
* off-heap addresses. When using an off-heap allocator, every entry in this map will be `null`.
* When using an in-heap allocator, the entries in this map will point to pages' base objects.
* Entries are added to this map as new data pages are allocated.
*/
private final MemoryBlock[] pageTable = new MemoryBlock[PAGE_TABLE_SIZE];
/**
* Bitmap for tracking free pages.
*/
private final BitSet allocatedPages = new BitSet(PAGE_TABLE_SIZE);
private final MemoryManager memoryManager;
private final long taskAttemptId;
/**
* Tracks whether we're in-heap or off-heap. For off-heap, we short-circuit most of these methods
* without doing any masking or lookups. Since this branching should be well-predicted by the JIT,
* this extra layer of indirection / abstraction hopefully shouldn't be too expensive.
*/
final MemoryMode tungstenMemoryMode;
/**
* Tracks spillable memory consumers.
*/
@GuardedBy("this")
private final HashSet<MemoryConsumer> consumers;
/**
* The amount of memory that is acquired but not used.
*/
private volatile long acquiredButNotUsed = 0L;
/**
* Construct a new TaskMemoryManager.
*/
public TaskMemoryManager(MemoryManager memoryManager, long taskAttemptId) {
this.tungstenMemoryMode = memoryManager.tungstenMemoryMode();
this.memoryManager = memoryManager;
this.taskAttemptId = taskAttemptId;
this.consumers = new HashSet<>();
}
/**
   * Acquire N bytes of memory for a consumer. If there is not enough memory, it will call
   * spill() on other consumers to release more memory.
*
* @return number of bytes successfully granted (<= N).
*/
public long acquireExecutionMemory(long required, MemoryConsumer consumer) {
assert(required >= 0);
assert(consumer != null);
MemoryMode mode = consumer.getMode();
// If we are allocating Tungsten pages off-heap and receive a request to allocate on-heap
// memory here, then it may not make sense to spill since that would only end up freeing
// off-heap memory. This is subject to change, though, so it may be risky to make this
    // optimization now in case we forget to undo it later when making changes.
synchronized (this) {
long got = memoryManager.acquireExecutionMemory(required, taskAttemptId, mode);
      // Try to release memory from other consumers first; this reduces the frequency of
      // spilling and avoids creating too many spilled files.
if (got < required) {
// Call spill() on other consumers to release memory
        // Sort the consumers according to their memory usage, so we avoid repeatedly spilling the
        // same consumer that was just spilled in the last few rounds; re-spilling it would produce
        // many small spill files.
TreeMap<Long, List<MemoryConsumer>> sortedConsumers = new TreeMap<>();
for (MemoryConsumer c: consumers) {
if (c != consumer && c.getUsed() > 0 && c.getMode() == mode) {
long key = c.getUsed();
List<MemoryConsumer> list =
sortedConsumers.computeIfAbsent(key, k -> new ArrayList<>(1));
list.add(c);
}
}
while (!sortedConsumers.isEmpty()) {
          // Get the consumer using the least amount of memory that is still more than the
          // remaining required memory.
Map.Entry<Long, List<MemoryConsumer>> currentEntry =
sortedConsumers.ceilingEntry(required - got);
          // If no consumer has used more memory than the remaining required memory,
          // fall back to the consumer with the largest memory usage.
if (currentEntry == null) {
currentEntry = sortedConsumers.lastEntry();
}
List<MemoryConsumer> cList = currentEntry.getValue();
MemoryConsumer c = cList.get(cList.size() - 1);
try {
long released = c.spill(required - got, consumer);
if (released > 0) {
logger.debug("Task {} released {} from {} for {}", taskAttemptId,
Utils.bytesToString(released), c, consumer);
got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
if (got >= required) {
break;
}
} else {
cList.remove(cList.size() - 1);
if (cList.isEmpty()) {
sortedConsumers.remove(currentEntry.getKey());
}
}
} catch (ClosedByInterruptException e) {
            // This is called by the user to kill a task (e.g. a speculative task).
logger.error("error while calling spill() on " + c, e);
throw new RuntimeException(e.getMessage());
} catch (IOException e) {
logger.error("error while calling spill() on " + c, e);
throw new SparkOutOfMemoryError("error while calling spill() on " + c + " : "
+ e.getMessage());
}
}
}
// call spill() on itself
if (got < required) {
try {
long released = consumer.spill(required - got, consumer);
if (released > 0) {
logger.debug("Task {} released {} from itself ({})", taskAttemptId,
Utils.bytesToString(released), consumer);
got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
}
} catch (ClosedByInterruptException e) {
          // This is called by the user to kill a task (e.g. a speculative task).
logger.error("error while calling spill() on " + consumer, e);
throw new RuntimeException(e.getMessage());
} catch (IOException e) {
logger.error("error while calling spill() on " + consumer, e);
throw new SparkOutOfMemoryError("error while calling spill() on " + consumer + " : "
+ e.getMessage());
}
}
consumers.add(consumer);
logger.debug("Task {} acquired {} for {}", taskAttemptId, Utils.bytesToString(got), consumer);
return got;
}
}
/**
* Release N bytes of execution memory for a MemoryConsumer.
*/
public void releaseExecutionMemory(long size, MemoryConsumer consumer) {
logger.debug("Task {} release {} from {}", taskAttemptId, Utils.bytesToString(size), consumer);
memoryManager.releaseExecutionMemory(size, taskAttemptId, consumer.getMode());
}
/**
* Dump the memory usage of all consumers.
*/
public void showMemoryUsage() {
logger.info("Memory used in task " + taskAttemptId);
synchronized (this) {
long memoryAccountedForByConsumers = 0;
for (MemoryConsumer c: consumers) {
long totalMemUsage = c.getUsed();
memoryAccountedForByConsumers += totalMemUsage;
if (totalMemUsage > 0) {
logger.info("Acquired by " + c + ": " + Utils.bytesToString(totalMemUsage));
}
}
long memoryNotAccountedFor =
memoryManager.getExecutionMemoryUsageForTask(taskAttemptId) - memoryAccountedForByConsumers;
logger.info(
"{} bytes of memory were used by task {} but are not associated with specific consumers",
memoryNotAccountedFor, taskAttemptId);
logger.info(
"{} bytes of memory are used for execution and {} bytes of memory are used for storage",
memoryManager.executionMemoryUsed(), memoryManager.storageMemoryUsed());
}
}
/**
* Return the page size in bytes.
*/
public long pageSizeBytes() {
return memoryManager.pageSizeBytes();
}
/**
* Allocate a block of memory that will be tracked in the MemoryManager's page table; this is
* intended for allocating large blocks of Tungsten memory that will be shared between operators.
*
* Returns `null` if there was not enough memory to allocate the page. May return a page that
* contains fewer bytes than requested, so callers should verify the size of returned pages.
*
* @throws TooLargePageException
*/
public MemoryBlock allocatePage(long size, MemoryConsumer consumer) {
assert(consumer != null);
assert(consumer.getMode() == tungstenMemoryMode);
if (size > MAXIMUM_PAGE_SIZE_BYTES) {
throw new TooLargePageException(size);
}
long acquired = acquireExecutionMemory(size, consumer);
if (acquired <= 0) {
return null;
}
final int pageNumber;
synchronized (this) {
pageNumber = allocatedPages.nextClearBit(0);
if (pageNumber >= PAGE_TABLE_SIZE) {
releaseExecutionMemory(acquired, consumer);
throw new IllegalStateException(
"Have already allocated a maximum of " + PAGE_TABLE_SIZE + " pages");
}
allocatedPages.set(pageNumber);
}
MemoryBlock page = null;
try {
page = memoryManager.tungstenMemoryAllocator().allocate(acquired);
} catch (OutOfMemoryError e) {
logger.warn("Failed to allocate a page ({} bytes), try again.", acquired);
      // There is actually not enough memory: the real free memory is smaller than what the
      // MemoryManager thought, so we should keep the acquired memory accounted for.
synchronized (this) {
acquiredButNotUsed += acquired;
allocatedPages.clear(pageNumber);
}
// this could trigger spilling to free some pages.
return allocatePage(size, consumer);
}
page.pageNumber = pageNumber;
pageTable[pageNumber] = page;
if (logger.isTraceEnabled()) {
logger.trace("Allocate page number {} ({} bytes)", pageNumber, acquired);
}
return page;
}
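  // Illustrative note (not part of the original source): callers normally reach this method
  // through MemoryConsumer, which checks the returned page and converts a null or undersized
  // result into a SparkOutOfMemoryError, e.g.
  //
  //   MemoryBlock page = taskMemoryManager.allocatePage(required, consumer);
  //   if (page == null || page.size() < required) {
  //     // not enough execution memory could be obtained, even after asking consumers to spill
  //   }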
/**
* Free a block of memory allocated via {@link TaskMemoryManager#allocatePage}.
*/
public void freePage(MemoryBlock page, MemoryConsumer consumer) {
assert (page.pageNumber != MemoryBlock.NO_PAGE_NUMBER) :
"Called freePage() on memory that wasn't allocated with allocatePage()";
assert (page.pageNumber != MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER) :
"Called freePage() on a memory block that has already been freed";
assert (page.pageNumber != MemoryBlock.FREED_IN_TMM_PAGE_NUMBER) :
"Called freePage() on a memory block that has already been freed";
assert(allocatedPages.get(page.pageNumber));
pageTable[page.pageNumber] = null;
synchronized (this) {
allocatedPages.clear(page.pageNumber);
}
if (logger.isTraceEnabled()) {
logger.trace("Freed page number {} ({} bytes)", page.pageNumber, page.size());
}
long pageSize = page.size();
// Clear the page number before passing the block to the MemoryAllocator's free().
// Doing this allows the MemoryAllocator to detect when a TaskMemoryManager-managed
// page has been inappropriately directly freed without calling TMM.freePage().
page.pageNumber = MemoryBlock.FREED_IN_TMM_PAGE_NUMBER;
memoryManager.tungstenMemoryAllocator().free(page);
releaseExecutionMemory(pageSize, consumer);
}
/**
* Given a memory page and offset within that page, encode this address into a 64-bit long.
* This address will remain valid as long as the corresponding page has not been freed.
*
   * @param page a data page allocated by {@link TaskMemoryManager#allocatePage}.
* @param offsetInPage an offset in this page which incorporates the base offset. In other words,
* this should be the value that you would pass as the base offset into an
* UNSAFE call (e.g. page.baseOffset() + something).
* @return an encoded page address.
*/
public long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage) {
if (tungstenMemoryMode == MemoryMode.OFF_HEAP) {
// In off-heap mode, an offset is an absolute address that may require a full 64 bits to
// encode. Due to our page size limitation, though, we can convert this into an offset that's
// relative to the page's base offset; this relative offset will fit in 51 bits.
offsetInPage -= page.getBaseOffset();
}
return encodePageNumberAndOffset(page.pageNumber, offsetInPage);
}
@VisibleForTesting
public static long encodePageNumberAndOffset(int pageNumber, long offsetInPage) {
assert (pageNumber >= 0) : "encodePageNumberAndOffset called with invalid page";
return (((long) pageNumber) << OFFSET_BITS) | (offsetInPage & MASK_LONG_LOWER_51_BITS);
}
@VisibleForTesting
public static int decodePageNumber(long pagePlusOffsetAddress) {
return (int) (pagePlusOffsetAddress >>> OFFSET_BITS);
}
private static long decodeOffset(long pagePlusOffsetAddress) {
return (pagePlusOffsetAddress & MASK_LONG_LOWER_51_BITS);
}
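  // Illustrative example (not part of the original source): a round trip through the
  // encode/decode helpers above.
  //
  //   long address = encodePageNumberAndOffset(3, 0x10L); // (3L << OFFSET_BITS) | 0x10
  //   decodePageNumber(address); // == 3
  //   decodeOffset(address);     // == 0x10
  //
  // Since the offset is masked to the lower 51 bits, an encoded address is only meaningful
  // while the corresponding page is still present in the page table.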
/**
* Get the page associated with an address encoded by
* {@link TaskMemoryManager#encodePageNumberAndOffset(MemoryBlock, long)}
*/
public Object getPage(long pagePlusOffsetAddress) {
if (tungstenMemoryMode == MemoryMode.ON_HEAP) {
final int pageNumber = decodePageNumber(pagePlusOffsetAddress);
assert (pageNumber >= 0 && pageNumber < PAGE_TABLE_SIZE);
final MemoryBlock page = pageTable[pageNumber];
assert (page != null);
assert (page.getBaseObject() != null);
return page.getBaseObject();
} else {
return null;
}
}
/**
* Get the offset associated with an address encoded by
* {@link TaskMemoryManager#encodePageNumberAndOffset(MemoryBlock, long)}
*/
public long getOffsetInPage(long pagePlusOffsetAddress) {
final long offsetInPage = decodeOffset(pagePlusOffsetAddress);
if (tungstenMemoryMode == MemoryMode.ON_HEAP) {
return offsetInPage;
} else {
// In off-heap mode, an offset is an absolute address. In encodePageNumberAndOffset, we
// converted the absolute address into a relative address. Here, we invert that operation:
final int pageNumber = decodePageNumber(pagePlusOffsetAddress);
assert (pageNumber >= 0 && pageNumber < PAGE_TABLE_SIZE);
final MemoryBlock page = pageTable[pageNumber];
assert (page != null);
return page.getBaseOffset() + offsetInPage;
}
}
/**
* Clean up all allocated memory and pages. Returns the number of bytes freed. A non-zero return
* value can be used to detect memory leaks.
*/
public long cleanUpAllAllocatedMemory() {
synchronized (this) {
for (MemoryConsumer c: consumers) {
if (c != null && c.getUsed() > 0) {
          // In the case of a failed task, it's normal to see leaked memory
logger.debug("unreleased " + Utils.bytesToString(c.getUsed()) + " memory from " + c);
}
}
consumers.clear();
for (MemoryBlock page : pageTable) {
if (page != null) {
logger.debug("unreleased page: " + page + " in task " + taskAttemptId);
page.pageNumber = MemoryBlock.FREED_IN_TMM_PAGE_NUMBER;
memoryManager.tungstenMemoryAllocator().free(page);
}
}
Arrays.fill(pageTable, null);
}
// release the memory that is not used by any consumer (acquired for pages in tungsten mode).
memoryManager.releaseExecutionMemory(acquiredButNotUsed, taskAttemptId, tungstenMemoryMode);
return memoryManager.releaseAllExecutionMemoryForTask(taskAttemptId);
}
/**
* Returns the memory consumption, in bytes, for the current task.
*/
public long getMemoryConsumptionForThisTask() {
return memoryManager.getExecutionMemoryUsageForTask(taskAttemptId);
}
/**
   * Returns the Tungsten memory mode.
*/
public MemoryMode getTungstenMemoryMode() {
return tungstenMemoryMode;
}
}
| 9,623 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/memory/MemoryMode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory;
import org.apache.spark.annotation.Private;
@Private
public enum MemoryMode {
ON_HEAP,
OFF_HEAP
}
| 9,624 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/memory/SparkOutOfMemoryError.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory;
import org.apache.spark.annotation.Private;
/**
 * This exception is thrown when a task cannot acquire memory from the memory manager.
 * Instead of throwing {@link OutOfMemoryError}, which kills the executor,
 * we should throw this exception, which just kills the current task.
*/
@Private
public final class SparkOutOfMemoryError extends OutOfMemoryError {
public SparkOutOfMemoryError(String s) {
super(s);
}
public SparkOutOfMemoryError(OutOfMemoryError e) {
super(e.getMessage());
}
}
| 9,625 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory;
import java.io.IOException;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.unsafe.memory.MemoryBlock;
/**
* A memory consumer of {@link TaskMemoryManager} that supports spilling.
*
* Note: this only supports allocation / spilling of Tungsten memory.
*/
public abstract class MemoryConsumer {
protected final TaskMemoryManager taskMemoryManager;
private final long pageSize;
private final MemoryMode mode;
protected long used;
protected MemoryConsumer(TaskMemoryManager taskMemoryManager, long pageSize, MemoryMode mode) {
this.taskMemoryManager = taskMemoryManager;
this.pageSize = pageSize;
this.mode = mode;
}
protected MemoryConsumer(TaskMemoryManager taskMemoryManager) {
this(taskMemoryManager, taskMemoryManager.pageSizeBytes(), MemoryMode.ON_HEAP);
}
/**
* Returns the memory mode, {@link MemoryMode#ON_HEAP} or {@link MemoryMode#OFF_HEAP}.
*/
public MemoryMode getMode() {
return mode;
}
/**
* Returns the size of used memory in bytes.
*/
protected long getUsed() {
return used;
}
/**
* Force spill during building.
*/
public void spill() throws IOException {
spill(Long.MAX_VALUE, this);
}
/**
   * Spill some data to disk to release memory. This will be called by TaskMemoryManager
   * when there is not enough memory for the task.
*
   * This should be implemented by subclasses.
*
   * Note: In order to avoid possible deadlocks, spill() should not call acquireMemory().
*
* Note: today, this only frees Tungsten-managed pages.
*
   * @param size the amount of memory that should be released
   * @param trigger the MemoryConsumer that triggered this spilling
* @return the amount of released memory in bytes
* @throws IOException
*/
public abstract long spill(long size, MemoryConsumer trigger) throws IOException;
/**
* Allocates a LongArray of `size`. Note that this method may throw `OutOfMemoryError` if Spark
* doesn't have enough memory for this allocation, or throw `TooLargePageException` if this
   * `LongArray` is too large to fit in a single page. The caller should handle these two
   * exceptions, or make sure the `size` is small enough that it won't trigger them.
*
* @throws SparkOutOfMemoryError
* @throws TooLargePageException
*/
public LongArray allocateArray(long size) {
long required = size * 8L;
MemoryBlock page = taskMemoryManager.allocatePage(required, this);
if (page == null || page.size() < required) {
throwOom(page, required);
}
used += required;
return new LongArray(page);
}
/**
* Frees a LongArray.
*/
public void freeArray(LongArray array) {
freePage(array.memoryBlock());
}
/**
* Allocate a memory block with at least `required` bytes.
*
* @throws OutOfMemoryError
*/
protected MemoryBlock allocatePage(long required) {
MemoryBlock page = taskMemoryManager.allocatePage(Math.max(pageSize, required), this);
if (page == null || page.size() < required) {
throwOom(page, required);
}
used += page.size();
return page;
}
/**
* Free a memory block.
*/
protected void freePage(MemoryBlock page) {
used -= page.size();
taskMemoryManager.freePage(page, this);
}
/**
* Allocates memory of `size`.
*/
public long acquireMemory(long size) {
long granted = taskMemoryManager.acquireExecutionMemory(size, this);
used += granted;
return granted;
}
/**
* Release N bytes of memory.
*/
public void freeMemory(long size) {
taskMemoryManager.releaseExecutionMemory(size, this);
used -= size;
}
private void throwOom(final MemoryBlock page, final long required) {
long got = 0;
if (page != null) {
got = page.size();
taskMemoryManager.freePage(page, this);
}
taskMemoryManager.showMemoryUsage();
throw new SparkOutOfMemoryError("Unable to acquire " + required + " bytes of memory, got " +
got);
}
}
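// Illustrative sketch (an assumption, not part of the original source): a minimal spillable
// consumer built on the protected helpers above; the class and field names are hypothetical,
// and a real implementation would persist the page's contents to disk before freeing it.
//
//   class ExampleConsumer extends MemoryConsumer {
//     private MemoryBlock page;                 // the single page held by this consumer
//     ExampleConsumer(TaskMemoryManager tmm) { super(tmm); }
//     void grow() {
//       page = allocatePage(1024 * 1024);       // may throw SparkOutOfMemoryError
//     }
//     @Override
//     public long spill(long size, MemoryConsumer trigger) {
//       if (page == null) return 0L;            // nothing to release
//       long freed = page.size();
//       freePage(page);                         // updates `used` and returns the memory
//       page = null;
//       return freed;
//     }
//   }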
| 9,626 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/util/MutableURLClassLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util;
import java.net.URL;
import java.net.URLClassLoader;
/**
* URL class loader that exposes the `addURL` method in URLClassLoader.
*/
public class MutableURLClassLoader extends URLClassLoader {
static {
ClassLoader.registerAsParallelCapable();
}
public MutableURLClassLoader(URL[] urls, ClassLoader parent) {
super(urls, parent);
}
@Override
public void addURL(URL url) {
super.addURL(url);
}
}
| 9,627 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/util/EnumUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util;
import com.google.common.base.Joiner;
import org.apache.spark.annotation.Private;
@Private
public class EnumUtil {
public static <E extends Enum<E>> E parseIgnoreCase(Class<E> clz, String str) {
E[] constants = clz.getEnumConstants();
if (str == null) {
return null;
}
for (E e : constants) {
if (e.name().equalsIgnoreCase(str)) {
return e;
}
}
throw new IllegalArgumentException(
String.format("Illegal type='%s'. Supported type values: %s",
str, Joiner.on(", ").join(constants)));
}
}
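// Illustrative usage (not part of the original source); the enum is hypothetical:
//
//   enum Mode { READ, WRITE }
//   EnumUtil.parseIgnoreCase(Mode.class, "read");   // returns Mode.READ
//   EnumUtil.parseIgnoreCase(Mode.class, null);     // returns null
//   EnumUtil.parseIgnoreCase(Mode.class, "append"); // throws IllegalArgumentException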
| 9,628 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/util/ChildFirstURLClassLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util;
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
/**
* A mutable class loader that gives preference to its own URLs over the parent class loader
* when loading classes and resources.
*/
public class ChildFirstURLClassLoader extends MutableURLClassLoader {
static {
ClassLoader.registerAsParallelCapable();
}
private ParentClassLoader parent;
public ChildFirstURLClassLoader(URL[] urls, ClassLoader parent) {
super(urls, null);
this.parent = new ParentClassLoader(parent);
}
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
try {
return super.loadClass(name, resolve);
} catch (ClassNotFoundException cnf) {
return parent.loadClass(name, resolve);
}
}
@Override
public Enumeration<URL> getResources(String name) throws IOException {
ArrayList<URL> urls = Collections.list(super.getResources(name));
urls.addAll(Collections.list(parent.getResources(name)));
return Collections.enumeration(urls);
}
@Override
public URL getResource(String name) {
URL url = super.getResource(name);
if (url != null) {
return url;
} else {
return parent.getResource(name);
}
}
}
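// Illustrative usage sketch (an assumption, not part of the original source); the jar path and
// class name are hypothetical:
//
//   URL[] jars = { new java.io.File("/path/to/user.jar").toURI().toURL() };
//   ClassLoader loader =
//     new ChildFirstURLClassLoader(jars, Thread.currentThread().getContextClassLoader());
//   Class<?> cls = loader.loadClass("com.example.UserClass"); // resolved from user.jar first,
//                                                             // falling back to the parent only
//                                                             // if the child lookup fails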
| 9,629 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/util/ParentClassLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util;
/**
* A class loader which makes some protected methods in ClassLoader accessible.
*/
public class ParentClassLoader extends ClassLoader {
static {
ClassLoader.registerAsParallelCapable();
}
public ParentClassLoader(ClassLoader parent) {
super(parent);
}
@Override
public Class<?> findClass(String name) throws ClassNotFoundException {
return super.findClass(name);
}
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
return super.loadClass(name, resolve);
}
}
| 9,630 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/TimSort.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Based on TimSort.java from the Android Open Source Project
*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection;
import java.util.Comparator;
/**
* A port of the Android TimSort class, which utilizes a "stable, adaptive, iterative mergesort."
* See the method comment on sort() for more details.
*
* This has been kept in Java with the original style in order to match very closely with the
 * Android source code, and thus make it easy to verify correctness. The class is package private;
 * we provide a simple Scala wrapper, {@link org.apache.spark.util.collection.Sorter}, which is
 * available to the org.apache.spark package.
*
 * The purpose of the port is to generalize the sort interface to accept input data formats
* besides simple arrays where every element is sorted individually. For instance, the AppendOnlyMap
* uses this to sort an Array with alternating elements of the form [key, value, key, value].
* This generalization comes with minimal overhead -- see SortDataFormat for more information.
*
* We allow key reuse to prevent creating many key objects -- see SortDataFormat.
*
* @see org.apache.spark.util.collection.SortDataFormat
* @see org.apache.spark.util.collection.Sorter
*/
class TimSort<K, Buffer> {
/**
* This is the minimum sized sequence that will be merged. Shorter
* sequences will be lengthened by calling binarySort. If the entire
* array is less than this length, no merges will be performed.
*
   * This constant should be a power of two. It was 64 in Tim Peters's C
* implementation, but 32 was empirically determined to work better in
* this implementation. In the unlikely event that you set this constant
* to be a number that's not a power of two, you'll need to change the
* minRunLength computation.
*
* If you decrease this constant, you must change the stackLen
* computation in the TimSort constructor, or you risk an
   * ArrayIndexOutOfBoundsException. See listsort.txt for a discussion
* of the minimum stack length required as a function of the length
* of the array being sorted and the minimum merge sequence length.
*/
private static final int MIN_MERGE = 32;
private final SortDataFormat<K, Buffer> s;
public TimSort(SortDataFormat<K, Buffer> sortDataFormat) {
this.s = sortDataFormat;
}
/**
* A stable, adaptive, iterative mergesort that requires far fewer than
* n lg(n) comparisons when running on partially sorted arrays, while
* offering performance comparable to a traditional mergesort when run
* on random arrays. Like all proper mergesorts, this sort is stable and
   * runs in O(n log n) time (worst case). In the worst case, this sort requires
* temporary storage space for n/2 object references; in the best case,
* it requires only a small constant amount of space.
*
* This implementation was adapted from Tim Peters's list sort for
* Python, which is described in detail here:
*
* http://svn.python.org/projects/python/trunk/Objects/listsort.txt
*
* Tim's C code may be found here:
*
* http://svn.python.org/projects/python/trunk/Objects/listobject.c
*
* The underlying techniques are described in this paper (and may have
* even earlier origins):
*
* "Optimistic Sorting and Information Theoretic Complexity"
* Peter McIlroy
* SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
* pp 467-474, Austin, Texas, 25-27 January 1993.
*
* While the API to this class consists solely of static methods, it is
* (privately) instantiable; a TimSort instance holds the state of an ongoing
* sort, assuming the input array is large enough to warrant the full-blown
* TimSort. Small arrays are sorted in place, using a binary insertion sort.
*
* @author Josh Bloch
*/
public void sort(Buffer a, int lo, int hi, Comparator<? super K> c) {
assert c != null;
int nRemaining = hi - lo;
if (nRemaining < 2)
return; // Arrays of size 0 and 1 are always sorted
// If array is small, do a "mini-TimSort" with no merges
if (nRemaining < MIN_MERGE) {
int initRunLen = countRunAndMakeAscending(a, lo, hi, c);
binarySort(a, lo, hi, lo + initRunLen, c);
return;
}
/**
* March over the array once, left to right, finding natural runs,
* extending short natural runs to minRun elements, and merging runs
* to maintain stack invariant.
*/
SortState sortState = new SortState(a, c, hi - lo);
int minRun = minRunLength(nRemaining);
do {
// Identify next run
int runLen = countRunAndMakeAscending(a, lo, hi, c);
// If run is short, extend to min(minRun, nRemaining)
if (runLen < minRun) {
int force = nRemaining <= minRun ? nRemaining : minRun;
binarySort(a, lo, lo + force, lo + runLen, c);
runLen = force;
}
// Push run onto pending-run stack, and maybe merge
sortState.pushRun(lo, runLen);
sortState.mergeCollapse();
// Advance to find next run
lo += runLen;
nRemaining -= runLen;
} while (nRemaining != 0);
// Merge all remaining runs to complete sort
assert lo == hi;
sortState.mergeForceCollapse();
assert sortState.stackSize == 1;
}
/**
* Sorts the specified portion of the specified array using a binary
* insertion sort. This is the best method for sorting small numbers
* of elements. It requires O(n log n) compares, but O(n^2) data
* movement (worst case).
*
* If the initial part of the specified range is already sorted,
* this method can take advantage of it: the method assumes that the
* elements from index {@code lo}, inclusive, to {@code start},
* exclusive are already sorted.
*
* @param a the array in which a range is to be sorted
* @param lo the index of the first element in the range to be sorted
* @param hi the index after the last element in the range to be sorted
* @param start the index of the first element in the range that is
* not already known to be sorted ({@code lo <= start <= hi})
   * @param c the comparator to be used for the sort
*/
@SuppressWarnings("fallthrough")
private void binarySort(Buffer a, int lo, int hi, int start, Comparator<? super K> c) {
assert lo <= start && start <= hi;
if (start == lo)
start++;
K key0 = s.newKey();
K key1 = s.newKey();
Buffer pivotStore = s.allocate(1);
for ( ; start < hi; start++) {
s.copyElement(a, start, pivotStore, 0);
K pivot = s.getKey(pivotStore, 0, key0);
// Set left (and right) to the index where a[start] (pivot) belongs
int left = lo;
int right = start;
assert left <= right;
/*
* Invariants:
* pivot >= all in [lo, left).
* pivot < all in [right, start).
*/
while (left < right) {
int mid = (left + right) >>> 1;
if (c.compare(pivot, s.getKey(a, mid, key1)) < 0)
right = mid;
else
left = mid + 1;
}
assert left == right;
/*
* The invariants still hold: pivot >= all in [lo, left) and
* pivot < all in [left, start), so pivot belongs at left. Note
* that if there are elements equal to pivot, left points to the
* first slot after them -- that's why this sort is stable.
* Slide elements over to make room for pivot.
*/
int n = start - left; // The number of elements to move
// Switch is just an optimization for arraycopy in default case
switch (n) {
case 2: s.copyElement(a, left + 1, a, left + 2);
case 1: s.copyElement(a, left, a, left + 1);
break;
default: s.copyRange(a, left, a, left + 1, n);
}
s.copyElement(pivotStore, 0, a, left);
}
}
/**
* Returns the length of the run beginning at the specified position in
* the specified array and reverses the run if it is descending (ensuring
* that the run will always be ascending when the method returns).
*
* A run is the longest ascending sequence with:
*
* a[lo] <= a[lo + 1] <= a[lo + 2] <= ...
*
* or the longest descending sequence with:
*
* a[lo] > a[lo + 1] > a[lo + 2] > ...
*
* For its intended use in a stable mergesort, the strictness of the
* definition of "descending" is needed so that the call can safely
* reverse a descending sequence without violating stability.
*
* @param a the array in which a run is to be counted and possibly reversed
* @param lo index of the first element in the run
* @param hi index after the last element that may be contained in the run.
   *        It is required that {@code lo < hi}.
   * @param c the comparator to be used for the sort
* @return the length of the run beginning at the specified position in
* the specified array
*/
private int countRunAndMakeAscending(Buffer a, int lo, int hi, Comparator<? super K> c) {
assert lo < hi;
int runHi = lo + 1;
if (runHi == hi)
return 1;
K key0 = s.newKey();
K key1 = s.newKey();
// Find end of run, and reverse range if descending
if (c.compare(s.getKey(a, runHi++, key0), s.getKey(a, lo, key1)) < 0) { // Descending
while (runHi < hi && c.compare(s.getKey(a, runHi, key0), s.getKey(a, runHi - 1, key1)) < 0)
runHi++;
reverseRange(a, lo, runHi);
} else { // Ascending
while (runHi < hi && c.compare(s.getKey(a, runHi, key0), s.getKey(a, runHi - 1, key1)) >= 0)
runHi++;
}
return runHi - lo;
}
/**
* Reverse the specified range of the specified array.
*
* @param a the array in which a range is to be reversed
* @param lo the index of the first element in the range to be reversed
* @param hi the index after the last element in the range to be reversed
*/
private void reverseRange(Buffer a, int lo, int hi) {
hi--;
while (lo < hi) {
s.swap(a, lo, hi);
lo++;
hi--;
}
}
/**
* Returns the minimum acceptable run length for an array of the specified
* length. Natural runs shorter than this will be extended with
* {@link #binarySort}.
*
* Roughly speaking, the computation is:
*
* If n < MIN_MERGE, return n (it's too small to bother with fancy stuff).
* Else if n is an exact power of 2, return MIN_MERGE/2.
* Else return an int k, MIN_MERGE/2 <= k <= MIN_MERGE, such that n/k
* is close to, but strictly less than, an exact power of 2.
*
* For the rationale, see listsort.txt.
*
* @param n the length of the array to be sorted
* @return the length of the minimum run to be merged
*/
private int minRunLength(int n) {
assert n >= 0;
int r = 0; // Becomes 1 if any 1 bits are shifted off
while (n >= MIN_MERGE) {
r |= (n & 1);
n >>= 1;
}
return n + r;
}
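  // Illustrative note (not part of the original source): with MIN_MERGE == 32,
  // minRunLength(64) == 16 (64 is an exact power of two, so MIN_MERGE / 2), while
  // minRunLength(100) == 25 (100 -> 50 -> 25 with no set bits shifted off).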
private class SortState {
/**
* The Buffer being sorted.
*/
private final Buffer a;
/**
* Length of the sort Buffer.
*/
private final int aLength;
/**
* The comparator for this sort.
*/
private final Comparator<? super K> c;
/**
* When we get into galloping mode, we stay there until both runs win less
* often than MIN_GALLOP consecutive times.
*/
private static final int MIN_GALLOP = 7;
/**
* This controls when we get *into* galloping mode. It is initialized
* to MIN_GALLOP. The mergeLo and mergeHi methods nudge it higher for
* random data, and lower for highly structured data.
*/
private int minGallop = MIN_GALLOP;
/**
* Maximum initial size of tmp array, which is used for merging. The array
* can grow to accommodate demand.
*
* Unlike Tim's original C version, we do not allocate this much storage
* when sorting smaller arrays. This change was required for performance.
*/
private static final int INITIAL_TMP_STORAGE_LENGTH = 256;
/**
* Temp storage for merges.
*/
    private Buffer tmp; // Allocated via SortDataFormat.allocate(), so its runtime type depends on the format
/**
* Length of the temp storage.
*/
private int tmpLength = 0;
/**
* A stack of pending runs yet to be merged. Run i starts at
* address base[i] and extends for len[i] elements. It's always
* true (so long as the indices are in bounds) that:
*
* runBase[i] + runLen[i] == runBase[i + 1]
*
* so we could cut the storage for this, but it's a minor amount,
* and keeping all the info explicit simplifies the code.
*/
private int stackSize = 0; // Number of pending runs on stack
private final int[] runBase;
private final int[] runLen;
/**
* Creates a TimSort instance to maintain the state of an ongoing sort.
*
* @param a the array to be sorted
* @param c the comparator to determine the order of the sort
*/
private SortState(Buffer a, Comparator<? super K> c, int len) {
this.aLength = len;
this.a = a;
this.c = c;
// Allocate temp storage (which may be increased later if necessary)
tmpLength = len < 2 * INITIAL_TMP_STORAGE_LENGTH ? len >>> 1 : INITIAL_TMP_STORAGE_LENGTH;
tmp = s.allocate(tmpLength);
/*
* Allocate runs-to-be-merged stack (which cannot be expanded). The
* stack length requirements are described in listsort.txt. The C
* version always uses the same stack length (85), but this was
* measured to be too expensive when sorting "mid-sized" arrays (e.g.,
* 100 elements) in Java. Therefore, we use smaller (but sufficiently
* large) stack lengths for smaller arrays. The "magic numbers" in the
* computation below must be changed if MIN_MERGE is decreased. See
* the MIN_MERGE declaration above for more information.
*/
int stackLen = (len < 120 ? 5 :
len < 1542 ? 10 :
len < 119151 ? 19 : 40);
runBase = new int[stackLen];
runLen = new int[stackLen];
}
/**
* Pushes the specified run onto the pending-run stack.
*
* @param runBase index of the first element in the run
* @param runLen the number of elements in the run
*/
private void pushRun(int runBase, int runLen) {
this.runBase[stackSize] = runBase;
this.runLen[stackSize] = runLen;
stackSize++;
}
/**
* Examines the stack of runs waiting to be merged and merges adjacent runs
* until the stack invariants are reestablished:
*
* 1. runLen[i - 3] > runLen[i - 2] + runLen[i - 1]
* 2. runLen[i - 2] > runLen[i - 1]
*
* This method is called each time a new run is pushed onto the stack,
* so the invariants are guaranteed to hold for i < stackSize upon
* entry to the method.
*/
private void mergeCollapse() {
while (stackSize > 1) {
int n = stackSize - 2;
if ( (n >= 1 && runLen[n-1] <= runLen[n] + runLen[n+1])
|| (n >= 2 && runLen[n-2] <= runLen[n] + runLen[n-1])) {
if (runLen[n - 1] < runLen[n + 1])
n--;
} else if (runLen[n] > runLen[n + 1]) {
break; // Invariant is established
}
mergeAt(n);
}
}
/**
* Merges all runs on the stack until only one remains. This method is
* called once, to complete the sort.
*/
private void mergeForceCollapse() {
while (stackSize > 1) {
int n = stackSize - 2;
if (n > 0 && runLen[n - 1] < runLen[n + 1])
n--;
mergeAt(n);
}
}
/**
* Merges the two runs at stack indices i and i+1. Run i must be
* the penultimate or antepenultimate run on the stack. In other words,
* i must be equal to stackSize-2 or stackSize-3.
*
* @param i stack index of the first of the two runs to merge
*/
private void mergeAt(int i) {
assert stackSize >= 2;
assert i >= 0;
assert i == stackSize - 2 || i == stackSize - 3;
int base1 = runBase[i];
int len1 = runLen[i];
int base2 = runBase[i + 1];
int len2 = runLen[i + 1];
assert len1 > 0 && len2 > 0;
assert base1 + len1 == base2;
/*
* Record the length of the combined runs; if i is the 3rd-last
* run now, also slide over the last run (which isn't involved
* in this merge). The current run (i+1) goes away in any case.
*/
runLen[i] = len1 + len2;
if (i == stackSize - 3) {
runBase[i + 1] = runBase[i + 2];
runLen[i + 1] = runLen[i + 2];
}
stackSize--;
K key0 = s.newKey();
/*
* Find where the first element of run2 goes in run1. Prior elements
* in run1 can be ignored (because they're already in place).
*/
int k = gallopRight(s.getKey(a, base2, key0), a, base1, len1, 0, c);
assert k >= 0;
base1 += k;
len1 -= k;
if (len1 == 0)
return;
/*
* Find where the last element of run1 goes in run2. Subsequent elements
* in run2 can be ignored (because they're already in place).
*/
len2 = gallopLeft(s.getKey(a, base1 + len1 - 1, key0), a, base2, len2, len2 - 1, c);
assert len2 >= 0;
if (len2 == 0)
return;
// Merge remaining runs, using tmp array with min(len1, len2) elements
if (len1 <= len2)
mergeLo(base1, len1, base2, len2);
else
mergeHi(base1, len1, base2, len2);
}
/**
* Locates the position at which to insert the specified key into the
* specified sorted range; if the range contains an element equal to key,
* returns the index of the leftmost equal element.
*
* @param key the key whose insertion point to search for
* @param a the array in which to search
* @param base the index of the first element in the range
* @param len the length of the range; must be > 0
* @param hint the index at which to begin the search, 0 <= hint < n.
* The closer hint is to the result, the faster this method will run.
* @param c the comparator used to order the range, and to search
* @return the int k, 0 <= k <= n such that a[b + k - 1] < key <= a[b + k],
* pretending that a[b - 1] is minus infinity and a[b + n] is infinity.
* In other words, key belongs at index b + k; or in other words,
* the first k elements of a should precede key, and the last n - k
* should follow it.
*/
private int gallopLeft(K key, Buffer a, int base, int len, int hint, Comparator<? super K> c) {
assert len > 0 && hint >= 0 && hint < len;
int lastOfs = 0;
int ofs = 1;
K key0 = s.newKey();
if (c.compare(key, s.getKey(a, base + hint, key0)) > 0) {
// Gallop right until a[base+hint+lastOfs] < key <= a[base+hint+ofs]
int maxOfs = len - hint;
while (ofs < maxOfs && c.compare(key, s.getKey(a, base + hint + ofs, key0)) > 0) {
lastOfs = ofs;
ofs = (ofs << 1) + 1;
if (ofs <= 0) // int overflow
ofs = maxOfs;
}
if (ofs > maxOfs)
ofs = maxOfs;
// Make offsets relative to base
lastOfs += hint;
ofs += hint;
} else { // key <= a[base + hint]
// Gallop left until a[base+hint-ofs] < key <= a[base+hint-lastOfs]
final int maxOfs = hint + 1;
while (ofs < maxOfs && c.compare(key, s.getKey(a, base + hint - ofs, key0)) <= 0) {
lastOfs = ofs;
ofs = (ofs << 1) + 1;
if (ofs <= 0) // int overflow
ofs = maxOfs;
}
if (ofs > maxOfs)
ofs = maxOfs;
// Make offsets relative to base
int tmp = lastOfs;
lastOfs = hint - ofs;
ofs = hint - tmp;
}
assert -1 <= lastOfs && lastOfs < ofs && ofs <= len;
/*
* Now a[base+lastOfs] < key <= a[base+ofs], so key belongs somewhere
* to the right of lastOfs but no farther right than ofs. Do a binary
* search, with invariant a[base + lastOfs - 1] < key <= a[base + ofs].
*/
lastOfs++;
while (lastOfs < ofs) {
int m = lastOfs + ((ofs - lastOfs) >>> 1);
if (c.compare(key, s.getKey(a, base + m, key0)) > 0)
lastOfs = m + 1; // a[base + m] < key
else
ofs = m; // key <= a[base + m]
}
assert lastOfs == ofs; // so a[base + ofs - 1] < key <= a[base + ofs]
return ofs;
}
/**
* Like gallopLeft, except that if the range contains an element equal to
* key, gallopRight returns the index after the rightmost equal element.
*
* @param key the key whose insertion point to search for
* @param a the array in which to search
* @param base the index of the first element in the range
* @param len the length of the range; must be > 0
* @param hint the index at which to begin the search, 0 <= hint < n.
* The closer hint is to the result, the faster this method will run.
* @param c the comparator used to order the range, and to search
* @return the int k, 0 <= k <= n such that a[b + k - 1] <= key < a[b + k]
*/
private int gallopRight(K key, Buffer a, int base, int len, int hint, Comparator<? super K> c) {
assert len > 0 && hint >= 0 && hint < len;
int ofs = 1;
int lastOfs = 0;
K key1 = s.newKey();
if (c.compare(key, s.getKey(a, base + hint, key1)) < 0) {
// Gallop left until a[b+hint - ofs] <= key < a[b+hint - lastOfs]
int maxOfs = hint + 1;
while (ofs < maxOfs && c.compare(key, s.getKey(a, base + hint - ofs, key1)) < 0) {
lastOfs = ofs;
ofs = (ofs << 1) + 1;
if (ofs <= 0) // int overflow
ofs = maxOfs;
}
if (ofs > maxOfs)
ofs = maxOfs;
// Make offsets relative to b
int tmp = lastOfs;
lastOfs = hint - ofs;
ofs = hint - tmp;
} else { // a[b + hint] <= key
// Gallop right until a[b+hint + lastOfs] <= key < a[b+hint + ofs]
int maxOfs = len - hint;
while (ofs < maxOfs && c.compare(key, s.getKey(a, base + hint + ofs, key1)) >= 0) {
lastOfs = ofs;
ofs = (ofs << 1) + 1;
if (ofs <= 0) // int overflow
ofs = maxOfs;
}
if (ofs > maxOfs)
ofs = maxOfs;
// Make offsets relative to b
lastOfs += hint;
ofs += hint;
}
assert -1 <= lastOfs && lastOfs < ofs && ofs <= len;
/*
* Now a[b + lastOfs] <= key < a[b + ofs], so key belongs somewhere to
* the right of lastOfs but no farther right than ofs. Do a binary
* search, with invariant a[b + lastOfs - 1] <= key < a[b + ofs].
*/
lastOfs++;
while (lastOfs < ofs) {
int m = lastOfs + ((ofs - lastOfs) >>> 1);
if (c.compare(key, s.getKey(a, base + m, key1)) < 0)
ofs = m; // key < a[b + m]
else
lastOfs = m + 1; // a[b + m] <= key
}
assert lastOfs == ofs; // so a[b + ofs - 1] <= key < a[b + ofs]
return ofs;
}
/**
* Merges two adjacent runs in place, in a stable fashion. The first
* element of the first run must be greater than the first element of the
* second run (a[base1] > a[base2]), and the last element of the first run
* (a[base1 + len1-1]) must be greater than all elements of the second run.
*
* For performance, this method should be called only when len1 <= len2;
* its twin, mergeHi should be called if len1 >= len2. (Either method
* may be called if len1 == len2.)
*
* @param base1 index of first element in first run to be merged
* @param len1 length of first run to be merged (must be > 0)
* @param base2 index of first element in second run to be merged
* (must be aBase + aLen)
* @param len2 length of second run to be merged (must be > 0)
*/
private void mergeLo(int base1, int len1, int base2, int len2) {
assert len1 > 0 && len2 > 0 && base1 + len1 == base2;
// Copy first run into temp array
Buffer a = this.a; // For performance
Buffer tmp = ensureCapacity(len1);
s.copyRange(a, base1, tmp, 0, len1);
int cursor1 = 0; // Indexes into tmp array
int cursor2 = base2; // Indexes int a
int dest = base1; // Indexes int a
// Move first element of second run and deal with degenerate cases
s.copyElement(a, cursor2++, a, dest++);
if (--len2 == 0) {
s.copyRange(tmp, cursor1, a, dest, len1);
return;
}
if (len1 == 1) {
s.copyRange(a, cursor2, a, dest, len2);
s.copyElement(tmp, cursor1, a, dest + len2); // Last elt of run 1 to end of merge
return;
}
K key0 = s.newKey();
K key1 = s.newKey();
Comparator<? super K> c = this.c; // Use local variable for performance
int minGallop = this.minGallop; // " " " " "
outer:
while (true) {
int count1 = 0; // Number of times in a row that first run won
int count2 = 0; // Number of times in a row that second run won
/*
* Do the straightforward thing until (if ever) one run starts
* winning consistently.
*/
do {
assert len1 > 1 && len2 > 0;
if (c.compare(s.getKey(a, cursor2, key0), s.getKey(tmp, cursor1, key1)) < 0) {
s.copyElement(a, cursor2++, a, dest++);
count2++;
count1 = 0;
if (--len2 == 0)
break outer;
} else {
s.copyElement(tmp, cursor1++, a, dest++);
count1++;
count2 = 0;
if (--len1 == 1)
break outer;
}
} while ((count1 | count2) < minGallop);
/*
* One run is winning so consistently that galloping may be a
* huge win. So try that, and continue galloping until (if ever)
* neither run appears to be winning consistently anymore.
*/
do {
assert len1 > 1 && len2 > 0;
count1 = gallopRight(s.getKey(a, cursor2, key0), tmp, cursor1, len1, 0, c);
if (count1 != 0) {
s.copyRange(tmp, cursor1, a, dest, count1);
dest += count1;
cursor1 += count1;
len1 -= count1;
if (len1 <= 1) // len1 == 1 || len1 == 0
break outer;
}
s.copyElement(a, cursor2++, a, dest++);
if (--len2 == 0)
break outer;
count2 = gallopLeft(s.getKey(tmp, cursor1, key0), a, cursor2, len2, 0, c);
if (count2 != 0) {
s.copyRange(a, cursor2, a, dest, count2);
dest += count2;
cursor2 += count2;
len2 -= count2;
if (len2 == 0)
break outer;
}
s.copyElement(tmp, cursor1++, a, dest++);
if (--len1 == 1)
break outer;
minGallop--;
} while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP);
if (minGallop < 0)
minGallop = 0;
minGallop += 2; // Penalize for leaving gallop mode
} // End of "outer" loop
this.minGallop = minGallop < 1 ? 1 : minGallop; // Write back to field
if (len1 == 1) {
assert len2 > 0;
s.copyRange(a, cursor2, a, dest, len2);
s.copyElement(tmp, cursor1, a, dest + len2); // Last elt of run 1 to end of merge
} else if (len1 == 0) {
throw new IllegalArgumentException(
"Comparison method violates its general contract!");
} else {
assert len2 == 0;
assert len1 > 1;
s.copyRange(tmp, cursor1, a, dest, len1);
}
}
/**
* Like mergeLo, except that this method should be called only if
* len1 >= len2; mergeLo should be called if len1 <= len2. (Either method
* may be called if len1 == len2.)
*
* @param base1 index of first element in first run to be merged
* @param len1 length of first run to be merged (must be > 0)
* @param base2 index of first element in second run to be merged
* (must be aBase + aLen)
* @param len2 length of second run to be merged (must be > 0)
*/
private void mergeHi(int base1, int len1, int base2, int len2) {
assert len1 > 0 && len2 > 0 && base1 + len1 == base2;
// Copy second run into temp array
Buffer a = this.a; // For performance
Buffer tmp = ensureCapacity(len2);
s.copyRange(a, base2, tmp, 0, len2);
int cursor1 = base1 + len1 - 1; // Indexes into a
int cursor2 = len2 - 1; // Indexes into tmp array
int dest = base2 + len2 - 1; // Indexes into a
K key0 = s.newKey();
K key1 = s.newKey();
// Move last element of first run and deal with degenerate cases
s.copyElement(a, cursor1--, a, dest--);
if (--len1 == 0) {
s.copyRange(tmp, 0, a, dest - (len2 - 1), len2);
return;
}
if (len2 == 1) {
dest -= len1;
cursor1 -= len1;
s.copyRange(a, cursor1 + 1, a, dest + 1, len1);
s.copyElement(tmp, cursor2, a, dest);
return;
}
Comparator<? super K> c = this.c; // Use local variable for performance
int minGallop = this.minGallop; // " " " " "
outer:
while (true) {
int count1 = 0; // Number of times in a row that first run won
int count2 = 0; // Number of times in a row that second run won
/*
* Do the straightforward thing until (if ever) one run
* appears to win consistently.
*/
do {
assert len1 > 0 && len2 > 1;
if (c.compare(s.getKey(tmp, cursor2, key0), s.getKey(a, cursor1, key1)) < 0) {
s.copyElement(a, cursor1--, a, dest--);
count1++;
count2 = 0;
if (--len1 == 0)
break outer;
} else {
s.copyElement(tmp, cursor2--, a, dest--);
count2++;
count1 = 0;
if (--len2 == 1)
break outer;
}
} while ((count1 | count2) < minGallop);
/*
* One run is winning so consistently that galloping may be a
* huge win. So try that, and continue galloping until (if ever)
* neither run appears to be winning consistently anymore.
*/
do {
assert len1 > 0 && len2 > 1;
count1 = len1 - gallopRight(s.getKey(tmp, cursor2, key0), a, base1, len1, len1 - 1, c);
if (count1 != 0) {
dest -= count1;
cursor1 -= count1;
len1 -= count1;
s.copyRange(a, cursor1 + 1, a, dest + 1, count1);
if (len1 == 0)
break outer;
}
s.copyElement(tmp, cursor2--, a, dest--);
if (--len2 == 1)
break outer;
count2 = len2 - gallopLeft(s.getKey(a, cursor1, key0), tmp, 0, len2, len2 - 1, c);
if (count2 != 0) {
dest -= count2;
cursor2 -= count2;
len2 -= count2;
s.copyRange(tmp, cursor2 + 1, a, dest + 1, count2);
if (len2 <= 1) // len2 == 1 || len2 == 0
break outer;
}
s.copyElement(a, cursor1--, a, dest--);
if (--len1 == 0)
break outer;
minGallop--;
} while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP);
if (minGallop < 0)
minGallop = 0;
minGallop += 2; // Penalize for leaving gallop mode
} // End of "outer" loop
this.minGallop = minGallop < 1 ? 1 : minGallop; // Write back to field
if (len2 == 1) {
assert len1 > 0;
dest -= len1;
cursor1 -= len1;
s.copyRange(a, cursor1 + 1, a, dest + 1, len1);
s.copyElement(tmp, cursor2, a, dest); // Move first elt of run2 to front of merge
} else if (len2 == 0) {
throw new IllegalArgumentException(
"Comparison method violates its general contract!");
} else {
assert len1 == 0;
assert len2 > 0;
s.copyRange(tmp, 0, a, dest - (len2 - 1), len2);
}
}
/**
* Ensures that the external array tmp has at least the specified
* number of elements, increasing its size if necessary. The size
* increases exponentially to ensure amortized linear time complexity.
*
* @param minCapacity the minimum required capacity of the tmp array
* @return tmp, whether or not it grew
*/
private Buffer ensureCapacity(int minCapacity) {
if (tmpLength < minCapacity) {
// Compute smallest power of 2 > minCapacity
int newSize = minCapacity;
newSize |= newSize >> 1;
newSize |= newSize >> 2;
newSize |= newSize >> 4;
newSize |= newSize >> 8;
newSize |= newSize >> 16;
newSize++;
if (newSize < 0) // Not bloody likely!
newSize = minCapacity;
else
newSize = Math.min(newSize, aLength >>> 1);
tmp = s.allocate(newSize);
tmpLength = newSize;
}
return tmp;
}
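    /*
     * For illustration, a rough worked example of the power-of-two rounding above (assuming a
     * sufficiently large input array): minCapacity = 100 (0b0110_0100) is smeared to
     * 0b0111_1111 = 127 by the shift-or cascade, and the final increment yields 128, the
     * smallest power of two strictly greater than 100. An input that is already a power of two,
     * such as 64, rounds up to 128 for the same reason.
     */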
}
}
| 9,631 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSortDataFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.util.collection.SortDataFormat;
/**
* Supports sorting an array of (record pointer, key prefix) pairs.
* Used in {@link UnsafeInMemorySorter}.
* <p>
* Within each long[] buffer, position {@code 2 * i} holds a pointer to the record at
* index {@code i}, while position {@code 2 * i + 1} in the array holds an 8-byte key prefix.
*/
public final class UnsafeSortDataFormat
extends SortDataFormat<RecordPointerAndKeyPrefix, LongArray> {
private final LongArray buffer;
public UnsafeSortDataFormat(LongArray buffer) {
this.buffer = buffer;
}
@Override
public RecordPointerAndKeyPrefix getKey(LongArray data, int pos) {
// Since we re-use keys, this method shouldn't be called.
throw new UnsupportedOperationException();
}
@Override
public RecordPointerAndKeyPrefix newKey() {
return new RecordPointerAndKeyPrefix();
}
@Override
public RecordPointerAndKeyPrefix getKey(LongArray data, int pos,
RecordPointerAndKeyPrefix reuse) {
reuse.recordPointer = data.get(pos * 2);
reuse.keyPrefix = data.get(pos * 2 + 1);
return reuse;
}
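  // For illustration: with this layout the record at logical index i occupies two physical
  // slots, so e.g. the pointer for record 3 lives at data.get(6) and its key prefix at
  // data.get(7). The same two-longs-per-record mapping is why copyRange below moves 16 bytes
  // per record.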
@Override
public void swap(LongArray data, int pos0, int pos1) {
long tempPointer = data.get(pos0 * 2);
long tempKeyPrefix = data.get(pos0 * 2 + 1);
data.set(pos0 * 2, data.get(pos1 * 2));
data.set(pos0 * 2 + 1, data.get(pos1 * 2 + 1));
data.set(pos1 * 2, tempPointer);
data.set(pos1 * 2 + 1, tempKeyPrefix);
}
@Override
public void copyElement(LongArray src, int srcPos, LongArray dst, int dstPos) {
dst.set(dstPos * 2, src.get(srcPos * 2));
dst.set(dstPos * 2 + 1, src.get(srcPos * 2 + 1));
}
@Override
public void copyRange(LongArray src, int srcPos, LongArray dst, int dstPos, int length) {
Platform.copyMemory(
src.getBaseObject(),
src.getBaseOffset() + srcPos * 16L,
dst.getBaseObject(),
dst.getBaseOffset() + dstPos * 16L,
length * 16L);
}
@Override
public LongArray allocate(int length) {
assert (length * 2L <= buffer.size()) :
"the buffer is smaller than required: " + buffer.size() + " < " + (length * 2);
return buffer;
}
}
| 9,632 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import com.google.common.io.ByteStreams;
import com.google.common.io.Closeables;
import org.apache.spark.SparkEnv;
import org.apache.spark.TaskContext;
import org.apache.spark.io.NioBufferedFileInputStream;
import org.apache.spark.io.ReadAheadInputStream;
import org.apache.spark.serializer.SerializerManager;
import org.apache.spark.storage.BlockId;
import org.apache.spark.unsafe.Platform;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
/**
* Reads spill files written by {@link UnsafeSorterSpillWriter} (see that class for a description
* of the file format).
*/
public final class UnsafeSorterSpillReader extends UnsafeSorterIterator implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(UnsafeSorterSpillReader.class);
private static final int DEFAULT_BUFFER_SIZE_BYTES = 1024 * 1024; // 1 MB
  private static final int MAX_BUFFER_SIZE_BYTES = 16777216; // 16 MB
private InputStream in;
private DataInputStream din;
// Variables that change with every record read:
private int recordLength;
private long keyPrefix;
private int numRecords;
private int numRecordsRemaining;
private byte[] arr = new byte[1024 * 1024];
private Object baseObject = arr;
private final TaskContext taskContext = TaskContext.get();
public UnsafeSorterSpillReader(
SerializerManager serializerManager,
File file,
BlockId blockId) throws IOException {
assert (file.length() > 0);
long bufferSizeBytes =
SparkEnv.get() == null ?
DEFAULT_BUFFER_SIZE_BYTES:
SparkEnv.get().conf().getSizeAsBytes("spark.unsafe.sorter.spill.reader.buffer.size",
DEFAULT_BUFFER_SIZE_BYTES);
if (bufferSizeBytes > MAX_BUFFER_SIZE_BYTES || bufferSizeBytes < DEFAULT_BUFFER_SIZE_BYTES) {
// fall back to a sane default value
logger.warn("Value of config \"spark.unsafe.sorter.spill.reader.buffer.size\" = {} not in " +
"allowed range [{}, {}). Falling back to default value : {} bytes", bufferSizeBytes,
DEFAULT_BUFFER_SIZE_BYTES, MAX_BUFFER_SIZE_BYTES, DEFAULT_BUFFER_SIZE_BYTES);
bufferSizeBytes = DEFAULT_BUFFER_SIZE_BYTES;
}
final boolean readAheadEnabled = SparkEnv.get() != null &&
SparkEnv.get().conf().getBoolean("spark.unsafe.sorter.spill.read.ahead.enabled", true);
final InputStream bs =
new NioBufferedFileInputStream(file, (int) bufferSizeBytes);
try {
if (readAheadEnabled) {
this.in = new ReadAheadInputStream(serializerManager.wrapStream(blockId, bs),
(int) bufferSizeBytes);
} else {
this.in = serializerManager.wrapStream(blockId, bs);
}
this.din = new DataInputStream(this.in);
numRecords = numRecordsRemaining = din.readInt();
} catch (IOException e) {
Closeables.close(bs, /* swallowIOException = */ true);
throw e;
}
}
@Override
public int getNumRecords() {
return numRecords;
}
@Override
public boolean hasNext() {
return (numRecordsRemaining > 0);
}
@Override
public void loadNext() throws IOException {
// Kill the task in case it has been marked as killed. This logic is from
// InterruptibleIterator, but we inline it here instead of wrapping the iterator in order
// to avoid performance overhead. This check is added here in `loadNext()` instead of in
// `hasNext()` because it's technically possible for the caller to be relying on
// `getNumRecords()` instead of `hasNext()` to know when to stop.
if (taskContext != null) {
taskContext.killTaskIfInterrupted();
}
recordLength = din.readInt();
keyPrefix = din.readLong();
if (recordLength > arr.length) {
arr = new byte[recordLength];
baseObject = arr;
}
ByteStreams.readFully(in, arr, 0, recordLength);
numRecordsRemaining--;
if (numRecordsRemaining == 0) {
close();
}
}
@Override
public Object getBaseObject() {
return baseObject;
}
@Override
public long getBaseOffset() {
return Platform.BYTE_ARRAY_OFFSET;
}
@Override
public int getRecordLength() {
return recordLength;
}
@Override
public long getKeyPrefix() {
return keyPrefix;
}
@Override
public void close() throws IOException {
if (in != null) {
try {
in.close();
} finally {
in = null;
din = null;
}
}
}
}
| 9,633 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import java.io.File;
import java.io.IOException;
import scala.Tuple2;
import org.apache.spark.SparkConf;
import org.apache.spark.serializer.SerializerManager;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.serializer.DummySerializerInstance;
import org.apache.spark.storage.BlockId;
import org.apache.spark.storage.BlockManager;
import org.apache.spark.storage.DiskBlockObjectWriter;
import org.apache.spark.storage.TempLocalBlockId;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.internal.config.package$;
/**
* Spills a list of sorted records to disk. Spill files have the following format:
*
* [# of records (int)] [[len (int)][prefix (long)][data (bytes)]...]
*/
public final class UnsafeSorterSpillWriter {
private final SparkConf conf = new SparkConf();
/** The buffer size to use when writing the sorted records to an on-disk file */
private final int diskWriteBufferSize =
(int) (long) conf.get(package$.MODULE$.SHUFFLE_DISK_WRITE_BUFFER_SIZE());
// Small writes to DiskBlockObjectWriter will be fairly inefficient. Since there doesn't seem to
// be an API to directly transfer bytes from managed memory to the disk writer, we buffer
// data through a byte array.
private byte[] writeBuffer = new byte[diskWriteBufferSize];
private final File file;
private final BlockId blockId;
private final int numRecordsToWrite;
private DiskBlockObjectWriter writer;
private int numRecordsSpilled = 0;
public UnsafeSorterSpillWriter(
BlockManager blockManager,
int fileBufferSize,
ShuffleWriteMetrics writeMetrics,
int numRecordsToWrite) throws IOException {
final Tuple2<TempLocalBlockId, File> spilledFileInfo =
blockManager.diskBlockManager().createTempLocalBlock();
this.file = spilledFileInfo._2();
this.blockId = spilledFileInfo._1();
this.numRecordsToWrite = numRecordsToWrite;
// Unfortunately, we need a serializer instance in order to construct a DiskBlockObjectWriter.
// Our write path doesn't actually use this serializer (since we end up calling the `write()`
// OutputStream methods), but DiskBlockObjectWriter still calls some methods on it. To work
// around this, we pass a dummy no-op serializer.
writer = blockManager.getDiskWriter(
blockId, file, DummySerializerInstance.INSTANCE, fileBufferSize, writeMetrics);
// Write the number of records
writeIntToBuffer(numRecordsToWrite, 0);
writer.write(writeBuffer, 0, 4);
}
// Based on DataOutputStream.writeLong.
private void writeLongToBuffer(long v, int offset) {
writeBuffer[offset + 0] = (byte)(v >>> 56);
writeBuffer[offset + 1] = (byte)(v >>> 48);
writeBuffer[offset + 2] = (byte)(v >>> 40);
writeBuffer[offset + 3] = (byte)(v >>> 32);
writeBuffer[offset + 4] = (byte)(v >>> 24);
writeBuffer[offset + 5] = (byte)(v >>> 16);
writeBuffer[offset + 6] = (byte)(v >>> 8);
writeBuffer[offset + 7] = (byte)(v >>> 0);
}
// Based on DataOutputStream.writeInt.
private void writeIntToBuffer(int v, int offset) {
writeBuffer[offset + 0] = (byte)(v >>> 24);
writeBuffer[offset + 1] = (byte)(v >>> 16);
writeBuffer[offset + 2] = (byte)(v >>> 8);
writeBuffer[offset + 3] = (byte)(v >>> 0);
}
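  /*
   * For illustration, a minimal sketch of how the resulting file layout could be read back with
   * plain java.io streams; the real read path lives in UnsafeSorterSpillReader and additionally
   * goes through the serializer manager and buffered/read-ahead streams, so this only applies
   * when no compression or encryption is configured. The name `spillFile` is just an example
   * stand-in for the File written by this class:
   *
   *   try (java.io.DataInputStream in =
   *       new java.io.DataInputStream(new java.io.FileInputStream(spillFile))) {
   *     int numRecords = in.readInt();
   *     for (int i = 0; i < numRecords; i++) {
   *       int length = in.readInt();        // [len (int)]
   *       long prefix = in.readLong();      // [prefix (long)]
   *       byte[] record = new byte[length]; // [data (bytes)]
   *       in.readFully(record);
   *     }
   *   }
   */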
/**
* Write a record to a spill file.
*
* @param baseObject the base object / memory page containing the record
* @param baseOffset the base offset which points directly to the record data.
* @param recordLength the length of the record.
* @param keyPrefix a sort key prefix
*/
public void write(
Object baseObject,
long baseOffset,
int recordLength,
long keyPrefix) throws IOException {
if (numRecordsSpilled == numRecordsToWrite) {
throw new IllegalStateException(
"Number of records written exceeded numRecordsToWrite = " + numRecordsToWrite);
} else {
numRecordsSpilled++;
}
writeIntToBuffer(recordLength, 0);
writeLongToBuffer(keyPrefix, 4);
int dataRemaining = recordLength;
int freeSpaceInWriteBuffer = diskWriteBufferSize - 4 - 8; // space used by prefix + len
long recordReadPosition = baseOffset;
while (dataRemaining > 0) {
final int toTransfer = Math.min(freeSpaceInWriteBuffer, dataRemaining);
Platform.copyMemory(
baseObject,
recordReadPosition,
writeBuffer,
Platform.BYTE_ARRAY_OFFSET + (diskWriteBufferSize - freeSpaceInWriteBuffer),
toTransfer);
writer.write(writeBuffer, 0, (diskWriteBufferSize - freeSpaceInWriteBuffer) + toTransfer);
recordReadPosition += toTransfer;
dataRemaining -= toTransfer;
freeSpaceInWriteBuffer = diskWriteBufferSize;
}
if (freeSpaceInWriteBuffer < diskWriteBufferSize) {
writer.write(writeBuffer, 0, (diskWriteBufferSize - freeSpaceInWriteBuffer));
}
writer.recordWritten();
}
public void close() throws IOException {
writer.commitAndGet();
writer.close();
writer = null;
writeBuffer = null;
}
public File getFile() {
return file;
}
public UnsafeSorterSpillReader getReader(SerializerManager serializerManager) throws IOException {
return new UnsafeSorterSpillReader(serializerManager, file, blockId);
}
public int recordsSpilled() {
return numRecordsSpilled;
}
}
| 9,634 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import java.io.IOException;
import java.util.Comparator;
import java.util.PriorityQueue;
final class UnsafeSorterSpillMerger {
private int numRecords = 0;
private final PriorityQueue<UnsafeSorterIterator> priorityQueue;
UnsafeSorterSpillMerger(
RecordComparator recordComparator,
PrefixComparator prefixComparator,
int numSpills) {
Comparator<UnsafeSorterIterator> comparator = (left, right) -> {
int prefixComparisonResult =
prefixComparator.compare(left.getKeyPrefix(), right.getKeyPrefix());
if (prefixComparisonResult == 0) {
return recordComparator.compare(
left.getBaseObject(), left.getBaseOffset(), left.getRecordLength(),
right.getBaseObject(), right.getBaseOffset(), right.getRecordLength());
} else {
return prefixComparisonResult;
}
};
priorityQueue = new PriorityQueue<>(numSpills, comparator);
}
/**
* Add an UnsafeSorterIterator to this merger
*/
public void addSpillIfNotEmpty(UnsafeSorterIterator spillReader) throws IOException {
if (spillReader.hasNext()) {
      // Only add the spillReader to the priorityQueue if it is not empty. Otherwise the
      // UnsafeSorterIterator returned by getSortedIterator would report hasNext() == true
      // at least priorityQueue.size() times even when some readers hold no records: allowing
      // n empty spillReaders into the priorityQueue would surface n extra empty records in
      // the merged output.
spillReader.loadNext();
priorityQueue.add(spillReader);
numRecords += spillReader.getNumRecords();
}
}
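  /*
   * For illustration, a rough sketch of how this merger is typically driven (the spill writer
   * and comparator names here are assumptions for the example, not part of this class):
   *
   *   UnsafeSorterSpillMerger merger =
   *     new UnsafeSorterSpillMerger(recordComparator, prefixComparator, spillWriters.size());
   *   for (UnsafeSorterSpillWriter spillWriter : spillWriters) {
   *     merger.addSpillIfNotEmpty(spillWriter.getReader(serializerManager));
   *   }
   *   UnsafeSorterIterator sorted = merger.getSortedIterator();
   *   while (sorted.hasNext()) {
   *     sorted.loadNext(); // then consume getBaseObject()/getBaseOffset()/getRecordLength()
   *   }
   */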
public UnsafeSorterIterator getSortedIterator() throws IOException {
return new UnsafeSorterIterator() {
private UnsafeSorterIterator spillReader;
@Override
public int getNumRecords() {
return numRecords;
}
@Override
public boolean hasNext() {
return !priorityQueue.isEmpty() || (spillReader != null && spillReader.hasNext());
}
@Override
public void loadNext() throws IOException {
if (spillReader != null) {
if (spillReader.hasNext()) {
spillReader.loadNext();
priorityQueue.add(spillReader);
}
}
spillReader = priorityQueue.remove();
}
@Override
public Object getBaseObject() { return spillReader.getBaseObject(); }
@Override
public long getBaseOffset() { return spillReader.getBaseOffset(); }
@Override
public int getRecordLength() { return spillReader.getRecordLength(); }
@Override
public long getKeyPrefix() { return spillReader.getKeyPrefix(); }
};
}
}
| 9,635 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import java.io.IOException;
public abstract class UnsafeSorterIterator {
public abstract boolean hasNext();
public abstract void loadNext() throws IOException;
public abstract Object getBaseObject();
public abstract long getBaseOffset();
public abstract int getRecordLength();
public abstract long getKeyPrefix();
public abstract int getNumRecords();
}
| 9,636 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/RadixSort.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import com.google.common.primitives.Ints;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.array.LongArray;
public class RadixSort {
/**
* Sorts a given array of longs using least-significant-digit radix sort. This routine assumes
* you have extra space at the end of the array at least equal to the number of records. The
* sort is destructive and may relocate the data positioned within the array.
*
* @param array array of long elements followed by at least that many empty slots.
* @param numRecords number of data records in the array.
* @param startByteIndex the first byte (in range [0, 7]) to sort each long by, counting from the
* least significant byte.
* @param endByteIndex the last byte (in range [0, 7]) to sort each long by, counting from the
* least significant byte. Must be greater than startByteIndex.
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort.
*
* @return The starting index of the sorted data within the given array. We return this instead
* of always copying the data back to position zero for efficiency.
*/
public static int sort(
LongArray array, long numRecords, int startByteIndex, int endByteIndex,
boolean desc, boolean signed) {
assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0";
assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7";
assert endByteIndex > startByteIndex;
assert numRecords * 2 <= array.size();
long inIndex = 0;
long outIndex = numRecords;
if (numRecords > 0) {
long[][] counts = getCounts(array, numRecords, startByteIndex, endByteIndex);
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (counts[i] != null) {
sortAtByte(
array, numRecords, counts[i], i, inIndex, outIndex,
desc, signed && i == endByteIndex);
long tmp = inIndex;
inIndex = outIndex;
outIndex = tmp;
}
}
}
return Ints.checkedCast(inIndex);
}
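  /*
   * For illustration, a rough usage sketch (assuming `memoryBlock` holds at least 2 * n longs so
   * the required scratch space at the end of the array is available):
   *
   *   LongArray array = new LongArray(memoryBlock);
   *   // ... write the n input longs into positions [0, n) ...
   *   int start = RadixSort.sort(array, n, 0, 7, false, false); // ascending, unsigned, all bytes
   *   for (int i = 0; i < n; i++) {
   *     long sorted = array.get(start + i);
   *   }
   *
   * The sorted data may end up in either half of the array, which is why the starting index is
   * returned instead of the data always being copied back to position zero.
   */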
/**
* Performs a partial sort by copying data into destination offsets for each byte value at the
* specified byte offset.
*
* @param array array to partially sort.
* @param numRecords number of data records in the array.
* @param counts counts for each byte value. This routine destructively modifies this array.
* @param byteIdx the byte in a long to sort at, counting from the least significant byte.
* @param inIndex the starting index in the array where input data is located.
* @param outIndex the starting index where sorted output data should be written.
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort (only applies to last byte).
*/
private static void sortAtByte(
LongArray array, long numRecords, long[] counts, int byteIdx, long inIndex, long outIndex,
boolean desc, boolean signed) {
assert counts.length == 256;
long[] offsets = transformCountsToOffsets(
counts, numRecords, array.getBaseOffset() + outIndex * 8L, 8, desc, signed);
Object baseObject = array.getBaseObject();
long baseOffset = array.getBaseOffset() + inIndex * 8L;
long maxOffset = baseOffset + numRecords * 8L;
for (long offset = baseOffset; offset < maxOffset; offset += 8) {
long value = Platform.getLong(baseObject, offset);
int bucket = (int)((value >>> (byteIdx * 8)) & 0xff);
Platform.putLong(baseObject, offsets[bucket], value);
offsets[bucket] += 8;
}
}
/**
* Computes a value histogram for each byte in the given array.
*
* @param array array to count records in.
* @param numRecords number of data records in the array.
* @param startByteIndex the first byte to compute counts for (the prior are skipped).
* @param endByteIndex the last byte to compute counts for.
*
   * @return an array of eight 256-element count arrays, one for each byte starting from the least
* significant byte. If the byte does not need sorting the array will be null.
*/
private static long[][] getCounts(
LongArray array, long numRecords, int startByteIndex, int endByteIndex) {
long[][] counts = new long[8][];
// Optimization: do a fast pre-pass to determine which byte indices we can skip for sorting.
// If all the byte values at a particular index are the same we don't need to count it.
long bitwiseMax = 0;
long bitwiseMin = -1L;
long maxOffset = array.getBaseOffset() + numRecords * 8L;
Object baseObject = array.getBaseObject();
for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) {
long value = Platform.getLong(baseObject, offset);
bitwiseMax |= value;
bitwiseMin &= value;
}
long bitsChanged = bitwiseMin ^ bitwiseMax;
// Compute counts for each byte index.
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (((bitsChanged >>> (i * 8)) & 0xff) != 0) {
counts[i] = new long[256];
// TODO(ekl) consider computing all the counts in one pass.
for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) {
counts[i][(int)((Platform.getLong(baseObject, offset) >>> (i * 8)) & 0xff)]++;
}
}
}
return counts;
}
/**
* Transforms counts into the proper unsafe output offsets for the sort type.
*
* @param counts counts for each byte value. This routine destructively modifies this array.
* @param numRecords number of data records in the original data array.
* @param outputOffset output offset in bytes from the base array object.
* @param bytesPerRecord size of each record (8 for plain sort, 16 for key-prefix sort).
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort.
*
* @return the input counts array.
*/
private static long[] transformCountsToOffsets(
long[] counts, long numRecords, long outputOffset, long bytesPerRecord,
boolean desc, boolean signed) {
assert counts.length == 256;
    int start = signed ? 128 : 0; // output the negative records first (values 128-255).
if (desc) {
long pos = numRecords;
for (int i = start; i < start + 256; i++) {
pos -= counts[i & 0xff];
counts[i & 0xff] = outputOffset + pos * bytesPerRecord;
}
} else {
long pos = 0;
for (int i = start; i < start + 256; i++) {
long tmp = counts[i & 0xff];
counts[i & 0xff] = outputOffset + pos * bytesPerRecord;
pos += tmp;
}
}
return counts;
}
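  /*
   * For illustration, a small ascending example of the counts-to-offsets transform: with
   * numRecords = 6, bytesPerRecord = 8, outputOffset = 0 and counts {0x00: 2, 0x01: 1, 0x02: 3},
   * the loop rewrites counts to starting offsets {0x00: 0, 0x01: 16, 0x02: 24}, i.e. a prefix
   * sum scaled by the record size. sortAtByte then bumps each offset by 8 as it places records.
   */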
/**
* Specialization of sort() for key-prefix arrays. In this type of array, each record consists
* of two longs, only the second of which is sorted on.
*
* @param startIndex starting index in the array to sort from. This parameter is not supported
* in the plain sort() implementation.
*/
public static int sortKeyPrefixArray(
LongArray array,
long startIndex,
long numRecords,
int startByteIndex,
int endByteIndex,
boolean desc,
boolean signed) {
assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0";
assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7";
assert endByteIndex > startByteIndex;
assert numRecords * 4 <= array.size();
long inIndex = startIndex;
long outIndex = startIndex + numRecords * 2L;
if (numRecords > 0) {
long[][] counts = getKeyPrefixArrayCounts(
array, startIndex, numRecords, startByteIndex, endByteIndex);
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (counts[i] != null) {
sortKeyPrefixArrayAtByte(
array, numRecords, counts[i], i, inIndex, outIndex,
desc, signed && i == endByteIndex);
long tmp = inIndex;
inIndex = outIndex;
outIndex = tmp;
}
}
}
return Ints.checkedCast(inIndex);
}
/**
   * Specialization of getCounts() for key-prefix arrays. We could probably combine this with
   * getCounts() by adding a few parameters, but that seems to hurt performance in benchmarks.
*/
private static long[][] getKeyPrefixArrayCounts(
LongArray array, long startIndex, long numRecords, int startByteIndex, int endByteIndex) {
long[][] counts = new long[8][];
long bitwiseMax = 0;
long bitwiseMin = -1L;
long baseOffset = array.getBaseOffset() + startIndex * 8L;
long limit = baseOffset + numRecords * 16L;
Object baseObject = array.getBaseObject();
for (long offset = baseOffset; offset < limit; offset += 16) {
long value = Platform.getLong(baseObject, offset + 8);
bitwiseMax |= value;
bitwiseMin &= value;
}
long bitsChanged = bitwiseMin ^ bitwiseMax;
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (((bitsChanged >>> (i * 8)) & 0xff) != 0) {
counts[i] = new long[256];
for (long offset = baseOffset; offset < limit; offset += 16) {
counts[i][(int)((Platform.getLong(baseObject, offset + 8) >>> (i * 8)) & 0xff)]++;
}
}
}
return counts;
}
/**
* Specialization of sortAtByte() for key-prefix arrays.
*/
private static void sortKeyPrefixArrayAtByte(
LongArray array, long numRecords, long[] counts, int byteIdx, long inIndex, long outIndex,
boolean desc, boolean signed) {
assert counts.length == 256;
long[] offsets = transformCountsToOffsets(
counts, numRecords, array.getBaseOffset() + outIndex * 8L, 16, desc, signed);
Object baseObject = array.getBaseObject();
long baseOffset = array.getBaseOffset() + inIndex * 8L;
long maxOffset = baseOffset + numRecords * 16L;
for (long offset = baseOffset; offset < maxOffset; offset += 16) {
long key = Platform.getLong(baseObject, offset);
long prefix = Platform.getLong(baseObject, offset + 8);
int bucket = (int)((prefix >>> (byteIdx * 8)) & 0xff);
long dest = offsets[bucket];
Platform.putLong(baseObject, dest, key);
Platform.putLong(baseObject, dest + 8, prefix);
offsets[bucket] += 16;
}
}
}
| 9,637 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import java.util.Comparator;
import java.util.LinkedList;
import org.apache.avro.reflect.Nullable;
import org.apache.spark.TaskContext;
import org.apache.spark.memory.MemoryConsumer;
import org.apache.spark.memory.SparkOutOfMemoryError;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.UnsafeAlignedOffset;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.collection.Sorter;
/**
* Sorts records using an AlphaSort-style key-prefix sort. This sort stores pointers to records
* alongside a user-defined prefix of the record's sorting key. When the underlying sort algorithm
* compares records, it will first compare the stored key prefixes; if the prefixes are not equal,
* then we do not need to traverse the record pointers to compare the actual records. Avoiding these
* random memory accesses improves cache hit rates.
*/
public final class UnsafeInMemorySorter {
private static final class SortComparator implements Comparator<RecordPointerAndKeyPrefix> {
private final RecordComparator recordComparator;
private final PrefixComparator prefixComparator;
private final TaskMemoryManager memoryManager;
SortComparator(
RecordComparator recordComparator,
PrefixComparator prefixComparator,
TaskMemoryManager memoryManager) {
this.recordComparator = recordComparator;
this.prefixComparator = prefixComparator;
this.memoryManager = memoryManager;
}
@Override
public int compare(RecordPointerAndKeyPrefix r1, RecordPointerAndKeyPrefix r2) {
final int prefixComparisonResult = prefixComparator.compare(r1.keyPrefix, r2.keyPrefix);
int uaoSize = UnsafeAlignedOffset.getUaoSize();
if (prefixComparisonResult == 0) {
final Object baseObject1 = memoryManager.getPage(r1.recordPointer);
final long baseOffset1 = memoryManager.getOffsetInPage(r1.recordPointer) + uaoSize;
final int baseLength1 = UnsafeAlignedOffset.getSize(baseObject1, baseOffset1 - uaoSize);
final Object baseObject2 = memoryManager.getPage(r2.recordPointer);
final long baseOffset2 = memoryManager.getOffsetInPage(r2.recordPointer) + uaoSize;
final int baseLength2 = UnsafeAlignedOffset.getSize(baseObject2, baseOffset2 - uaoSize);
return recordComparator.compare(baseObject1, baseOffset1, baseLength1, baseObject2,
baseOffset2, baseLength2);
} else {
return prefixComparisonResult;
}
}
}
private final MemoryConsumer consumer;
private final TaskMemoryManager memoryManager;
@Nullable
private final Comparator<RecordPointerAndKeyPrefix> sortComparator;
/**
* If non-null, specifies the radix sort parameters and that radix sort will be used.
*/
@Nullable
private final PrefixComparators.RadixSortSupport radixSortSupport;
/**
* Within this buffer, position {@code 2 * i} holds a pointer to the record at
* index {@code i}, while position {@code 2 * i + 1} in the array holds an 8-byte key prefix.
*
   * Only part of the array is used to store the pointers; the rest is reserved as a
   * temporary buffer for sorting.
*/
private LongArray array;
/**
* The position in the sort buffer where new records can be inserted.
*/
private int pos = 0;
/**
* If sorting with radix sort, specifies the starting position in the sort buffer where records
* with non-null prefixes are kept. Positions [0..nullBoundaryPos) will contain null-prefixed
* records, and positions [nullBoundaryPos..pos) non-null prefixed records. This lets us avoid
* radix sorting over null values.
*/
private int nullBoundaryPos = 0;
  /*
   * How many array slots can be used for record pointers and prefixes; the rest of the array
   * must be left free as scratch space for sorting.
   */
private int usableCapacity = 0;
private long initialSize;
private long totalSortTimeNanos = 0L;
public UnsafeInMemorySorter(
final MemoryConsumer consumer,
final TaskMemoryManager memoryManager,
final RecordComparator recordComparator,
final PrefixComparator prefixComparator,
int initialSize,
boolean canUseRadixSort) {
this(consumer, memoryManager, recordComparator, prefixComparator,
consumer.allocateArray(initialSize * 2L), canUseRadixSort);
}
public UnsafeInMemorySorter(
final MemoryConsumer consumer,
final TaskMemoryManager memoryManager,
final RecordComparator recordComparator,
final PrefixComparator prefixComparator,
LongArray array,
boolean canUseRadixSort) {
this.consumer = consumer;
this.memoryManager = memoryManager;
this.initialSize = array.size();
if (recordComparator != null) {
this.sortComparator = new SortComparator(recordComparator, prefixComparator, memoryManager);
if (canUseRadixSort && prefixComparator instanceof PrefixComparators.RadixSortSupport) {
this.radixSortSupport = (PrefixComparators.RadixSortSupport)prefixComparator;
} else {
this.radixSortSupport = null;
}
} else {
this.sortComparator = null;
this.radixSortSupport = null;
}
this.array = array;
this.usableCapacity = getUsableCapacity();
}
private int getUsableCapacity() {
    // Radix sort needs scratch space equal to the used portion of the array (so half of the
    // array is usable), while Tim sort needs scratch space equal to half of the used portion
    // (so two thirds of the array is usable).
return (int) (array.size() / (radixSortSupport != null ? 2 : 1.5));
}
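  // For illustration: with array.size() = 1024 long slots, radix sort leaves 512 usable slots
  // (256 records, since each record takes a pointer plus a prefix), while Tim sort leaves
  // roughly 682 usable slots (341 records).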
/**
* Free the memory used by pointer array.
*/
public void free() {
if (consumer != null) {
if (array != null) {
consumer.freeArray(array);
}
array = null;
}
}
public void reset() {
if (consumer != null) {
consumer.freeArray(array);
      // The call to consumer.allocateArray may trigger a spill which in turn accesses this
      // instance and eventually re-enters this method, trying to free the array again. By setting
      // the array to null and its length to 0 we effectively make the spill code path a no-op.
      // Setting the array to null also indicates that it has already been de-allocated, which
      // prevents a double de-allocation in free().
array = null;
usableCapacity = 0;
pos = 0;
nullBoundaryPos = 0;
array = consumer.allocateArray(initialSize);
usableCapacity = getUsableCapacity();
}
pos = 0;
nullBoundaryPos = 0;
}
/**
* @return the number of records that have been inserted into this sorter.
*/
public int numRecords() {
return pos / 2;
}
/**
* @return the total amount of time spent sorting data (in-memory only).
*/
public long getSortTimeNanos() {
return totalSortTimeNanos;
}
public long getMemoryUsage() {
return array.size() * 8;
}
public boolean hasSpaceForAnotherRecord() {
return pos + 1 < usableCapacity;
}
public void expandPointerArray(LongArray newArray) {
if (newArray.size() < array.size()) {
throw new SparkOutOfMemoryError("Not enough memory to grow pointer array");
}
Platform.copyMemory(
array.getBaseObject(),
array.getBaseOffset(),
newArray.getBaseObject(),
newArray.getBaseOffset(),
pos * 8L);
consumer.freeArray(array);
array = newArray;
usableCapacity = getUsableCapacity();
}
/**
* Inserts a record to be sorted. Assumes that the record pointer points to a record length
* stored as a 4-byte integer, followed by the record's bytes.
*
* @param recordPointer pointer to a record in a data page, encoded by {@link TaskMemoryManager}.
* @param keyPrefix a user-defined key prefix
*/
public void insertRecord(long recordPointer, long keyPrefix, boolean prefixIsNull) {
if (!hasSpaceForAnotherRecord()) {
throw new IllegalStateException("There is no space for new record");
}
if (prefixIsNull && radixSortSupport != null) {
// Swap forward a non-null record to make room for this one at the beginning of the array.
array.set(pos, array.get(nullBoundaryPos));
pos++;
array.set(pos, array.get(nullBoundaryPos + 1));
pos++;
// Place this record in the vacated position.
array.set(nullBoundaryPos, recordPointer);
nullBoundaryPos++;
array.set(nullBoundaryPos, keyPrefix);
nullBoundaryPos++;
} else {
array.set(pos, recordPointer);
pos++;
array.set(pos, keyPrefix);
pos++;
}
}
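  /*
   * For illustration: after inserting records with prefixes [null, 7, null, 3] in that order
   * (with radix sort enabled), the two null-prefixed records occupy positions
   * [0, nullBoundaryPos) and the records with prefixes 7 and 3 occupy positions
   * [nullBoundaryPos, pos), so the radix sort later only has to touch the non-null region.
   */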
public final class SortedIterator extends UnsafeSorterIterator implements Cloneable {
private final int numRecords;
private int position;
private int offset;
private Object baseObject;
private long baseOffset;
private long keyPrefix;
private int recordLength;
private long currentPageNumber;
private final TaskContext taskContext = TaskContext.get();
private SortedIterator(int numRecords, int offset) {
this.numRecords = numRecords;
this.position = 0;
this.offset = offset;
}
public SortedIterator clone() {
SortedIterator iter = new SortedIterator(numRecords, offset);
iter.position = position;
iter.baseObject = baseObject;
iter.baseOffset = baseOffset;
iter.keyPrefix = keyPrefix;
iter.recordLength = recordLength;
iter.currentPageNumber = currentPageNumber;
return iter;
}
@Override
public int getNumRecords() {
return numRecords;
}
@Override
public boolean hasNext() {
return position / 2 < numRecords;
}
@Override
public void loadNext() {
// Kill the task in case it has been marked as killed. This logic is from
// InterruptibleIterator, but we inline it here instead of wrapping the iterator in order
// to avoid performance overhead. This check is added here in `loadNext()` instead of in
// `hasNext()` because it's technically possible for the caller to be relying on
// `getNumRecords()` instead of `hasNext()` to know when to stop.
if (taskContext != null) {
taskContext.killTaskIfInterrupted();
}
// This pointer points to a 4-byte record length, followed by the record's bytes
final long recordPointer = array.get(offset + position);
currentPageNumber = TaskMemoryManager.decodePageNumber(recordPointer);
int uaoSize = UnsafeAlignedOffset.getUaoSize();
baseObject = memoryManager.getPage(recordPointer);
// Skip over record length
baseOffset = memoryManager.getOffsetInPage(recordPointer) + uaoSize;
recordLength = UnsafeAlignedOffset.getSize(baseObject, baseOffset - uaoSize);
keyPrefix = array.get(offset + position + 1);
position += 2;
}
@Override
public Object getBaseObject() { return baseObject; }
@Override
public long getBaseOffset() { return baseOffset; }
public long getCurrentPageNumber() {
return currentPageNumber;
}
@Override
public int getRecordLength() { return recordLength; }
@Override
public long getKeyPrefix() { return keyPrefix; }
}
/**
* Return an iterator over record pointers in sorted order. For efficiency, all calls to
* {@code next()} will return the same mutable object.
*/
public UnsafeSorterIterator getSortedIterator() {
int offset = 0;
long start = System.nanoTime();
if (sortComparator != null) {
if (this.radixSortSupport != null) {
offset = RadixSort.sortKeyPrefixArray(
array, nullBoundaryPos, (pos - nullBoundaryPos) / 2L, 0, 7,
radixSortSupport.sortDescending(), radixSortSupport.sortSigned());
} else {
MemoryBlock unused = new MemoryBlock(
array.getBaseObject(),
array.getBaseOffset() + pos * 8L,
(array.size() - pos) * 8L);
LongArray buffer = new LongArray(unused);
Sorter<RecordPointerAndKeyPrefix, LongArray> sorter =
new Sorter<>(new UnsafeSortDataFormat(buffer));
sorter.sort(array, 0, pos / 2, sortComparator);
}
}
totalSortTimeNanos += System.nanoTime() - start;
if (nullBoundaryPos > 0) {
assert radixSortSupport != null : "Nulls are only stored separately with radix sort";
LinkedList<UnsafeSorterIterator> queue = new LinkedList<>();
// The null order is either LAST or FIRST, regardless of sorting direction (ASC|DESC)
if (radixSortSupport.nullsFirst()) {
queue.add(new SortedIterator(nullBoundaryPos / 2, 0));
queue.add(new SortedIterator((pos - nullBoundaryPos) / 2, offset));
} else {
queue.add(new SortedIterator((pos - nullBoundaryPos) / 2, offset));
queue.add(new SortedIterator(nullBoundaryPos / 2, 0));
}
return new UnsafeExternalSorter.ChainedIterator(queue);
} else {
return new SortedIterator(pos / 2, offset);
}
}
}
| 9,638 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/RecordPointerAndKeyPrefix.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
public final class RecordPointerAndKeyPrefix {
/**
* A pointer to a record; see {@link org.apache.spark.memory.TaskMemoryManager} for a
* description of how these addresses are encoded.
*/
public long recordPointer;
/**
* A key prefix, for use in comparisons.
*/
public long keyPrefix;
}
| 9,639 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import com.google.common.primitives.UnsignedLongs;
import org.apache.spark.annotation.Private;
import org.apache.spark.unsafe.types.ByteArray;
import org.apache.spark.unsafe.types.UTF8String;
@Private
public class PrefixComparators {
private PrefixComparators() {}
public static final PrefixComparator STRING = new UnsignedPrefixComparator();
public static final PrefixComparator STRING_DESC = new UnsignedPrefixComparatorDesc();
public static final PrefixComparator STRING_NULLS_LAST = new UnsignedPrefixComparatorNullsLast();
public static final PrefixComparator STRING_DESC_NULLS_FIRST =
new UnsignedPrefixComparatorDescNullsFirst();
public static final PrefixComparator BINARY = new UnsignedPrefixComparator();
public static final PrefixComparator BINARY_DESC = new UnsignedPrefixComparatorDesc();
public static final PrefixComparator BINARY_NULLS_LAST = new UnsignedPrefixComparatorNullsLast();
public static final PrefixComparator BINARY_DESC_NULLS_FIRST =
new UnsignedPrefixComparatorDescNullsFirst();
public static final PrefixComparator LONG = new SignedPrefixComparator();
public static final PrefixComparator LONG_DESC = new SignedPrefixComparatorDesc();
public static final PrefixComparator LONG_NULLS_LAST = new SignedPrefixComparatorNullsLast();
public static final PrefixComparator LONG_DESC_NULLS_FIRST =
new SignedPrefixComparatorDescNullsFirst();
public static final PrefixComparator DOUBLE = new UnsignedPrefixComparator();
public static final PrefixComparator DOUBLE_DESC = new UnsignedPrefixComparatorDesc();
public static final PrefixComparator DOUBLE_NULLS_LAST = new UnsignedPrefixComparatorNullsLast();
public static final PrefixComparator DOUBLE_DESC_NULLS_FIRST =
new UnsignedPrefixComparatorDescNullsFirst();
public static final class StringPrefixComparator {
public static long computePrefix(UTF8String value) {
return value == null ? 0L : value.getPrefix();
}
}
public static final class BinaryPrefixComparator {
public static long computePrefix(byte[] bytes) {
return ByteArray.getPrefix(bytes);
}
}
public static final class DoublePrefixComparator {
/**
* Converts the double into a value that compares correctly as an unsigned long. For more
* details see http://stereopsis.com/radix.html.
*/
public static long computePrefix(double value) {
// normalize -0.0 to 0.0, as they should be equal
value = value == -0.0 ? 0.0 : value;
// Java's doubleToLongBits already canonicalizes all NaN values to the smallest possible
// positive NaN, so there's nothing special we need to do for NaNs.
long bits = Double.doubleToLongBits(value);
// Negative floats compare backwards due to their sign-magnitude representation, so flip
// all the bits in this case.
long mask = -(bits >>> 63) | 0x8000000000000000L;
return bits ^ mask;
}
}
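  // For illustration: computePrefix(-1.0) == 0x400FFFFFFFFFFFFFL, computePrefix(0.0) ==
  // 0x8000000000000000L and computePrefix(1.0) == 0xBFF0000000000000L, so an unsigned
  // comparison of the prefixes orders -1.0 < 0.0 < 1.0 as expected.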
/**
   * Provides radix sort parameters. Comparators implementing this also indicate that the
   * ordering they define is compatible with radix sort.
*/
public abstract static class RadixSortSupport extends PrefixComparator {
/** @return Whether the sort should be descending in binary sort order. */
public abstract boolean sortDescending();
/** @return Whether the sort should take into account the sign bit. */
public abstract boolean sortSigned();
/** @return Whether the sort should put nulls first or last. */
public abstract boolean nullsFirst();
}
//
// Standard prefix comparator implementations
//
public static final class UnsignedPrefixComparator extends RadixSortSupport {
@Override public boolean sortDescending() { return false; }
@Override public boolean sortSigned() { return false; }
@Override public boolean nullsFirst() { return true; }
public int compare(long aPrefix, long bPrefix) {
return UnsignedLongs.compare(aPrefix, bPrefix);
}
}
public static final class UnsignedPrefixComparatorNullsLast extends RadixSortSupport {
@Override public boolean sortDescending() { return false; }
@Override public boolean sortSigned() { return false; }
@Override public boolean nullsFirst() { return false; }
public int compare(long aPrefix, long bPrefix) {
return UnsignedLongs.compare(aPrefix, bPrefix);
}
}
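  // Note: the descending variants below take their parameters in reversed order
  // (bPrefix, aPrefix) / (b, a) and reuse the ascending comparison, which flips the sort
  // order without duplicating the comparison logic.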
public static final class UnsignedPrefixComparatorDescNullsFirst extends RadixSortSupport {
@Override public boolean sortDescending() { return true; }
@Override public boolean sortSigned() { return false; }
@Override public boolean nullsFirst() { return true; }
public int compare(long bPrefix, long aPrefix) {
return UnsignedLongs.compare(aPrefix, bPrefix);
}
}
public static final class UnsignedPrefixComparatorDesc extends RadixSortSupport {
@Override public boolean sortDescending() { return true; }
@Override public boolean sortSigned() { return false; }
@Override public boolean nullsFirst() { return false; }
public int compare(long bPrefix, long aPrefix) {
return UnsignedLongs.compare(aPrefix, bPrefix);
}
}
public static final class SignedPrefixComparator extends RadixSortSupport {
@Override public boolean sortDescending() { return false; }
@Override public boolean sortSigned() { return true; }
@Override public boolean nullsFirst() { return true; }
public int compare(long a, long b) {
return (a < b) ? -1 : (a > b) ? 1 : 0;
}
}
public static final class SignedPrefixComparatorNullsLast extends RadixSortSupport {
@Override public boolean sortDescending() { return false; }
@Override public boolean sortSigned() { return true; }
@Override public boolean nullsFirst() { return false; }
public int compare(long a, long b) {
return (a < b) ? -1 : (a > b) ? 1 : 0;
}
}
public static final class SignedPrefixComparatorDescNullsFirst extends RadixSortSupport {
@Override public boolean sortDescending() { return true; }
@Override public boolean sortSigned() { return true; }
@Override public boolean nullsFirst() { return true; }
public int compare(long b, long a) {
return (a < b) ? -1 : (a > b) ? 1 : 0;
}
}
public static final class SignedPrefixComparatorDesc extends RadixSortSupport {
@Override public boolean sortDescending() { return true; }
@Override public boolean sortSigned() { return true; }
@Override public boolean nullsFirst() { return false; }
public int compare(long b, long a) {
return (a < b) ? -1 : (a > b) ? 1 : 0;
}
}
}
| 9,640 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import org.apache.spark.annotation.Private;
/**
* Compares 8-byte key prefixes in prefix sort. Subclasses may implement type-specific
* comparisons, such as lexicographic comparison for strings.
*/
@Private
public abstract class PrefixComparator {
public abstract int compare(long prefix1, long prefix2);
}
| 9,641 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/RecordComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
/**
* Compares records for ordering. In cases where the entire sorting key can fit in the 8-byte
* prefix, this may simply return 0.
*/
public abstract class RecordComparator {
/**
* Compare two records for order.
*
* @return a negative integer, zero, or a positive integer as the first record is less than,
* equal to, or greater than the second.
*/
public abstract int compare(
Object leftBaseObject,
long leftBaseOffset,
int leftBaseLength,
Object rightBaseObject,
long rightBaseOffset,
int rightBaseLength);
}
| 9,642 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.util.LinkedList;
import java.util.Queue;
import java.util.function.Supplier;
import com.google.common.annotations.VisibleForTesting;
import org.apache.spark.memory.SparkOutOfMemoryError;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.TaskContext;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.memory.MemoryConsumer;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.memory.TooLargePageException;
import org.apache.spark.serializer.SerializerManager;
import org.apache.spark.storage.BlockManager;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.UnsafeAlignedOffset;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.Utils;
/**
* External sorter based on {@link UnsafeInMemorySorter}.
*/
public final class UnsafeExternalSorter extends MemoryConsumer {
private static final Logger logger = LoggerFactory.getLogger(UnsafeExternalSorter.class);
@Nullable
private final PrefixComparator prefixComparator;
/**
   * A {@link RecordComparator} may hold a reference to the records it compared last
   * time, so we should not keep a {@link RecordComparator} instance inside
* {@link UnsafeExternalSorter}, because {@link UnsafeExternalSorter} is referenced by
* {@link TaskContext} and thus can not be garbage collected until the end of the task.
*/
@Nullable
private final Supplier<RecordComparator> recordComparatorSupplier;
private final TaskMemoryManager taskMemoryManager;
private final BlockManager blockManager;
private final SerializerManager serializerManager;
private final TaskContext taskContext;
/** The buffer size to use when writing spills using DiskBlockObjectWriter */
private final int fileBufferSizeBytes;
/**
* Force this sorter to spill when there are this many elements in memory.
*/
private final int numElementsForSpillThreshold;
/**
* Memory pages that hold the records being sorted. The pages in this list are freed when
* spilling, although in principle we could recycle these pages across spills (on the other hand,
* this might not be necessary if we maintained a pool of re-usable pages in the TaskMemoryManager
* itself).
*/
private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<>();
private final LinkedList<UnsafeSorterSpillWriter> spillWriters = new LinkedList<>();
// These variables are reset after spilling:
@Nullable private volatile UnsafeInMemorySorter inMemSorter;
private MemoryBlock currentPage = null;
private long pageCursor = -1;
private long peakMemoryUsedBytes = 0;
private long totalSpillBytes = 0L;
private long totalSortTimeNanos = 0L;
private volatile SpillableIterator readingIterator = null;
public static UnsafeExternalSorter createWithExistingInMemorySorter(
TaskMemoryManager taskMemoryManager,
BlockManager blockManager,
SerializerManager serializerManager,
TaskContext taskContext,
Supplier<RecordComparator> recordComparatorSupplier,
PrefixComparator prefixComparator,
int initialSize,
long pageSizeBytes,
int numElementsForSpillThreshold,
UnsafeInMemorySorter inMemorySorter) throws IOException {
UnsafeExternalSorter sorter = new UnsafeExternalSorter(taskMemoryManager, blockManager,
serializerManager, taskContext, recordComparatorSupplier, prefixComparator, initialSize,
pageSizeBytes, numElementsForSpillThreshold, inMemorySorter, false /* ignored */);
sorter.spill(Long.MAX_VALUE, sorter);
// The external sorter will be used to insert records, in-memory sorter is not needed.
sorter.inMemSorter = null;
return sorter;
}
public static UnsafeExternalSorter create(
TaskMemoryManager taskMemoryManager,
BlockManager blockManager,
SerializerManager serializerManager,
TaskContext taskContext,
Supplier<RecordComparator> recordComparatorSupplier,
PrefixComparator prefixComparator,
int initialSize,
long pageSizeBytes,
int numElementsForSpillThreshold,
boolean canUseRadixSort) {
return new UnsafeExternalSorter(taskMemoryManager, blockManager, serializerManager,
taskContext, recordComparatorSupplier, prefixComparator, initialSize, pageSizeBytes,
numElementsForSpillThreshold, null, canUseRadixSort);
}
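  // Illustrative usage sketch (hypothetical variables; real callers obtain these handles from the
  // running task rather than constructing them by hand):
  //   UnsafeExternalSorter sorter = UnsafeExternalSorter.create(
  //       taskMemoryManager, blockManager, serializerManager, taskContext,
  //       recordComparatorSupplier, prefixComparator,
  //       /* initialSize = */ 4096, /* pageSizeBytes = */ 64 * 1024 * 1024,
  //       /* numElementsForSpillThreshold = */ Integer.MAX_VALUE, /* canUseRadixSort = */ false);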
private UnsafeExternalSorter(
TaskMemoryManager taskMemoryManager,
BlockManager blockManager,
SerializerManager serializerManager,
TaskContext taskContext,
Supplier<RecordComparator> recordComparatorSupplier,
PrefixComparator prefixComparator,
int initialSize,
long pageSizeBytes,
int numElementsForSpillThreshold,
@Nullable UnsafeInMemorySorter existingInMemorySorter,
boolean canUseRadixSort) {
super(taskMemoryManager, pageSizeBytes, taskMemoryManager.getTungstenMemoryMode());
this.taskMemoryManager = taskMemoryManager;
this.blockManager = blockManager;
this.serializerManager = serializerManager;
this.taskContext = taskContext;
this.recordComparatorSupplier = recordComparatorSupplier;
this.prefixComparator = prefixComparator;
// Use getSizeAsKb (not bytes) to maintain backwards compatibility for units
// this.fileBufferSizeBytes = (int) conf.getSizeAsKb("spark.shuffle.file.buffer", "32k") * 1024
this.fileBufferSizeBytes = 32 * 1024;
if (existingInMemorySorter == null) {
RecordComparator comparator = null;
if (recordComparatorSupplier != null) {
comparator = recordComparatorSupplier.get();
}
this.inMemSorter = new UnsafeInMemorySorter(
this,
taskMemoryManager,
comparator,
prefixComparator,
initialSize,
canUseRadixSort);
} else {
this.inMemSorter = existingInMemorySorter;
}
this.peakMemoryUsedBytes = getMemoryUsage();
this.numElementsForSpillThreshold = numElementsForSpillThreshold;
// Register a cleanup task with TaskContext to ensure that memory is guaranteed to be freed at
    // the end of the task. This is necessary to avoid memory leaks when the downstream operator
// does not fully consume the sorter's output (e.g. sort followed by limit).
taskContext.addTaskCompletionListener(context -> {
cleanupResources();
});
}
/**
   * Marks the current page as no-more-space-available; as a result, we will either allocate a
   * new page or spill when we see the next record.
*/
@VisibleForTesting
public void closeCurrentPage() {
if (currentPage != null) {
pageCursor = currentPage.getBaseOffset() + currentPage.size();
}
}
/**
* Sort and spill the current records in response to memory pressure.
*/
@Override
public long spill(long size, MemoryConsumer trigger) throws IOException {
if (trigger != this) {
if (readingIterator != null) {
return readingIterator.spill();
}
return 0L; // this should throw exception
}
if (inMemSorter == null || inMemSorter.numRecords() <= 0) {
return 0L;
}
logger.info("Thread {} spilling sort data of {} to disk ({} {} so far)",
Thread.currentThread().getId(),
Utils.bytesToString(getMemoryUsage()),
spillWriters.size(),
      spillWriters.size() > 1 ? "times" : "time");
ShuffleWriteMetrics writeMetrics = new ShuffleWriteMetrics();
// We only write out contents of the inMemSorter if it is not empty.
if (inMemSorter.numRecords() > 0) {
final UnsafeSorterSpillWriter spillWriter =
new UnsafeSorterSpillWriter(blockManager, fileBufferSizeBytes, writeMetrics,
inMemSorter.numRecords());
spillWriters.add(spillWriter);
spillIterator(inMemSorter.getSortedIterator(), spillWriter);
}
final long spillSize = freeMemory();
// Note that this is more-or-less going to be a multiple of the page size, so wasted space in
// pages will currently be counted as memory spilled even though that space isn't actually
// written to disk. This also counts the space needed to store the sorter's pointer array.
inMemSorter.reset();
// Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
// records. Otherwise, if the task is over allocated memory, then without freeing the memory
// pages, we might not be able to get memory for the pointer array.
taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
taskContext.taskMetrics().incDiskBytesSpilled(writeMetrics.bytesWritten());
totalSpillBytes += spillSize;
return spillSize;
}
/**
* Return the total memory usage of this sorter, including the data pages and the sorter's pointer
* array.
*/
private long getMemoryUsage() {
long totalPageSize = 0;
for (MemoryBlock page : allocatedPages) {
totalPageSize += page.size();
}
return ((inMemSorter == null) ? 0 : inMemSorter.getMemoryUsage()) + totalPageSize;
}
private void updatePeakMemoryUsed() {
long mem = getMemoryUsage();
if (mem > peakMemoryUsedBytes) {
peakMemoryUsedBytes = mem;
}
}
/**
* Return the peak memory used so far, in bytes.
*/
public long getPeakMemoryUsedBytes() {
updatePeakMemoryUsed();
return peakMemoryUsedBytes;
}
/**
* @return the total amount of time spent sorting data (in-memory only).
*/
public long getSortTimeNanos() {
UnsafeInMemorySorter sorter = inMemSorter;
if (sorter != null) {
return sorter.getSortTimeNanos();
}
return totalSortTimeNanos;
}
/**
   * Return the total number of bytes that have been spilled to disk so far.
*/
public long getSpillSize() {
return totalSpillBytes;
}
@VisibleForTesting
public int getNumberOfAllocatedPages() {
return allocatedPages.size();
}
/**
* Free this sorter's data pages.
*
* @return the number of bytes freed.
*/
private long freeMemory() {
updatePeakMemoryUsed();
long memoryFreed = 0;
for (MemoryBlock block : allocatedPages) {
memoryFreed += block.size();
freePage(block);
}
allocatedPages.clear();
currentPage = null;
pageCursor = 0;
return memoryFreed;
}
/**
* Deletes any spill files created by this sorter.
*/
private void deleteSpillFiles() {
for (UnsafeSorterSpillWriter spill : spillWriters) {
File file = spill.getFile();
if (file != null && file.exists()) {
if (!file.delete()) {
logger.error("Was unable to delete spill file {}", file.getAbsolutePath());
}
}
}
}
/**
* Frees this sorter's in-memory data structures and cleans up its spill files.
*/
public void cleanupResources() {
synchronized (this) {
deleteSpillFiles();
freeMemory();
if (inMemSorter != null) {
inMemSorter.free();
inMemSorter = null;
}
}
}
/**
   * Checks whether there is enough space to insert an additional record into the sort pointer
* array and grows the array if additional space is required. If the required space cannot be
* obtained, then the in-memory data will be spilled to disk.
*/
private void growPointerArrayIfNecessary() throws IOException {
assert(inMemSorter != null);
if (!inMemSorter.hasSpaceForAnotherRecord()) {
long used = inMemSorter.getMemoryUsage();
LongArray array;
try {
// could trigger spilling
array = allocateArray(used / 8 * 2);
} catch (TooLargePageException e) {
        // The pointer array is too big to fit in a single page, spill.
spill();
return;
} catch (SparkOutOfMemoryError e) {
        // should have triggered spilling
if (!inMemSorter.hasSpaceForAnotherRecord()) {
logger.error("Unable to grow the pointer array");
throw e;
}
return;
}
// check if spilling is triggered or not
if (inMemSorter.hasSpaceForAnotherRecord()) {
freeArray(array);
} else {
inMemSorter.expandPointerArray(array);
}
}
}
/**
* Allocates more memory in order to insert an additional record. This will request additional
* memory from the memory manager and spill if the requested memory can not be obtained.
*
* @param required the required space in the data page, in bytes, including space for storing
* the record size. This must be less than or equal to the page size (records
* that exceed the page size are handled via a different code path which uses
* special overflow pages).
*/
private void acquireNewPageIfNecessary(int required) {
if (currentPage == null ||
pageCursor + required > currentPage.getBaseOffset() + currentPage.size()) {
// TODO: try to find space on previous pages
currentPage = allocatePage(required);
pageCursor = currentPage.getBaseOffset();
allocatedPages.add(currentPage);
}
}
/**
* Write a record to the sorter.
*/
public void insertRecord(
Object recordBase, long recordOffset, int length, long prefix, boolean prefixIsNull)
throws IOException {
assert(inMemSorter != null);
if (inMemSorter.numRecords() >= numElementsForSpillThreshold) {
logger.info("Spilling data because number of spilledRecords crossed the threshold " +
numElementsForSpillThreshold);
spill();
}
growPointerArrayIfNecessary();
int uaoSize = UnsafeAlignedOffset.getUaoSize();
// Need 4 or 8 bytes to store the record length.
final int required = length + uaoSize;
acquireNewPageIfNecessary(required);
final Object base = currentPage.getBaseObject();
final long recordAddress = taskMemoryManager.encodePageNumberAndOffset(currentPage, pageCursor);
UnsafeAlignedOffset.putSize(base, pageCursor, length);
pageCursor += uaoSize;
Platform.copyMemory(recordBase, recordOffset, base, pageCursor, length);
pageCursor += length;
inMemSorter.insertRecord(recordAddress, prefix, prefixIsNull);
}
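  // Worked example (illustrative): with a 4-byte UAO size, inserting a 64-byte record reserves
  // required = 64 + 4 = 68 bytes in the current page - a 4-byte length word followed by the 64
  // record bytes - and then hands the encoded page/offset address to the in-memory sorter.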
/**
* Write a key-value record to the sorter. The key and value will be put together in-memory,
* using the following format:
*
* record length (4 bytes), key length (4 bytes), key data, value data
*
* record length = key length + value length + 4
*/
public void insertKVRecord(Object keyBase, long keyOffset, int keyLen,
Object valueBase, long valueOffset, int valueLen, long prefix, boolean prefixIsNull)
throws IOException {
growPointerArrayIfNecessary();
int uaoSize = UnsafeAlignedOffset.getUaoSize();
final int required = keyLen + valueLen + (2 * uaoSize);
acquireNewPageIfNecessary(required);
final Object base = currentPage.getBaseObject();
final long recordAddress = taskMemoryManager.encodePageNumberAndOffset(currentPage, pageCursor);
UnsafeAlignedOffset.putSize(base, pageCursor, keyLen + valueLen + uaoSize);
pageCursor += uaoSize;
UnsafeAlignedOffset.putSize(base, pageCursor, keyLen);
pageCursor += uaoSize;
Platform.copyMemory(keyBase, keyOffset, base, pageCursor, keyLen);
pageCursor += keyLen;
Platform.copyMemory(valueBase, valueOffset, base, pageCursor, valueLen);
pageCursor += valueLen;
assert(inMemSorter != null);
inMemSorter.insertRecord(recordAddress, prefix, prefixIsNull);
}
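  // Worked example (illustrative): with a 4-byte UAO size, keyLen = 8 and valueLen = 16 are laid
  // out as [record length = 8 + 16 + 4 = 28][key length = 8][8 key bytes][16 value bytes], so the
  // page must provide keyLen + valueLen + 2 * uaoSize = 32 bytes for this entry.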
/**
   * Merges another UnsafeExternalSorter into this one; the other one will be emptied.
*
* @throws IOException
*/
public void merge(UnsafeExternalSorter other) throws IOException {
other.spill();
spillWriters.addAll(other.spillWriters);
    // remove them from `other.spillWriters`, or the files will be deleted by
    // `other.cleanupResources()` below.
other.spillWriters.clear();
other.cleanupResources();
}
/**
* Returns a sorted iterator. It is the caller's responsibility to call `cleanupResources()`
* after consuming this iterator.
*/
public UnsafeSorterIterator getSortedIterator() throws IOException {
assert(recordComparatorSupplier != null);
if (spillWriters.isEmpty()) {
assert(inMemSorter != null);
readingIterator = new SpillableIterator(inMemSorter.getSortedIterator());
return readingIterator;
} else {
final UnsafeSorterSpillMerger spillMerger = new UnsafeSorterSpillMerger(
recordComparatorSupplier.get(), prefixComparator, spillWriters.size());
for (UnsafeSorterSpillWriter spillWriter : spillWriters) {
spillMerger.addSpillIfNotEmpty(spillWriter.getReader(serializerManager));
}
if (inMemSorter != null) {
readingIterator = new SpillableIterator(inMemSorter.getSortedIterator());
spillMerger.addSpillIfNotEmpty(readingIterator);
}
return spillMerger.getSortedIterator();
}
}
@VisibleForTesting boolean hasSpaceForAnotherRecord() {
return inMemSorter.hasSpaceForAnotherRecord();
}
private static void spillIterator(UnsafeSorterIterator inMemIterator,
UnsafeSorterSpillWriter spillWriter) throws IOException {
while (inMemIterator.hasNext()) {
inMemIterator.loadNext();
final Object baseObject = inMemIterator.getBaseObject();
final long baseOffset = inMemIterator.getBaseOffset();
final int recordLength = inMemIterator.getRecordLength();
spillWriter.write(baseObject, baseOffset, recordLength, inMemIterator.getKeyPrefix());
}
spillWriter.close();
}
/**
   * An UnsafeSorterIterator that supports spilling.
*/
class SpillableIterator extends UnsafeSorterIterator {
private UnsafeSorterIterator upstream;
private UnsafeSorterIterator nextUpstream = null;
private MemoryBlock lastPage = null;
private boolean loaded = false;
private int numRecords = 0;
SpillableIterator(UnsafeSorterIterator inMemIterator) {
this.upstream = inMemIterator;
this.numRecords = inMemIterator.getNumRecords();
}
@Override
public int getNumRecords() {
return numRecords;
}
public long spill() throws IOException {
synchronized (this) {
if (!(upstream instanceof UnsafeInMemorySorter.SortedIterator && nextUpstream == null
&& numRecords > 0)) {
return 0L;
}
UnsafeInMemorySorter.SortedIterator inMemIterator =
((UnsafeInMemorySorter.SortedIterator) upstream).clone();
ShuffleWriteMetrics writeMetrics = new ShuffleWriteMetrics();
// Iterate over the records that have not been returned and spill them.
final UnsafeSorterSpillWriter spillWriter =
new UnsafeSorterSpillWriter(blockManager, fileBufferSizeBytes, writeMetrics, numRecords);
spillIterator(inMemIterator, spillWriter);
spillWriters.add(spillWriter);
nextUpstream = spillWriter.getReader(serializerManager);
long released = 0L;
synchronized (UnsafeExternalSorter.this) {
// release the pages except the one that is used. There can still be a caller that
// is accessing the current record. We free this page in that caller's next loadNext()
// call.
for (MemoryBlock page : allocatedPages) {
if (!loaded || page.pageNumber !=
((UnsafeInMemorySorter.SortedIterator)upstream).getCurrentPageNumber()) {
released += page.size();
freePage(page);
} else {
lastPage = page;
}
}
allocatedPages.clear();
}
// in-memory sorter will not be used after spilling
assert(inMemSorter != null);
released += inMemSorter.getMemoryUsage();
totalSortTimeNanos += inMemSorter.getSortTimeNanos();
inMemSorter.free();
inMemSorter = null;
taskContext.taskMetrics().incMemoryBytesSpilled(released);
taskContext.taskMetrics().incDiskBytesSpilled(writeMetrics.bytesWritten());
totalSpillBytes += released;
return released;
}
}
@Override
public boolean hasNext() {
return numRecords > 0;
}
@Override
public void loadNext() throws IOException {
MemoryBlock pageToFree = null;
try {
synchronized (this) {
loaded = true;
if (nextUpstream != null) {
// Just consumed the last record from in memory iterator
if(lastPage != null) {
// Do not free the page here, while we are locking `SpillableIterator`. The `freePage`
// method locks the `TaskMemoryManager`, and it's a bad idea to lock 2 objects in
// sequence. We may hit dead lock if another thread locks `TaskMemoryManager` and
// `SpillableIterator` in sequence, which may happen in
// `TaskMemoryManager.acquireExecutionMemory`.
pageToFree = lastPage;
lastPage = null;
}
upstream = nextUpstream;
nextUpstream = null;
}
numRecords--;
upstream.loadNext();
}
} finally {
if (pageToFree != null) {
freePage(pageToFree);
}
}
}
@Override
public Object getBaseObject() {
return upstream.getBaseObject();
}
@Override
public long getBaseOffset() {
return upstream.getBaseOffset();
}
@Override
public int getRecordLength() {
return upstream.getRecordLength();
}
@Override
public long getKeyPrefix() {
return upstream.getKeyPrefix();
}
}
/**
   * Returns an iterator starting from startIndex, which returns the rows in the order in which
   * they were inserted.
*
* It is the caller's responsibility to call `cleanupResources()`
* after consuming this iterator.
*
* TODO: support forced spilling
*/
public UnsafeSorterIterator getIterator(int startIndex) throws IOException {
if (spillWriters.isEmpty()) {
assert(inMemSorter != null);
UnsafeSorterIterator iter = inMemSorter.getSortedIterator();
moveOver(iter, startIndex);
return iter;
} else {
LinkedList<UnsafeSorterIterator> queue = new LinkedList<>();
int i = 0;
for (UnsafeSorterSpillWriter spillWriter : spillWriters) {
if (i + spillWriter.recordsSpilled() > startIndex) {
UnsafeSorterIterator iter = spillWriter.getReader(serializerManager);
moveOver(iter, startIndex - i);
queue.add(iter);
}
i += spillWriter.recordsSpilled();
}
if (inMemSorter != null) {
UnsafeSorterIterator iter = inMemSorter.getSortedIterator();
moveOver(iter, startIndex - i);
queue.add(iter);
}
return new ChainedIterator(queue);
}
}
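  // Illustrative walk-through: with two spill files of 100 records each and records still in
  // memory, getIterator(130) skips the first file entirely, advances 30 records into the second
  // file, and chains what remains of that file with the in-memory iterator.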
private void moveOver(UnsafeSorterIterator iter, int steps)
throws IOException {
if (steps > 0) {
for (int i = 0; i < steps; i++) {
if (iter.hasNext()) {
iter.loadNext();
} else {
throw new ArrayIndexOutOfBoundsException("Failed to move the iterator " + steps +
" steps forward");
}
}
}
}
/**
   * Chains multiple UnsafeSorterIterators together into a single one.
*/
static class ChainedIterator extends UnsafeSorterIterator {
private final Queue<UnsafeSorterIterator> iterators;
private UnsafeSorterIterator current;
private int numRecords;
ChainedIterator(Queue<UnsafeSorterIterator> iterators) {
assert iterators.size() > 0;
this.numRecords = 0;
for (UnsafeSorterIterator iter: iterators) {
this.numRecords += iter.getNumRecords();
}
this.iterators = iterators;
this.current = iterators.remove();
}
@Override
public int getNumRecords() {
return numRecords;
}
@Override
public boolean hasNext() {
while (!current.hasNext() && !iterators.isEmpty()) {
current = iterators.remove();
}
return current.hasNext();
}
@Override
public void loadNext() throws IOException {
while (!current.hasNext() && !iterators.isEmpty()) {
current = iterators.remove();
}
current.loadNext();
}
@Override
public Object getBaseObject() { return current.getBaseObject(); }
@Override
public long getBaseOffset() { return current.getBaseOffset(); }
@Override
public int getRecordLength() { return current.getRecordLength(); }
@Override
public long getKeyPrefix() { return current.getKeyPrefix(); }
}
}
| 9,643 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/io/ReadAheadInputStream.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.io;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import org.apache.spark.util.ThreadUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.concurrent.GuardedBy;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.nio.ByteBuffer;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
/**
* {@link InputStream} implementation which asynchronously reads ahead from the underlying input
 * stream when a specified amount of data has been read from the current buffer. It does this by
 * maintaining two buffers - an active buffer and a read ahead buffer. The active buffer contains
 * data which should be returned when a read() call is issued. The read ahead buffer is used to
* asynchronously read from the underlying input stream and once the current active buffer is
* exhausted, we flip the two buffers so that we can start reading from the read ahead buffer
* without being blocked in disk I/O.
*/
public class ReadAheadInputStream extends InputStream {
private static final Logger logger = LoggerFactory.getLogger(ReadAheadInputStream.class);
private ReentrantLock stateChangeLock = new ReentrantLock();
@GuardedBy("stateChangeLock")
private ByteBuffer activeBuffer;
@GuardedBy("stateChangeLock")
private ByteBuffer readAheadBuffer;
@GuardedBy("stateChangeLock")
private boolean endOfStream;
@GuardedBy("stateChangeLock")
// true if async read is in progress
private boolean readInProgress;
@GuardedBy("stateChangeLock")
// true if read is aborted due to an exception in reading from underlying input stream.
private boolean readAborted;
@GuardedBy("stateChangeLock")
private Throwable readException;
@GuardedBy("stateChangeLock")
// whether the close method is called.
private boolean isClosed;
@GuardedBy("stateChangeLock")
// true when the close method will close the underlying input stream. This is valid only if
// `isClosed` is true.
private boolean isUnderlyingInputStreamBeingClosed;
@GuardedBy("stateChangeLock")
  // whether there is a read ahead task running.
private boolean isReading;
// whether there is a reader waiting for data.
private AtomicBoolean isWaiting = new AtomicBoolean(false);
private final InputStream underlyingInputStream;
private final ExecutorService executorService =
ThreadUtils.newDaemonSingleThreadExecutor("read-ahead");
private final Condition asyncReadComplete = stateChangeLock.newCondition();
private static final ThreadLocal<byte[]> oneByte = ThreadLocal.withInitial(() -> new byte[1]);
/**
   * Creates a <code>ReadAheadInputStream</code> with the specified buffer size.
*
* @param inputStream The underlying input stream.
* @param bufferSizeInBytes The buffer size.
*/
public ReadAheadInputStream(
InputStream inputStream, int bufferSizeInBytes) {
Preconditions.checkArgument(bufferSizeInBytes > 0,
"bufferSizeInBytes should be greater than 0, but the value is " + bufferSizeInBytes);
activeBuffer = ByteBuffer.allocate(bufferSizeInBytes);
readAheadBuffer = ByteBuffer.allocate(bufferSizeInBytes);
this.underlyingInputStream = inputStream;
activeBuffer.flip();
readAheadBuffer.flip();
}
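  // Illustrative usage sketch (hypothetical file name and sizes): wrap a plain FileInputStream so
  // the next chunk is fetched in the background while the caller processes the current one.
  //   try (InputStream in = new ReadAheadInputStream(
  //       new java.io.FileInputStream("spill.bin"), 1 << 20)) {
  //     byte[] buf = new byte[8192];
  //     while (in.read(buf, 0, buf.length) != -1) { /* process buf */ }
  //   }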
private boolean isEndOfStream() {
return (!activeBuffer.hasRemaining() && !readAheadBuffer.hasRemaining() && endOfStream);
}
private void checkReadException() throws IOException {
if (readAborted) {
Throwables.propagateIfPossible(readException, IOException.class);
throw new IOException(readException);
}
}
/** Read data from underlyingInputStream to readAheadBuffer asynchronously. */
private void readAsync() throws IOException {
stateChangeLock.lock();
final byte[] arr = readAheadBuffer.array();
try {
if (endOfStream || readInProgress) {
return;
}
checkReadException();
readAheadBuffer.position(0);
readAheadBuffer.flip();
readInProgress = true;
} finally {
stateChangeLock.unlock();
}
executorService.execute(new Runnable() {
@Override
public void run() {
stateChangeLock.lock();
try {
if (isClosed) {
readInProgress = false;
return;
}
// Flip this so that the close method will not close the underlying input stream when we
// are reading.
isReading = true;
} finally {
stateChangeLock.unlock();
}
// Please note that it is safe to release the lock and read into the read ahead buffer
// because either of following two conditions will hold - 1. The active buffer has
// data available to read so the reader will not read from the read ahead buffer.
// 2. This is the first time read is called or the active buffer is exhausted,
// in that case the reader waits for this async read to complete.
// So there is no race condition in both the situations.
int read = 0;
int off = 0, len = arr.length;
Throwable exception = null;
try {
// try to fill the read ahead buffer.
// if a reader is waiting, possibly return early.
do {
read = underlyingInputStream.read(arr, off, len);
if (read <= 0) break;
off += read;
len -= read;
} while (len > 0 && !isWaiting.get());
} catch (Throwable ex) {
exception = ex;
if (ex instanceof Error) {
// `readException` may not be reported to the user. Rethrow Error to make sure at least
// The user can see Error in UncaughtExceptionHandler.
throw (Error) ex;
}
} finally {
stateChangeLock.lock();
readAheadBuffer.limit(off);
if (read < 0 || (exception instanceof EOFException)) {
endOfStream = true;
} else if (exception != null) {
readAborted = true;
readException = exception;
}
readInProgress = false;
signalAsyncReadComplete();
stateChangeLock.unlock();
closeUnderlyingInputStreamIfNecessary();
}
}
});
}
private void closeUnderlyingInputStreamIfNecessary() {
boolean needToCloseUnderlyingInputStream = false;
stateChangeLock.lock();
try {
isReading = false;
if (isClosed && !isUnderlyingInputStreamBeingClosed) {
// close method cannot close underlyingInputStream because we were reading.
needToCloseUnderlyingInputStream = true;
}
} finally {
stateChangeLock.unlock();
}
if (needToCloseUnderlyingInputStream) {
try {
underlyingInputStream.close();
} catch (IOException e) {
logger.warn(e.getMessage(), e);
}
}
}
private void signalAsyncReadComplete() {
stateChangeLock.lock();
try {
asyncReadComplete.signalAll();
} finally {
stateChangeLock.unlock();
}
}
private void waitForAsyncReadComplete() throws IOException {
stateChangeLock.lock();
isWaiting.set(true);
try {
// There is only one reader, and one writer, so the writer should signal only once,
// but a while loop checking the wake up condition is still needed to avoid spurious wakeups.
while (readInProgress) {
asyncReadComplete.await();
}
} catch (InterruptedException e) {
InterruptedIOException iio = new InterruptedIOException(e.getMessage());
iio.initCause(e);
throw iio;
} finally {
isWaiting.set(false);
stateChangeLock.unlock();
}
checkReadException();
}
@Override
public int read() throws IOException {
if (activeBuffer.hasRemaining()) {
// short path - just get one byte.
return activeBuffer.get() & 0xFF;
} else {
byte[] oneByteArray = oneByte.get();
return read(oneByteArray, 0, 1) == -1 ? -1 : oneByteArray[0] & 0xFF;
}
}
@Override
public int read(byte[] b, int offset, int len) throws IOException {
if (offset < 0 || len < 0 || len > b.length - offset) {
throw new IndexOutOfBoundsException();
}
if (len == 0) {
return 0;
}
if (!activeBuffer.hasRemaining()) {
      // No remaining data in the active buffer - lock and switch to the read ahead buffer.
stateChangeLock.lock();
try {
waitForAsyncReadComplete();
if (!readAheadBuffer.hasRemaining()) {
// The first read.
readAsync();
waitForAsyncReadComplete();
if (isEndOfStream()) {
return -1;
}
}
// Swap the newly read read ahead buffer in place of empty active buffer.
swapBuffers();
// After swapping buffers, trigger another async read for read ahead buffer.
readAsync();
} finally {
stateChangeLock.unlock();
}
}
len = Math.min(len, activeBuffer.remaining());
activeBuffer.get(b, offset, len);
return len;
}
/**
* flip the active and read ahead buffer
*/
private void swapBuffers() {
ByteBuffer temp = activeBuffer;
activeBuffer = readAheadBuffer;
readAheadBuffer = temp;
}
@Override
public int available() throws IOException {
stateChangeLock.lock();
// Make sure we have no integer overflow.
try {
return (int) Math.min((long) Integer.MAX_VALUE,
(long) activeBuffer.remaining() + readAheadBuffer.remaining());
} finally {
stateChangeLock.unlock();
}
}
@Override
public long skip(long n) throws IOException {
if (n <= 0L) {
return 0L;
}
if (n <= activeBuffer.remaining()) {
// Only skipping from active buffer is sufficient
activeBuffer.position((int) n + activeBuffer.position());
return n;
}
stateChangeLock.lock();
long skipped;
try {
skipped = skipInternal(n);
} finally {
stateChangeLock.unlock();
}
return skipped;
}
/**
* Internal skip function which should be called only from skip() api. The assumption is that
* the stateChangeLock is already acquired in the caller before calling this function.
*/
private long skipInternal(long n) throws IOException {
assert (stateChangeLock.isLocked());
waitForAsyncReadComplete();
if (isEndOfStream()) {
return 0;
}
if (available() >= n) {
// we can skip from the internal buffers
int toSkip = (int) n;
// We need to skip from both active buffer and read ahead buffer
toSkip -= activeBuffer.remaining();
assert(toSkip > 0); // skipping from activeBuffer already handled.
activeBuffer.position(0);
activeBuffer.flip();
readAheadBuffer.position(toSkip + readAheadBuffer.position());
swapBuffers();
// Trigger async read to emptied read ahead buffer.
readAsync();
return n;
} else {
int skippedBytes = available();
long toSkip = n - skippedBytes;
activeBuffer.position(0);
activeBuffer.flip();
readAheadBuffer.position(0);
readAheadBuffer.flip();
long skippedFromInputStream = underlyingInputStream.skip(toSkip);
readAsync();
return skippedBytes + skippedFromInputStream;
}
}
@Override
public void close() throws IOException {
boolean isSafeToCloseUnderlyingInputStream = false;
stateChangeLock.lock();
try {
if (isClosed) {
return;
}
isClosed = true;
if (!isReading) {
// Nobody is reading, so we can close the underlying input stream in this method.
isSafeToCloseUnderlyingInputStream = true;
// Flip this to make sure the read ahead task will not close the underlying input stream.
isUnderlyingInputStreamBeingClosed = true;
}
} finally {
stateChangeLock.unlock();
}
try {
executorService.shutdownNow();
executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
} catch (InterruptedException e) {
InterruptedIOException iio = new InterruptedIOException(e.getMessage());
iio.initCause(e);
throw iio;
} finally {
if (isSafeToCloseUnderlyingInputStream) {
underlyingInputStream.close();
}
}
}
}
| 9,644 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/io/NioBufferedFileInputStream.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.io;
import org.apache.spark.storage.StorageUtils;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.StandardOpenOption;
/**
* {@link InputStream} implementation which uses direct buffer
* to read a file to avoid extra copy of data between Java and
* native memory which happens when using {@link java.io.BufferedInputStream}.
 * Unfortunately, this is not something already available in the JDK:
* {@link sun.nio.ch.ChannelInputStream} supports reading a file using nio,
* but does not support buffering.
*/
public final class NioBufferedFileInputStream extends InputStream {
private static final int DEFAULT_BUFFER_SIZE_BYTES = 8192;
private final ByteBuffer byteBuffer;
private final FileChannel fileChannel;
public NioBufferedFileInputStream(File file, int bufferSizeInBytes) throws IOException {
byteBuffer = ByteBuffer.allocateDirect(bufferSizeInBytes);
fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
byteBuffer.flip();
}
public NioBufferedFileInputStream(File file) throws IOException {
this(file, DEFAULT_BUFFER_SIZE_BYTES);
}
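  // Illustrative usage sketch (hypothetical path and buffer size): stream a file through a 64 KB
  // direct buffer instead of a java.io.BufferedInputStream heap buffer.
  //   try (InputStream in = new NioBufferedFileInputStream(new File("/tmp/data.bin"), 64 * 1024)) {
  //     int b;
  //     while ((b = in.read()) != -1) { /* consume b */ }
  //   }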
/**
   * Checks whether data is left to be read from the input stream.
* @return true if data is left, false otherwise
* @throws IOException
*/
private boolean refill() throws IOException {
if (!byteBuffer.hasRemaining()) {
byteBuffer.clear();
int nRead = 0;
while (nRead == 0) {
nRead = fileChannel.read(byteBuffer);
}
if (nRead < 0) {
return false;
}
byteBuffer.flip();
}
return true;
}
@Override
public synchronized int read() throws IOException {
if (!refill()) {
return -1;
}
return byteBuffer.get() & 0xFF;
}
@Override
public synchronized int read(byte[] b, int offset, int len) throws IOException {
if (offset < 0 || len < 0 || offset + len < 0 || offset + len > b.length) {
throw new IndexOutOfBoundsException();
}
if (!refill()) {
return -1;
}
len = Math.min(len, byteBuffer.remaining());
byteBuffer.get(b, offset, len);
return len;
}
@Override
public synchronized int available() throws IOException {
return byteBuffer.remaining();
}
@Override
public synchronized long skip(long n) throws IOException {
if (n <= 0L) {
return 0L;
}
if (byteBuffer.remaining() >= n) {
// The buffered content is enough to skip
byteBuffer.position(byteBuffer.position() + (int) n);
return n;
}
long skippedFromBuffer = byteBuffer.remaining();
long toSkipFromFileChannel = n - skippedFromBuffer;
// Discard everything we have read in the buffer.
byteBuffer.position(0);
byteBuffer.flip();
return skippedFromBuffer + skipFromFileChannel(toSkipFromFileChannel);
}
private long skipFromFileChannel(long n) throws IOException {
long currentFilePosition = fileChannel.position();
long size = fileChannel.size();
if (n > size - currentFilePosition) {
fileChannel.position(size);
return size - currentFilePosition;
} else {
fileChannel.position(currentFilePosition + n);
return n;
}
}
@Override
public synchronized void close() throws IOException {
fileChannel.close();
StorageUtils.dispose(byteBuffer);
}
@Override
protected void finalize() throws IOException {
close();
}
}
| 9,645 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/status/api | Create_ds/spark/core/src/main/java/org/apache/spark/status/api/v1/ApplicationStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1;
import org.apache.spark.util.EnumUtil;
public enum ApplicationStatus {
COMPLETED,
RUNNING;
public static ApplicationStatus fromString(String str) {
return EnumUtil.parseIgnoreCase(ApplicationStatus.class, str);
}
}
| 9,646 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/status/api | Create_ds/spark/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1;
import org.apache.spark.util.EnumUtil;
import java.util.Collections;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
public enum TaskSorting {
ID,
INCREASING_RUNTIME("runtime"),
DECREASING_RUNTIME("-runtime");
private final Set<String> alternateNames;
TaskSorting(String... names) {
alternateNames = new HashSet<>();
Collections.addAll(alternateNames, names);
}
public static TaskSorting fromString(String str) {
String lower = str.toLowerCase(Locale.ROOT);
for (TaskSorting t: values()) {
if (t.alternateNames.contains(lower)) {
return t;
}
}
return EnumUtil.parseIgnoreCase(TaskSorting.class, str);
}
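  // Illustrative behaviour sketch: fromString("-runtime") resolves to DECREASING_RUNTIME through
  // the alternate-name set above, while fromString("id") falls through to
  // EnumUtil.parseIgnoreCase and matches the ID constant case-insensitively.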
}
| 9,647 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/status/api | Create_ds/spark/core/src/main/java/org/apache/spark/status/api/v1/StageStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1;
import org.apache.spark.util.EnumUtil;
public enum StageStatus {
ACTIVE,
COMPLETE,
FAILED,
PENDING,
SKIPPED;
public static StageStatus fromString(String str) {
return EnumUtil.parseIgnoreCase(StageStatus.class, str);
}
}
| 9,648 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark | Create_ds/spark/core/src/main/java/org/apache/spark/storage/TimeTrackingOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.spark.annotation.Private;
import org.apache.spark.executor.ShuffleWriteMetrics;
/**
* Intercepts write calls and tracks total time spent writing in order to update shuffle write
* metrics. Not thread safe.
*/
@Private
public final class TimeTrackingOutputStream extends OutputStream {
private final ShuffleWriteMetrics writeMetrics;
private final OutputStream outputStream;
public TimeTrackingOutputStream(ShuffleWriteMetrics writeMetrics, OutputStream outputStream) {
this.writeMetrics = writeMetrics;
this.outputStream = outputStream;
}
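  // Illustrative usage sketch (hypothetical output stream): every write/flush/close on `timed`
  // adds its elapsed nanoseconds to the supplied ShuffleWriteMetrics.
  //   ShuffleWriteMetrics metrics = new ShuffleWriteMetrics();
  //   OutputStream timed = new TimeTrackingOutputStream(
  //       metrics, new java.io.FileOutputStream("part-00000"));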
@Override
public void write(int b) throws IOException {
final long startTime = System.nanoTime();
outputStream.write(b);
writeMetrics.incWriteTime(System.nanoTime() - startTime);
}
@Override
public void write(byte[] b) throws IOException {
final long startTime = System.nanoTime();
outputStream.write(b);
writeMetrics.incWriteTime(System.nanoTime() - startTime);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
final long startTime = System.nanoTime();
outputStream.write(b, off, len);
writeMetrics.incWriteTime(System.nanoTime() - startTime);
}
@Override
public void flush() throws IOException {
final long startTime = System.nanoTime();
outputStream.flush();
writeMetrics.incWriteTime(System.nanoTime() - startTime);
}
@Override
public void close() throws IOException {
final long startTime = System.nanoTime();
outputStream.close();
writeMetrics.incWriteTime(System.nanoTime() - startTime);
}
}
| 9,649 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.map;
import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.io.Closeables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.SparkEnv;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.memory.MemoryConsumer;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.serializer.SerializerManager;
import org.apache.spark.storage.BlockManager;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.UnsafeAlignedOffset;
import org.apache.spark.unsafe.array.ByteArrayMethods;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.unsafe.hash.Murmur3_x86_32;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.collection.unsafe.sort.UnsafeSorterSpillReader;
import org.apache.spark.util.collection.unsafe.sort.UnsafeSorterSpillWriter;
/**
* An append-only hash map where keys and values are contiguous regions of bytes.
*
* This is backed by a power-of-2-sized hash table, using quadratic probing with triangular numbers,
* which is guaranteed to exhaust the space.
*
* The map can support up to 2^29 keys. If the key cardinality is higher than this, you should
* probably be using sorting instead of hashing for better cache locality.
*
* The key and values under the hood are stored together, in the following format:
* Bytes 0 to 4: len(k) (key length in bytes) + len(v) (value length in bytes) + 4
* Bytes 4 to 8: len(k)
* Bytes 8 to 8 + len(k): key data
* Bytes 8 + len(k) to 8 + len(k) + len(v): value data
* Bytes 8 + len(k) + len(v) to 8 + len(k) + len(v) + 8: pointer to next pair
*
* This means that the first four bytes store the entire record (key + value) length. This format
* is compatible with {@link org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter},
* so we can pass records from this map directly into the sorter to sort records in place.
*/
public final class BytesToBytesMap extends MemoryConsumer {
private static final Logger logger = LoggerFactory.getLogger(BytesToBytesMap.class);
private static final HashMapGrowthStrategy growthStrategy = HashMapGrowthStrategy.DOUBLING;
private final TaskMemoryManager taskMemoryManager;
/**
* A linked list for tracking all allocated data pages so that we can free all of our memory.
*/
private final LinkedList<MemoryBlock> dataPages = new LinkedList<>();
/**
* The data page that will be used to store keys and values for new hashtable entries. When this
* page becomes full, a new page will be allocated and this pointer will change to point to that
* new page.
*/
private MemoryBlock currentPage = null;
/**
* Offset into `currentPage` that points to the location where new data can be inserted into
* the page. This does not incorporate the page's base offset.
*/
private long pageCursor = 0;
/**
* The maximum number of keys that BytesToBytesMap supports. The hash table has to be
* power-of-2-sized and its backing Java array can contain at most (1 << 30) elements,
* since that's the largest power-of-2 that's less than Integer.MAX_VALUE. We need two long array
* entries per key, giving us a maximum capacity of (1 << 29).
*/
@VisibleForTesting
static final int MAX_CAPACITY = (1 << 29);
// This choice of page table size and page size means that we can address up to 500 gigabytes
// of memory.
/**
* A single array to store the key and value.
*
* Position {@code 2 * i} in the array is used to track a pointer to the key at index {@code i},
* while position {@code 2 * i + 1} in the array holds key's full 32-bit hashcode.
*/
@Nullable private LongArray longArray;
// TODO: we're wasting 32 bits of space here; we can probably store fewer bits of the hashcode
// and exploit word-alignment to use fewer bits to hold the address. This might let us store
// only one long per map entry, increasing the chance that this array will fit in cache at the
// expense of maybe performing more lookups if we have hash collisions. Say that we stored only
// 27 bits of the hashcode and 37 bits of the address. 37 bits is enough to address 1 terabyte
// of RAM given word-alignment. If we use 13 bits of this for our page table, that gives us a
// maximum page size of 2^24 * 8 = ~134 megabytes per page. This change will require us to store
// full base addresses in the page table for off-heap mode so that we can reconstruct the full
// absolute memory addresses.
/**
* Whether or not the longArray can grow. We will not insert more elements if it's false.
*/
private boolean canGrowArray = true;
private final double loadFactor;
/**
* The size of the data pages that hold key and value data. Map entries cannot span multiple
* pages, so this limits the maximum entry size.
*/
private final long pageSizeBytes;
/**
* Number of keys defined in the map.
*/
private int numKeys;
/**
* Number of values defined in the map. A key could have multiple values.
*/
private int numValues;
/**
* The map will be expanded once the number of keys exceeds this threshold.
*/
private int growthThreshold;
/**
* Mask for truncating hashcodes so that they do not exceed the long array's size.
* This is a strength reduction optimization; we're essentially performing a modulus operation,
* but doing so with a bitmask because this is a power-of-2-sized hash map.
*/
private int mask;
/**
* Return value of {@link BytesToBytesMap#lookup(Object, long, int)}.
*/
private final Location loc;
private final boolean enablePerfMetrics;
private long numProbes = 0;
private long numKeyLookups = 0;
private long peakMemoryUsedBytes = 0L;
private final int initialCapacity;
private final BlockManager blockManager;
private final SerializerManager serializerManager;
private volatile MapIterator destructiveIterator = null;
private LinkedList<UnsafeSorterSpillWriter> spillWriters = new LinkedList<>();
public BytesToBytesMap(
TaskMemoryManager taskMemoryManager,
BlockManager blockManager,
SerializerManager serializerManager,
int initialCapacity,
double loadFactor,
long pageSizeBytes,
boolean enablePerfMetrics) {
super(taskMemoryManager, pageSizeBytes, taskMemoryManager.getTungstenMemoryMode());
this.taskMemoryManager = taskMemoryManager;
this.blockManager = blockManager;
this.serializerManager = serializerManager;
this.loadFactor = loadFactor;
this.loc = new Location();
this.pageSizeBytes = pageSizeBytes;
this.enablePerfMetrics = enablePerfMetrics;
if (initialCapacity <= 0) {
throw new IllegalArgumentException("Initial capacity must be greater than 0");
}
if (initialCapacity > MAX_CAPACITY) {
throw new IllegalArgumentException(
"Initial capacity " + initialCapacity + " exceeds maximum capacity of " + MAX_CAPACITY);
}
if (pageSizeBytes > TaskMemoryManager.MAXIMUM_PAGE_SIZE_BYTES) {
throw new IllegalArgumentException("Page size " + pageSizeBytes + " cannot exceed " +
TaskMemoryManager.MAXIMUM_PAGE_SIZE_BYTES);
}
this.initialCapacity = initialCapacity;
allocate(initialCapacity);
}
public BytesToBytesMap(
TaskMemoryManager taskMemoryManager,
int initialCapacity,
long pageSizeBytes) {
this(taskMemoryManager, initialCapacity, pageSizeBytes, false);
}
public BytesToBytesMap(
TaskMemoryManager taskMemoryManager,
int initialCapacity,
long pageSizeBytes,
boolean enablePerfMetrics) {
this(
taskMemoryManager,
SparkEnv.get() != null ? SparkEnv.get().blockManager() : null,
SparkEnv.get() != null ? SparkEnv.get().serializerManager() : null,
initialCapacity,
// In order to re-use the longArray for sorting, the load factor cannot be larger than 0.5.
0.5,
pageSizeBytes,
enablePerfMetrics);
}
/**
* Returns the number of keys defined in the map.
*/
public int numKeys() { return numKeys; }
/**
* Returns the number of values defined in the map. A key could have multiple values.
*/
public int numValues() { return numValues; }
public final class MapIterator implements Iterator<Location> {
private int numRecords;
private final Location loc;
private MemoryBlock currentPage = null;
private int recordsInPage = 0;
private Object pageBaseObject;
private long offsetInPage;
    // Whether this iterator is destructive or not. When it is true, it frees each page as it moves
    // onto the next one.
private boolean destructive = false;
private UnsafeSorterSpillReader reader = null;
private MapIterator(int numRecords, Location loc, boolean destructive) {
this.numRecords = numRecords;
this.loc = loc;
this.destructive = destructive;
if (destructive) {
destructiveIterator = this;
// longArray will not be used anymore if destructive is true, release it now.
if (longArray != null) {
freeArray(longArray);
longArray = null;
}
}
}
private void advanceToNextPage() {
// SPARK-26265: We will first lock this `MapIterator` and then `TaskMemoryManager` when going
// to free a memory page by calling `freePage`. At the same time, it is possibly that another
// memory consumer first locks `TaskMemoryManager` and then this `MapIterator` when it
// acquires memory and causes spilling on this `MapIterator`. To avoid deadlock here, we keep
// reference to the page to free and free it after releasing the lock of `MapIterator`.
MemoryBlock pageToFree = null;
try {
synchronized (this) {
int nextIdx = dataPages.indexOf(currentPage) + 1;
if (destructive && currentPage != null) {
dataPages.remove(currentPage);
pageToFree = currentPage;
nextIdx--;
}
if (dataPages.size() > nextIdx) {
currentPage = dataPages.get(nextIdx);
pageBaseObject = currentPage.getBaseObject();
offsetInPage = currentPage.getBaseOffset();
recordsInPage = UnsafeAlignedOffset.getSize(pageBaseObject, offsetInPage);
offsetInPage += UnsafeAlignedOffset.getUaoSize();
} else {
currentPage = null;
if (reader != null) {
handleFailedDelete();
}
try {
Closeables.close(reader, /* swallowIOException = */ false);
reader = spillWriters.getFirst().getReader(serializerManager);
recordsInPage = -1;
} catch (IOException e) {
// Scala iterator does not handle exception
Platform.throwException(e);
}
}
}
} finally {
if (pageToFree != null) {
freePage(pageToFree);
}
}
}
@Override
public boolean hasNext() {
if (numRecords == 0) {
if (reader != null) {
handleFailedDelete();
}
}
return numRecords > 0;
}
@Override
public Location next() {
if (recordsInPage == 0) {
advanceToNextPage();
}
numRecords--;
if (currentPage != null) {
int totalLength = UnsafeAlignedOffset.getSize(pageBaseObject, offsetInPage);
loc.with(currentPage, offsetInPage);
// [total size] [key size] [key] [value] [pointer to next]
offsetInPage += UnsafeAlignedOffset.getUaoSize() + totalLength + 8;
recordsInPage --;
return loc;
} else {
assert(reader != null);
if (!reader.hasNext()) {
advanceToNextPage();
}
try {
reader.loadNext();
} catch (IOException e) {
try {
reader.close();
} catch(IOException e2) {
logger.error("Error while closing spill reader", e2);
}
// Scala iterator does not handle exception
Platform.throwException(e);
}
loc.with(reader.getBaseObject(), reader.getBaseOffset(), reader.getRecordLength());
return loc;
}
}
public long spill(long numBytes) throws IOException {
synchronized (this) {
if (!destructive || dataPages.size() == 1) {
return 0L;
}
updatePeakMemoryUsed();
// TODO: use existing ShuffleWriteMetrics
ShuffleWriteMetrics writeMetrics = new ShuffleWriteMetrics();
long released = 0L;
while (dataPages.size() > 0) {
MemoryBlock block = dataPages.getLast();
// The currentPage is used, cannot be released
if (block == currentPage) {
break;
}
Object base = block.getBaseObject();
long offset = block.getBaseOffset();
int numRecords = UnsafeAlignedOffset.getSize(base, offset);
int uaoSize = UnsafeAlignedOffset.getUaoSize();
offset += uaoSize;
final UnsafeSorterSpillWriter writer =
new UnsafeSorterSpillWriter(blockManager, 32 * 1024, writeMetrics, numRecords);
while (numRecords > 0) {
int length = UnsafeAlignedOffset.getSize(base, offset);
writer.write(base, offset + uaoSize, length, 0);
offset += uaoSize + length + 8;
numRecords--;
}
writer.close();
spillWriters.add(writer);
dataPages.removeLast();
released += block.size();
freePage(block);
if (released >= numBytes) {
break;
}
}
return released;
}
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
private void handleFailedDelete() {
// remove the spill file from disk
File file = spillWriters.removeFirst().getFile();
if (file != null && file.exists() && !file.delete()) {
logger.error("Was unable to delete spill file {}", file.getAbsolutePath());
}
}
}
/**
* Returns an iterator for iterating over the entries of this map.
*
* For efficiency, all calls to `next()` will return the same {@link Location} object.
*
* If any other lookups or operations are performed on this map while iterating over it, including
* `lookup()`, the behavior of the returned iterator is undefined.
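*
* <p>A minimal iteration sketch (variable names are illustrative):</p>
* <pre>
* MapIterator iter = map.iterator();
* while (iter.hasNext()) {
*   Location loc = iter.next();
*   // read loc.getKeyBase()/getKeyOffset()/getKeyLength() and the value counterparts
*   // before calling next() again, since the same Location instance is reused
* }
* </pre>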
*/
public MapIterator iterator() {
return new MapIterator(numValues, loc, false);
}
/**
* Returns a destructive iterator for iterating over the entries of this map. It frees each page
* as it moves onto the next one. Notice: it is illegal to call any method on the map after
* `destructiveIterator()` has been called.
*
* For efficiency, all calls to `next()` will return the same {@link Location} object.
*
* If any other lookups or operations are performed on this map while iterating over it, including
* `lookup()`, the behavior of the returned iterator is undefined.
*/
public MapIterator destructiveIterator() {
updatePeakMemoryUsed();
return new MapIterator(numValues, loc, true);
}
/**
* Looks up a key, and returns a {@link Location} handle that can be used to test existence
* and read/write values.
*
* This function always returns the same {@link Location} instance to avoid object allocation.
*/
public Location lookup(Object keyBase, long keyOffset, int keyLength) {
safeLookup(keyBase, keyOffset, keyLength, loc,
Murmur3_x86_32.hashUnsafeWords(keyBase, keyOffset, keyLength, 42));
return loc;
}
/**
* Looks up a key, and returns a {@link Location} handle that can be used to test existence
* and read/write values.
*
* This function always returns the same {@link Location} instance to avoid object allocation.
*/
public Location lookup(Object keyBase, long keyOffset, int keyLength, int hash) {
safeLookup(keyBase, keyOffset, keyLength, loc, hash);
return loc;
}
/**
* Looks up a key, and saves the result in the provided `loc`.
*
* This is a thread-safe version of `lookup`, and can be used by multiple threads.
*/
public void safeLookup(Object keyBase, long keyOffset, int keyLength, Location loc, int hash) {
assert(longArray != null);
if (enablePerfMetrics) {
numKeyLookups++;
}
int pos = hash & mask;
int step = 1;
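// Probe with an increasing step size until we either find an empty slot (new key) or a slot
// whose stored hash and key bytes both match (existing key).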
while (true) {
if (enablePerfMetrics) {
numProbes++;
}
if (longArray.get(pos * 2) == 0) {
// This is a new key.
loc.with(pos, hash, false);
return;
} else {
long stored = longArray.get(pos * 2 + 1);
if ((int) (stored) == hash) {
// Full hash code matches. Let's compare the keys for equality.
loc.with(pos, hash, true);
if (loc.getKeyLength() == keyLength) {
final boolean areEqual = ByteArrayMethods.arrayEquals(
keyBase,
keyOffset,
loc.getKeyBase(),
loc.getKeyOffset(),
keyLength
);
if (areEqual) {
return;
}
}
}
}
pos = (pos + step) & mask;
step++;
}
}
/**
* Handle returned by {@link BytesToBytesMap#lookup(Object, long, int)} function.
*/
public final class Location {
/** An index into the hash map's Long array */
private int pos;
/** True if this location points to a position where a key is defined, false otherwise */
private boolean isDefined;
/**
* The hashcode of the most recent key passed to
* {@link BytesToBytesMap#lookup(Object, long, int, int)}. Caching this hashcode here allows us
* to avoid re-hashing the key when storing a value for that key.
*/
private int keyHashcode;
private Object baseObject; // the base object for key and value
private long keyOffset;
private int keyLength;
private long valueOffset;
private int valueLength;
/**
* Memory page containing the record. Only set if created by {@link BytesToBytesMap#iterator()}.
*/
@Nullable private MemoryBlock memoryPage;
private void updateAddressesAndSizes(long fullKeyAddress) {
updateAddressesAndSizes(
taskMemoryManager.getPage(fullKeyAddress),
taskMemoryManager.getOffsetInPage(fullKeyAddress));
}
private void updateAddressesAndSizes(final Object base, long offset) {
baseObject = base;
final int totalLength = UnsafeAlignedOffset.getSize(base, offset);
int uaoSize = UnsafeAlignedOffset.getUaoSize();
offset += uaoSize;
keyLength = UnsafeAlignedOffset.getSize(base, offset);
offset += uaoSize;
keyOffset = offset;
valueOffset = offset + keyLength;
valueLength = totalLength - keyLength - uaoSize;
}
private Location with(int pos, int keyHashcode, boolean isDefined) {
assert(longArray != null);
this.pos = pos;
this.isDefined = isDefined;
this.keyHashcode = keyHashcode;
if (isDefined) {
final long fullKeyAddress = longArray.get(pos * 2);
updateAddressesAndSizes(fullKeyAddress);
}
return this;
}
private Location with(MemoryBlock page, long offsetInPage) {
this.isDefined = true;
this.memoryPage = page;
updateAddressesAndSizes(page.getBaseObject(), offsetInPage);
return this;
}
/**
* This is only used for spilling
*/
private Location with(Object base, long offset, int length) {
this.isDefined = true;
this.memoryPage = null;
baseObject = base;
int uaoSize = UnsafeAlignedOffset.getUaoSize();
keyOffset = offset + uaoSize;
keyLength = UnsafeAlignedOffset.getSize(base, offset);
valueOffset = offset + uaoSize + keyLength;
valueLength = length - uaoSize - keyLength;
return this;
}
/**
* Find the next pair that has the same key as current one.
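*
* <p>A minimal sketch for reading every value stored under one key (names are illustrative):</p>
* <pre>
* Location loc = map.lookup(keyBase, keyOffset, keyLength);
* if (loc.isDefined()) {
*   do {
*     // read loc.getValueBase()/getValueOffset()/getValueLength()
*   } while (loc.nextValue());
* }
* </pre>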
*/
public boolean nextValue() {
assert isDefined;
long nextAddr = Platform.getLong(baseObject, valueOffset + valueLength);
if (nextAddr == 0) {
return false;
} else {
updateAddressesAndSizes(nextAddr);
return true;
}
}
/**
* Returns the memory page that contains the current record.
* This is only valid if this is returned by {@link BytesToBytesMap#iterator()}.
*/
public MemoryBlock getMemoryPage() {
return this.memoryPage;
}
/**
* Returns true if the key is defined at this position, and false otherwise.
*/
public boolean isDefined() {
return isDefined;
}
/**
* Returns the base object for key.
*/
public Object getKeyBase() {
assert (isDefined);
return baseObject;
}
/**
* Returns the offset for key.
*/
public long getKeyOffset() {
assert (isDefined);
return keyOffset;
}
/**
* Returns the base object for value.
*/
public Object getValueBase() {
assert (isDefined);
return baseObject;
}
/**
* Returns the offset for value.
*/
public long getValueOffset() {
assert (isDefined);
return valueOffset;
}
/**
* Returns the length of the key defined at this position.
* Unspecified behavior if the key is not defined.
*/
public int getKeyLength() {
assert (isDefined);
return keyLength;
}
/**
* Returns the length of the value defined at this position.
* Unspecified behavior if the key is not defined.
*/
public int getValueLength() {
assert (isDefined);
return valueLength;
}
/**
* Append a new value for the key. This method could be called multiple times for a given key.
* The return value indicates whether the put succeeded or whether it failed because additional
* memory could not be acquired.
* <p>
* It is only valid to call this method immediately after calling `lookup()` using the same key.
* </p>
* <p>
* The key and value must be word-aligned (that is, their sizes must be a multiple of 8).
* </p>
* <p>
* After calling this method, calls to `get[Key|Value]Address()` and `get[Key|Value]Length`
* will return information on the data stored by this `append` call.
* </p>
* <p>
* As an example usage, here's the proper way to store a new key:
* </p>
* <pre>
* Location loc = map.lookup(keyBase, keyOffset, keyLength);
* if (!loc.isDefined()) {
* if (!loc.append(keyBase, keyOffset, keyLength, ...)) {
* // handle failure to grow map (by spilling, for example)
* }
* }
* </pre>
* <p>
* Unspecified behavior if the key is not defined.
* </p>
*
* @return true if the put() was successful and false if the put() failed because memory could
* not be acquired.
*/
public boolean append(Object kbase, long koff, int klen, Object vbase, long voff, int vlen) {
assert (klen % 8 == 0);
assert (vlen % 8 == 0);
assert (longArray != null);
if (numKeys == MAX_CAPACITY
// The map could be reused from the last spill (because there was not enough memory to grow);
// in that case we don't try to grow again once we hit the `growthThreshold`.
|| !canGrowArray && numKeys >= growthThreshold) {
return false;
}
// Here, we'll copy the data into our data pages. Because we only store a relative offset from
// the key address instead of storing the absolute address of the value, the key and value
// must be stored in the same memory page.
// Record layout: (uaoSize total length) (uaoSize key length) (key) (value) (8 byte pointer to next value)
int uaoSize = UnsafeAlignedOffset.getUaoSize();
final long recordLength = (2L * uaoSize) + klen + vlen + 8;
if (currentPage == null || currentPage.size() - pageCursor < recordLength) {
if (!acquireNewPage(recordLength + uaoSize)) {
return false;
}
}
// --- Append the key and value data to the current data page --------------------------------
final Object base = currentPage.getBaseObject();
long offset = currentPage.getBaseOffset() + pageCursor;
final long recordOffset = offset;
UnsafeAlignedOffset.putSize(base, offset, klen + vlen + uaoSize);
UnsafeAlignedOffset.putSize(base, offset + uaoSize, klen);
offset += (2 * uaoSize);
Platform.copyMemory(kbase, koff, base, offset, klen);
offset += klen;
Platform.copyMemory(vbase, voff, base, offset, vlen);
offset += vlen;
// put this value at the beginning of the list
Platform.putLong(base, offset, isDefined ? longArray.get(pos * 2) : 0);
// --- Update bookkeeping data structures ----------------------------------------------------
offset = currentPage.getBaseOffset();
UnsafeAlignedOffset.putSize(base, offset, UnsafeAlignedOffset.getSize(base, offset) + 1);
pageCursor += recordLength;
final long storedKeyAddress = taskMemoryManager.encodePageNumberAndOffset(
currentPage, recordOffset);
longArray.set(pos * 2, storedKeyAddress);
updateAddressesAndSizes(storedKeyAddress);
numValues++;
if (!isDefined) {
numKeys++;
longArray.set(pos * 2 + 1, keyHashcode);
isDefined = true;
if (numKeys >= growthThreshold && longArray.size() < MAX_CAPACITY) {
try {
growAndRehash();
} catch (OutOfMemoryError oom) {
canGrowArray = false;
}
}
}
return true;
}
}
/**
* Acquire a new page from the memory manager.
* @return whether there is enough space to allocate the new page.
*/
private boolean acquireNewPage(long required) {
try {
currentPage = allocatePage(required);
} catch (OutOfMemoryError e) {
return false;
}
dataPages.add(currentPage);
UnsafeAlignedOffset.putSize(currentPage.getBaseObject(), currentPage.getBaseOffset(), 0);
pageCursor = UnsafeAlignedOffset.getUaoSize();
return true;
}
@Override
public long spill(long size, MemoryConsumer trigger) throws IOException {
if (trigger != this && destructiveIterator != null) {
return destructiveIterator.spill(size);
}
return 0L;
}
/**
* Allocate new data structures for this map. When calling this outside of the constructor,
* make sure to keep references to the old data structures so that you can free them.
*
* @param capacity the new map capacity
*/
private void allocate(int capacity) {
assert (capacity >= 0);
capacity = Math.max((int) Math.min(MAX_CAPACITY, ByteArrayMethods.nextPowerOf2(capacity)), 64);
assert (capacity <= MAX_CAPACITY);
longArray = allocateArray(capacity * 2);
longArray.zeroOut();
this.growthThreshold = (int) (capacity * loadFactor);
this.mask = capacity - 1;
}
/**
* Free all allocated memory associated with this map, including the storage for keys and values
* as well as the hash map array itself.
*
* This method is idempotent and can be called multiple times.
*/
public void free() {
updatePeakMemoryUsed();
if (longArray != null) {
freeArray(longArray);
longArray = null;
}
Iterator<MemoryBlock> dataPagesIterator = dataPages.iterator();
while (dataPagesIterator.hasNext()) {
MemoryBlock dataPage = dataPagesIterator.next();
dataPagesIterator.remove();
freePage(dataPage);
}
assert(dataPages.isEmpty());
while (!spillWriters.isEmpty()) {
File file = spillWriters.removeFirst().getFile();
if (file != null && file.exists()) {
if (!file.delete()) {
logger.error("Was unable to delete spill file {}", file.getAbsolutePath());
}
}
}
}
public TaskMemoryManager getTaskMemoryManager() {
return taskMemoryManager;
}
public long getPageSizeBytes() {
return pageSizeBytes;
}
/**
* Returns the total amount of memory, in bytes, consumed by this map's managed structures.
*/
public long getTotalMemoryConsumption() {
long totalDataPagesSize = 0L;
for (MemoryBlock dataPage : dataPages) {
totalDataPagesSize += dataPage.size();
}
return totalDataPagesSize + ((longArray != null) ? longArray.memoryBlock().size() : 0L);
}
private void updatePeakMemoryUsed() {
long mem = getTotalMemoryConsumption();
if (mem > peakMemoryUsedBytes) {
peakMemoryUsedBytes = mem;
}
}
/**
* Return the peak memory used so far, in bytes.
*/
public long getPeakMemoryUsedBytes() {
updatePeakMemoryUsed();
return peakMemoryUsedBytes;
}
/**
* Returns the average number of probes per key lookup.
*/
public double getAverageProbesPerLookup() {
if (!enablePerfMetrics) {
throw new IllegalStateException();
}
return (1.0 * numProbes) / numKeyLookups;
}
@VisibleForTesting
public int getNumDataPages() {
return dataPages.size();
}
/**
* Returns the underlying {@code LongArray} backing this map's hash table.
*/
public LongArray getArray() {
assert(longArray != null);
return longArray;
}
/**
* Resets this map to its initial state.
*/
public void reset() {
updatePeakMemoryUsed();
numKeys = 0;
numValues = 0;
freeArray(longArray);
while (dataPages.size() > 0) {
MemoryBlock dataPage = dataPages.removeLast();
freePage(dataPage);
}
allocate(initialCapacity);
canGrowArray = true;
currentPage = null;
pageCursor = 0;
}
/**
* Grows the size of the hash table and re-hashes everything.
*/
@VisibleForTesting
void growAndRehash() {
assert(longArray != null);
// Store references to the old data structures to be used when we re-hash
final LongArray oldLongArray = longArray;
final int oldCapacity = (int) oldLongArray.size() / 2;
// Allocate the new data structures
allocate(Math.min(growthStrategy.nextCapacity(oldCapacity), MAX_CAPACITY));
// Re-mask (we don't recompute the hashcode because we stored all 32 bits of it)
for (int i = 0; i < oldLongArray.size(); i += 2) {
final long keyPointer = oldLongArray.get(i);
if (keyPointer == 0) {
continue;
}
final int hashcode = (int) oldLongArray.get(i + 1);
int newPos = hashcode & mask;
int step = 1;
while (longArray.get(newPos * 2) != 0) {
newPos = (newPos + step) & mask;
step++;
}
longArray.set(newPos * 2, keyPointer);
longArray.set(newPos * 2 + 1, hashcode);
}
freeArray(oldLongArray);
}
}
| 9,650 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/unsafe | Create_ds/spark/core/src/main/java/org/apache/spark/unsafe/map/HashMapGrowthStrategy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.map;
import org.apache.spark.unsafe.array.ByteArrayMethods;
/**
* Interface that defines how we can grow the size of a hash map when it is over a threshold.
*/
public interface HashMapGrowthStrategy {
int nextCapacity(int currentCapacity);
/**
* Double the size of the hash map every time.
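*
* <p>A small illustration of the growth behavior ({@code ARRAY_MAX} is the cap defined below):</p>
* <pre>
* HashMapGrowthStrategy.DOUBLING.nextCapacity(64);          // 128
* HashMapGrowthStrategy.DOUBLING.nextCapacity(1073741824);  // doubling overflows, clamped to ARRAY_MAX
* </pre>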
*/
HashMapGrowthStrategy DOUBLING = new Doubling();
class Doubling implements HashMapGrowthStrategy {
private static final int ARRAY_MAX = ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH;
@Override
public int nextCapacity(int currentCapacity) {
assert (currentCapacity > 0);
int doubleCapacity = currentCapacity * 2;
// Guard against overflow
return (doubleCapacity > 0 && doubleCapacity <= ARRAY_MAX) ? doubleCapacity : ARRAY_MAX;
}
}
}
| 9,651 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/JavaSparkContextVarargsWorkaround.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java;
import java.util.ArrayList;
import java.util.List;
// See
// http://scala-programming-language.1934581.n4.nabble.com/Workaround-for-implementing-java-varargs-in-2-7-2-final-tp1944767p1944772.html
abstract class JavaSparkContextVarargsWorkaround {
@SafeVarargs
public final <T> JavaRDD<T> union(JavaRDD<T>... rdds) {
if (rdds.length == 0) {
throw new IllegalArgumentException("Union called on empty list");
}
List<JavaRDD<T>> rest = new ArrayList<>(rdds.length - 1);
for (int i = 1; i < rdds.length; i++) {
rest.add(rdds[i]);
}
return union(rdds[0], rest);
}
public JavaDoubleRDD union(JavaDoubleRDD... rdds) {
if (rdds.length == 0) {
throw new IllegalArgumentException("Union called on empty list");
}
List<JavaDoubleRDD> rest = new ArrayList<>(rdds.length - 1);
for (int i = 1; i < rdds.length; i++) {
rest.add(rdds[i]);
}
return union(rdds[0], rest);
}
@SafeVarargs
public final <K, V> JavaPairRDD<K, V> union(JavaPairRDD<K, V>... rdds) {
if (rdds.length == 0) {
throw new IllegalArgumentException("Union called on empty list");
}
List<JavaPairRDD<K, V>> rest = new ArrayList<>(rdds.length - 1);
for (int i = 1; i < rdds.length; i++) {
rest.add(rdds[i]);
}
return union(rdds[0], rest);
}
// These methods take separate "first" and "rest" parameters so that the overloads do not share
// the same type erasure
public abstract <T> JavaRDD<T> union(JavaRDD<T> first, List<JavaRDD<T>> rest);
public abstract JavaDoubleRDD union(JavaDoubleRDD first, List<JavaDoubleRDD> rest);
public abstract <K, V> JavaPairRDD<K, V> union(JavaPairRDD<K, V> first, List<JavaPairRDD<K, V>>
rest);
}
| 9,652 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/Optional.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java;
import java.io.Serializable;
import java.util.Objects;
import com.google.common.base.Preconditions;
/**
* <p>Like {@code java.util.Optional} in Java 8, {@code scala.Option} in Scala, and
* {@code com.google.common.base.Optional} in Google Guava, this class represents a
* value of a given type that may or may not exist. It is used in methods that wish
* to optionally return a value, in preference to returning {@code null}.</p>
*
* <p>In fact, the class here is a reimplementation of the essential API of both
* {@code java.util.Optional} and {@code com.google.common.base.Optional}. From
* {@code java.util.Optional}, it implements:</p>
*
* <ul>
* <li>{@link #empty()}</li>
* <li>{@link #of(Object)}</li>
* <li>{@link #ofNullable(Object)}</li>
* <li>{@link #get()}</li>
* <li>{@link #orElse(Object)}</li>
* <li>{@link #isPresent()}</li>
* </ul>
*
* <p>From {@code com.google.common.base.Optional} it implements:</p>
*
* <ul>
* <li>{@link #absent()}</li>
* <li>{@link #of(Object)}</li>
* <li>{@link #fromNullable(Object)}</li>
* <li>{@link #get()}</li>
* <li>{@link #or(Object)}</li>
* <li>{@link #orNull()}</li>
* <li>{@link #isPresent()}</li>
* </ul>
*
* <p>{@code java.util.Optional} itself was not used because at the time, the
* project did not require Java 8. Using {@code com.google.common.base.Optional}
* has in the past caused serious library version conflicts with Guava that can't
* be resolved by shading. Hence this work-alike clone.</p>
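*
* <p>A minimal usage sketch (values are illustrative):</p>
* <pre>
* Optional&lt;String&gt; present = Optional.of("value");
* Optional&lt;String&gt; missing = Optional.empty();
* present.isPresent();          // true
* present.get();                // "value"
* missing.orElse("fallback");   // "fallback"
* missing.orNull();             // null
* </pre>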
*
* @param <T> type of value held inside
*/
public final class Optional<T> implements Serializable {
private static final Optional<?> EMPTY = new Optional<>();
private final T value;
private Optional() {
this.value = null;
}
private Optional(T value) {
Preconditions.checkNotNull(value);
this.value = value;
}
// java.util.Optional API (subset)
/**
* @return an empty {@code Optional}
*/
public static <T> Optional<T> empty() {
@SuppressWarnings("unchecked")
Optional<T> t = (Optional<T>) EMPTY;
return t;
}
/**
* @param value non-null value to wrap
* @return {@code Optional} wrapping this value
* @throws NullPointerException if value is null
*/
public static <T> Optional<T> of(T value) {
return new Optional<>(value);
}
/**
* @param value value to wrap, which may be null
* @return {@code Optional} wrapping this value, which may be empty
*/
public static <T> Optional<T> ofNullable(T value) {
if (value == null) {
return empty();
} else {
return of(value);
}
}
/**
* @return the value wrapped by this {@code Optional}
* @throws NullPointerException if this is empty (contains no value)
*/
public T get() {
Preconditions.checkNotNull(value);
return value;
}
/**
* @param other value to return if this is empty
* @return this {@code Optional}'s value if present, or else the given value
*/
public T orElse(T other) {
return value != null ? value : other;
}
/**
* @return true iff this {@code Optional} contains a value (non-empty)
*/
public boolean isPresent() {
return value != null;
}
// Guava API (subset)
// of(), get() and isPresent() are identically present in the Guava API
/**
* @return an empty {@code Optional}
*/
public static <T> Optional<T> absent() {
return empty();
}
/**
* @param value value to wrap, which may be null
* @return {@code Optional} wrapping this value, which may be empty
*/
public static <T> Optional<T> fromNullable(T value) {
return ofNullable(value);
}
/**
* @param other value to return if this is empty
* @return this {@code Optional}'s value if present, or else the given value
*/
public T or(T other) {
return value != null ? value : other;
}
/**
* @return this {@code Optional}'s value if present, or else null
*/
public T orNull() {
return value;
}
// Common methods
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Optional)) {
return false;
}
Optional<?> other = (Optional<?>) obj;
return Objects.equals(value, other.value);
}
@Override
public int hashCode() {
return value == null ? 0 : value.hashCode();
}
@Override
public String toString() {
return value == null ? "Optional.empty" : String.format("Optional[%s]", value);
}
}
| 9,653 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/JavaFutureAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java;
import java.util.List;
import java.util.concurrent.Future;
public interface JavaFutureAction<T> extends Future<T> {
/**
* Returns the job IDs run by the underlying async operation.
*
* This returns the current snapshot of the job list. Certain operations may run multiple
* jobs, so multiple calls to this method may return different lists.
*/
List<Integer> jobIds();
}
| 9,654 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/StorageLevels.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java;
import org.apache.spark.storage.StorageLevel;
/**
* Expose some commonly useful storage level constants.
*/
public class StorageLevels {
public static final StorageLevel NONE = create(false, false, false, false, 1);
public static final StorageLevel DISK_ONLY = create(true, false, false, false, 1);
public static final StorageLevel DISK_ONLY_2 = create(true, false, false, false, 2);
public static final StorageLevel MEMORY_ONLY = create(false, true, false, true, 1);
public static final StorageLevel MEMORY_ONLY_2 = create(false, true, false, true, 2);
public static final StorageLevel MEMORY_ONLY_SER = create(false, true, false, false, 1);
public static final StorageLevel MEMORY_ONLY_SER_2 = create(false, true, false, false, 2);
public static final StorageLevel MEMORY_AND_DISK = create(true, true, false, true, 1);
public static final StorageLevel MEMORY_AND_DISK_2 = create(true, true, false, true, 2);
public static final StorageLevel MEMORY_AND_DISK_SER = create(true, true, false, false, 1);
public static final StorageLevel MEMORY_AND_DISK_SER_2 = create(true, true, false, false, 2);
public static final StorageLevel OFF_HEAP = create(true, true, true, false, 1);
/**
* Create a new StorageLevel object.
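* <p>A minimal usage sketch: the flags below reproduce {@code MEMORY_AND_DISK_SER_2}
* (disk and memory enabled, serialized storage, two replicas).</p>
* <pre>
* StorageLevel level = StorageLevels.create(true, true, false, false, 2);
* </pre>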
* @param useDisk saved to disk, if true
* @param useMemory saved to on-heap memory, if true
* @param useOffHeap saved to off-heap memory, if true
* @param deserialized saved as deserialized objects, if true
* @param replication replication factor
*/
public static StorageLevel create(
boolean useDisk,
boolean useMemory,
boolean useOffHeap,
boolean deserialized,
int replication) {
return StorageLevel.apply(useDisk, useMemory, useOffHeap, deserialized, replication);
}
}
| 9,655 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/Function0.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* A zero-argument function that returns an R.
*/
@FunctionalInterface
public interface Function0<R> extends Serializable {
R call() throws Exception;
}
| 9,656 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/MapGroupsFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
/**
* Base interface for a map function used in GroupedDataset's mapGroup function.
*/
@FunctionalInterface
public interface MapGroupsFunction<K, V, R> extends Serializable {
R call(K key, Iterator<V> values) throws Exception;
}
| 9,657 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/ForeachFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* Base interface for a function used in Dataset's foreach function.
*
* Spark will invoke the call function on each element in the input Dataset.
*/
@FunctionalInterface
public interface ForeachFunction<T> extends Serializable {
void call(T t) throws Exception;
}
| 9,658 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/VoidFunction2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* A two-argument function that takes arguments of type T1 and T2 with no return value.
*/
@FunctionalInterface
public interface VoidFunction2<T1, T2> extends Serializable {
void call(T1 v1, T2 v2) throws Exception;
}
| 9,659 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/PairFlatMapFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
import scala.Tuple2;
/**
* A function that returns zero or more key-value pair records from each input record. The
* key-value pairs are represented as scala.Tuple2 objects.
*/
@FunctionalInterface
public interface PairFlatMapFunction<T, K, V> extends Serializable {
Iterator<Tuple2<K, V>> call(T t) throws Exception;
}
| 9,660 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import scala.Tuple2;
/**
* A function that returns key-value pairs (Tuple2<K, V>), and can be used to
* construct PairRDDs.
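*
* <p>A minimal usage sketch ({@code lines} is an illustrative {@code JavaRDD<String>}):</p>
* <pre>
* JavaPairRDD<String, Integer> lengths =
*   lines.mapToPair(s -> new Tuple2<>(s, s.length()));
* </pre>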
*/
@FunctionalInterface
public interface PairFunction<T, K, V> extends Serializable {
Tuple2<K, V> call(T t) throws Exception;
}
| 9,661 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/MapPartitionsFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
/**
* Base interface for function used in Dataset's mapPartitions.
*/
@FunctionalInterface
public interface MapPartitionsFunction<T, U> extends Serializable {
Iterator<U> call(Iterator<T> input) throws Exception;
}
| 9,662 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/DoubleFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* A function that returns Doubles, and can be used to construct DoubleRDDs.
*/
@FunctionalInterface
public interface DoubleFunction<T> extends Serializable {
double call(T t) throws Exception;
}
| 9,663 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/FlatMapFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
/**
* A function that returns zero or more output records from each input record.
*/
@FunctionalInterface
public interface FlatMapFunction<T, R> extends Serializable {
Iterator<R> call(T t) throws Exception;
}
| 9,664 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/FilterFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* Base interface for a function used in Dataset's filter function.
*
* If the function returns true, the element is included in the returned Dataset.
*/
@FunctionalInterface
public interface FilterFunction<T> extends Serializable {
boolean call(T value) throws Exception;
}
| 9,665 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/Function4.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* A four-argument function that takes arguments of type T1, T2, T3 and T4 and returns an R.
*/
@FunctionalInterface
public interface Function4<T1, T2, T3, T4, R> extends Serializable {
R call(T1 v1, T2 v2, T3 v3, T4 v4) throws Exception;
}
| 9,666 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/VoidFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* A function with no return value.
*/
@FunctionalInterface
public interface VoidFunction<T> extends Serializable {
void call(T t) throws Exception;
}
| 9,667 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/ReduceFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* Base interface for function used in Dataset's reduce.
*/
@FunctionalInterface
public interface ReduceFunction<T> extends Serializable {
T call(T v1, T v2) throws Exception;
}
| 9,668 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/CoGroupFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
/**
* A function that returns zero or more output records from each grouping key and its values from 2
* Datasets.
*/
@FunctionalInterface
public interface CoGroupFunction<K, V1, V2, R> extends Serializable {
Iterator<R> call(K key, Iterator<V1> left, Iterator<V2> right) throws Exception;
}
| 9,669 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/FlatMapFunction2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
/**
* A function that takes two inputs and returns zero or more output records.
*/
@FunctionalInterface
public interface FlatMapFunction2<T1, T2, R> extends Serializable {
Iterator<R> call(T1 t1, T2 t2) throws Exception;
}
| 9,670 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/DoubleFlatMapFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
/**
* A function that returns zero or more records of type Double from each input record.
*/
@FunctionalInterface
public interface DoubleFlatMapFunction<T> extends Serializable {
Iterator<Double> call(T t) throws Exception;
}
| 9,671 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/ForeachPartitionFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
/**
* Base interface for a function used in Dataset's foreachPartition function.
*/
@FunctionalInterface
public interface ForeachPartitionFunction<T> extends Serializable {
void call(Iterator<T> t) throws Exception;
}
| 9,672 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/Function2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* A two-argument function that takes arguments of type T1 and T2 and returns an R.
*/
@FunctionalInterface
public interface Function2<T1, T2, R> extends Serializable {
R call(T1 v1, T2 v2) throws Exception;
}
| 9,673 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/MapFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* Base interface for a map function used in Dataset's map function.
*/
@FunctionalInterface
public interface MapFunction<T, U> extends Serializable {
U call(T value) throws Exception;
}
| 9,674 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/Function.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* Base interface for functions whose return types do not create special RDDs. PairFunction and
* DoubleFunction are handled separately, to allow PairRDDs and DoubleRDDs to be constructed
* when mapping RDDs of other types.
*/
@FunctionalInterface
public interface Function<T1, R> extends Serializable {
R call(T1 v1) throws Exception;
}
| 9,675 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/Function3.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
/**
* A three-argument function that takes arguments of type T1, T2 and T3 and returns an R.
*/
@FunctionalInterface
public interface Function3<T1, T2, T3, R> extends Serializable {
R call(T1 v1, T2 v2, T3 v3) throws Exception;
}
| 9,676 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Set of interfaces to represent functions in Spark's Java API. Users create implementations of
* these interfaces to pass functions to various Java API methods for Spark. Please visit Spark's
* Java programming guide for more details.
*/
package org.apache.spark.api.java.function;
| 9,677 |
0 | Create_ds/spark/core/src/main/java/org/apache/spark/api/java | Create_ds/spark/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java.function;
import java.io.Serializable;
import java.util.Iterator;
/**
* A function that returns zero or more output records from each grouping key and its values.
*/
@FunctionalInterface
public interface FlatMapGroupsFunction<K, V, R> extends Serializable {
Iterator<R> call(K key, Iterator<V> values) throws Exception;
}
| 9,678 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark | Create_ds/spark/core/src/main/scala/org/apache/spark/serializer/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Pluggable serializers for RDD and shuffle data.
*/
package org.apache.spark.serializer;
| 9,679 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark | Create_ds/spark/core/src/main/scala/org/apache/spark/util/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Spark utilities.
*/
package org.apache.spark.util;
| 9,680 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark/util | Create_ds/spark/core/src/main/scala/org/apache/spark/util/random/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Utilities for random number generation.
*/
package org.apache.spark.util.random;
| 9,681 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark | Create_ds/spark/core/src/main/scala/org/apache/spark/io/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* IO codecs used for compression.
*/
package org.apache.spark.io;
| 9,682 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark | Create_ds/spark/core/src/main/scala/org/apache/spark/scheduler/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Spark's DAG scheduler.
*/
package org.apache.spark.scheduler;
| 9,683 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark | Create_ds/spark/core/src/main/scala/org/apache/spark/rdd/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides implementations of various RDDs.
*/
package org.apache.spark.rdd;
| 9,684 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark/api | Create_ds/spark/core/src/main/scala/org/apache/spark/api/java/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Spark Java programming APIs.
*/
package org.apache.spark.api.java;
| 9,685 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark | Create_ds/spark/core/src/main/scala/org/apache/spark/executor/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Package for executor components used with various cluster managers.
*/
package org.apache.spark.executor;
| 9,686 |
0 | Create_ds/spark/core/src/main/scala/org/apache/spark | Create_ds/spark/core/src/main/scala/org/apache/spark/broadcast/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Spark's broadcast variables, used to broadcast immutable datasets to all nodes.
*/
package org.apache.spark.broadcast;
| 9,687 |
0 | Create_ds/spark/graphx/src/main/java/org/apache/spark | Create_ds/spark/graphx/src/main/java/org/apache/spark/graphx/TripletFields.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx;
import java.io.Serializable;
/**
* Represents a subset of the fields of an [[EdgeTriplet]] or [[EdgeContext]]. This allows the
* system to populate only those fields for efficiency.
*/
public class TripletFields implements Serializable {
/** Indicates whether the source vertex attribute is included. */
public final boolean useSrc;
/** Indicates whether the destination vertex attribute is included. */
public final boolean useDst;
/** Indicates whether the edge attribute is included. */
public final boolean useEdge;
/** Constructs a default TripletFields in which all fields are included. */
public TripletFields() {
this(true, true, true);
}
public TripletFields(boolean useSrc, boolean useDst, boolean useEdge) {
this.useSrc = useSrc;
this.useDst = useDst;
this.useEdge = useEdge;
}
/**
* None of the triplet fields are exposed.
*/
public static final TripletFields None = new TripletFields(false, false, false);
/**
* Expose only the edge field and not the source or destination field.
*/
public static final TripletFields EdgeOnly = new TripletFields(false, false, true);
/**
* Expose the source and edge fields but not the destination field.
*/
public static final TripletFields Src = new TripletFields(true, false, true);
/**
* Expose the destination and edge fields but not the source field.
*/
public static final TripletFields Dst = new TripletFields(false, true, true);
/**
* Expose all the fields (source, edge, and destination).
*/
public static final TripletFields All = new TripletFields(true, true, true);
}
| 9,688 |
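The preset instances above are normally handed to Graph.aggregateMessages so GraphX only populates the requested triplet fields. The sketch below just inspects a preset and builds an equivalent custom instance; it is a standalone illustration, not taken from the Spark sources:

import org.apache.spark.graphx.TripletFields;

public class TripletFieldsExample {
  public static void main(String[] args) {
    // Ask for the source vertex attribute and the edge attribute only;
    // the destination attribute then does not need to be shipped with each triplet.
    TripletFields fields = TripletFields.Src;
    System.out.println(fields.useSrc);  // true
    System.out.println(fields.useDst);  // false
    System.out.println(fields.useEdge); // true

    // The same selection can be constructed explicitly.
    TripletFields custom = new TripletFields(true, false, true);
    System.out.println(custom.useSrc && !custom.useDst && custom.useEdge); // true
  }
}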
0 | Create_ds/spark/graphx/src/main/java/org/apache/spark/graphx | Create_ds/spark/graphx/src/main/java/org/apache/spark/graphx/impl/EdgeActiveness.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx.impl;
/**
* Criteria for filtering edges based on activeness. For internal use only.
*/
public enum EdgeActiveness {
/** Neither the source vertex nor the destination vertex need be active. */
Neither,
/** The source vertex must be active. */
SrcOnly,
/** The destination vertex must be active. */
DstOnly,
/** Both vertices must be active. */
Both,
/** At least one vertex must be active. */
Either
}
| 9,689 |
0 | Create_ds/spark/graphx/src/main/scala/org/apache/spark | Create_ds/spark/graphx/src/main/scala/org/apache/spark/graphx/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* ALPHA COMPONENT
* GraphX is a graph processing framework built on top of Spark.
*/
package org.apache.spark.graphx;
| 9,690 |
0 | Create_ds/spark/graphx/src/main/scala/org/apache/spark/graphx | Create_ds/spark/graphx/src/main/scala/org/apache/spark/graphx/util/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Collections of utilities used by GraphX.
*/
package org.apache.spark.graphx.util;
| 9,691 |
0 | Create_ds/spark/graphx/src/main/scala/org/apache/spark/graphx | Create_ds/spark/graphx/src/main/scala/org/apache/spark/graphx/lib/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Various analytics functions for graphs.
*/
package org.apache.spark.graphx.lib;
| 9,692 |
0 | Create_ds/spark/mllib/src/test/java/org/apache | Create_ds/spark/mllib/src/test/java/org/apache/spark/SharedSparkSession.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import java.io.IOException;
import java.io.Serializable;
import org.junit.After;
import org.junit.Before;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
public abstract class SharedSparkSession implements Serializable {
protected transient SparkSession spark;
protected transient JavaSparkContext jsc;
@Before
public void setUp() throws IOException {
spark = SparkSession.builder()
.master("local[2]")
.appName(getClass().getSimpleName())
.getOrCreate();
jsc = new JavaSparkContext(spark.sparkContext());
}
@After
public void tearDown() {
try {
spark.stop();
spark = null;
} finally {
SparkSession.clearDefaultSession();
SparkSession.clearActiveSession();
}
}
}
| 9,693 |
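Concrete MLlib Java suites reuse the spark and jsc fields set up above simply by extending the class. A minimal sketch of such a subclass, assuming the Spark test classes are on the classpath; the suite name and test body are hypothetical:

import java.util.Arrays;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;

public class ExampleSharedSessionSuite extends SharedSparkSession {
  @Test
  public void countsParallelizedElements() {
    // jsc is created in setUp() before each test and stopped via tearDown() afterwards.
    long count = jsc.parallelize(Arrays.asList(1, 2, 3), 2).count();
    Assert.assertEquals(3, count);
  }
}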
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/classification/JavaLogisticRegressionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.classification;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.regression.LabeledPoint;
public class JavaLogisticRegressionSuite extends SharedSparkSession {
int validatePrediction(List<LabeledPoint> validationData, LogisticRegressionModel model) {
int numAccurate = 0;
for (LabeledPoint point : validationData) {
Double prediction = model.predict(point.features());
if (prediction == point.label()) {
numAccurate++;
}
}
return numAccurate;
}
@Test
public void runLRUsingConstructor() {
int nPoints = 10000;
double A = 2.0;
double B = -1.5;
JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
List<LabeledPoint> validationData =
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 17);
LogisticRegressionWithSGD lrImpl = new LogisticRegressionWithSGD();
lrImpl.setIntercept(true);
lrImpl.optimizer().setStepSize(1.0)
.setRegParam(1.0)
.setNumIterations(100);
LogisticRegressionModel model = lrImpl.run(testRDD.rdd());
int numAccurate = validatePrediction(validationData, model);
Assert.assertTrue(numAccurate > nPoints * 4.0 / 5.0);
}
@Test
public void runLRUsingStaticMethods() {
int nPoints = 10000;
double A = 0.0;
double B = -2.5;
JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
List<LabeledPoint> validationData =
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 17);
LogisticRegressionModel model = LogisticRegressionWithSGD.train(
testRDD.rdd(), 100, 1.0, 1.0);
int numAccurate = validatePrediction(validationData, model);
Assert.assertTrue(numAccurate > nPoints * 4.0 / 5.0);
}
}
| 9,694 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/classification/JavaStreamingLogisticRegressionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.classification;
import java.util.Arrays;
import java.util.List;
import scala.Tuple2;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.spark.SparkConf;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import static org.apache.spark.streaming.JavaTestUtils.*;
public class JavaStreamingLogisticRegressionSuite {
protected transient JavaStreamingContext ssc;
@Before
public void setUp() {
SparkConf conf = new SparkConf()
.setMaster("local[2]")
.setAppName("test")
.set("spark.streaming.clock", "org.apache.spark.util.ManualClock");
ssc = new JavaStreamingContext(conf, new Duration(1000));
ssc.checkpoint("checkpoint");
}
@After
public void tearDown() {
ssc.stop();
ssc = null;
}
@Test
@SuppressWarnings("unchecked")
public void javaAPI() {
List<LabeledPoint> trainingBatch = Arrays.asList(
new LabeledPoint(1.0, Vectors.dense(1.0)),
new LabeledPoint(0.0, Vectors.dense(0.0)));
JavaDStream<LabeledPoint> training =
attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 2);
List<Tuple2<Integer, Vector>> testBatch = Arrays.asList(
new Tuple2<>(10, Vectors.dense(1.0)),
new Tuple2<>(11, Vectors.dense(0.0)));
JavaPairDStream<Integer, Vector> test = JavaPairDStream.fromJavaDStream(
attachTestInputStream(ssc, Arrays.asList(testBatch, testBatch), 2));
StreamingLogisticRegressionWithSGD slr = new StreamingLogisticRegressionWithSGD()
.setNumIterations(2)
.setInitialWeights(Vectors.dense(0.0));
slr.trainOn(training);
JavaPairDStream<Integer, Double> prediction = slr.predictOnValues(test);
attachTestOutputStream(prediction.count());
runStreams(ssc, 2, 2);
}
}
| 9,695 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/classification/JavaNaiveBayesSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.classification;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
public class JavaNaiveBayesSuite extends SharedSparkSession {
private static final List<LabeledPoint> POINTS = Arrays.asList(
new LabeledPoint(0, Vectors.dense(1.0, 0.0, 0.0)),
new LabeledPoint(0, Vectors.dense(2.0, 0.0, 0.0)),
new LabeledPoint(1, Vectors.dense(0.0, 1.0, 0.0)),
new LabeledPoint(1, Vectors.dense(0.0, 2.0, 0.0)),
new LabeledPoint(2, Vectors.dense(0.0, 0.0, 1.0)),
new LabeledPoint(2, Vectors.dense(0.0, 0.0, 2.0))
);
private static int validatePrediction(List<LabeledPoint> points, NaiveBayesModel model) {
int correct = 0;
for (LabeledPoint p : points) {
if (model.predict(p.features()) == p.label()) {
correct += 1;
}
}
return correct;
}
@Test
public void runUsingConstructor() {
JavaRDD<LabeledPoint> testRDD = jsc.parallelize(POINTS, 2).cache();
NaiveBayes nb = new NaiveBayes().setLambda(1.0);
NaiveBayesModel model = nb.run(testRDD.rdd());
int numAccurate = validatePrediction(POINTS, model);
Assert.assertEquals(POINTS.size(), numAccurate);
}
@Test
public void runUsingStaticMethods() {
JavaRDD<LabeledPoint> testRDD = jsc.parallelize(POINTS, 2).cache();
NaiveBayesModel model1 = NaiveBayes.train(testRDD.rdd());
int numAccurate1 = validatePrediction(POINTS, model1);
Assert.assertEquals(POINTS.size(), numAccurate1);
NaiveBayesModel model2 = NaiveBayes.train(testRDD.rdd(), 0.5);
int numAccurate2 = validatePrediction(POINTS, model2);
Assert.assertEquals(POINTS.size(), numAccurate2);
}
@Test
public void testPredictJavaRDD() {
JavaRDD<LabeledPoint> examples = jsc.parallelize(POINTS, 2).cache();
NaiveBayesModel model = NaiveBayes.train(examples.rdd());
JavaRDD<Vector> vectors = examples.map(LabeledPoint::features);
JavaRDD<Double> predictions = model.predict(vectors);
// Should be able to get the first prediction.
predictions.first();
}
@Test
public void testModelTypeSetters() {
// The setters should accept both supported model types without throwing.
NaiveBayes nb = new NaiveBayes()
.setModelType("bernoulli")
.setModelType("multinomial");
}
}
| 9,696 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/classification/JavaSVMSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.classification;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.regression.LabeledPoint;
public class JavaSVMSuite extends SharedSparkSession {
int validatePrediction(List<LabeledPoint> validationData, SVMModel model) {
int numAccurate = 0;
for (LabeledPoint point : validationData) {
Double prediction = model.predict(point.features());
if (prediction == point.label()) {
numAccurate++;
}
}
return numAccurate;
}
@Test
public void runSVMUsingConstructor() {
int nPoints = 10000;
double A = 2.0;
double[] weights = {-1.5, 1.0};
JavaRDD<LabeledPoint> testRDD = jsc.parallelize(SVMSuite.generateSVMInputAsList(A,
weights, nPoints, 42), 2).cache();
List<LabeledPoint> validationData =
SVMSuite.generateSVMInputAsList(A, weights, nPoints, 17);
SVMWithSGD svmSGDImpl = new SVMWithSGD();
svmSGDImpl.setIntercept(true);
svmSGDImpl.optimizer().setStepSize(1.0)
.setRegParam(1.0)
.setNumIterations(100);
SVMModel model = svmSGDImpl.run(testRDD.rdd());
int numAccurate = validatePrediction(validationData, model);
Assert.assertTrue(numAccurate > nPoints * 4.0 / 5.0);
}
@Test
public void runSVMUsingStaticMethods() {
int nPoints = 10000;
double A = 0.0;
double[] weights = {-1.5, 1.0};
JavaRDD<LabeledPoint> testRDD = jsc.parallelize(SVMSuite.generateSVMInputAsList(A,
weights, nPoints, 42), 2).cache();
List<LabeledPoint> validationData =
SVMSuite.generateSVMInputAsList(A, weights, nPoints, 17);
SVMModel model = SVMWithSGD.train(testRDD.rdd(), 100, 1.0, 1.0, 1.0);
int numAccurate = validatePrediction(validationData, model);
Assert.assertTrue(numAccurate > nPoints * 4.0 / 5.0);
}
}
| 9,697 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/recommendation/JavaALSSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.recommendation;
import java.util.ArrayList;
import java.util.List;
import scala.Tuple2;
import scala.Tuple3;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
public class JavaALSSuite extends SharedSparkSession {
private void validatePrediction(
MatrixFactorizationModel model,
int users,
int products,
double[] trueRatings,
double matchThreshold,
boolean implicitPrefs,
double[] truePrefs) {
List<Tuple2<Integer, Integer>> localUsersProducts = new ArrayList<>(users * products);
for (int u = 0; u < users; ++u) {
for (int p = 0; p < products; ++p) {
localUsersProducts.add(new Tuple2<>(u, p));
}
}
JavaPairRDD<Integer, Integer> usersProducts = jsc.parallelizePairs(localUsersProducts);
List<Rating> predictedRatings = model.predict(usersProducts).collect();
Assert.assertEquals(users * products, predictedRatings.size());
if (!implicitPrefs) {
for (Rating r : predictedRatings) {
double prediction = r.rating();
double correct = trueRatings[r.product() * users + r.user()];
Assert.assertTrue(String.format("Prediction=%2.4f not below match threshold of %2.2f",
prediction, matchThreshold), Math.abs(prediction - correct) < matchThreshold);
}
} else {
// For implicit prefs we use the confidence-weighted RMSE to test
// (ref Mahout's implicit ALS tests)
double sqErr = 0.0;
double denom = 0.0;
for (Rating r : predictedRatings) {
double prediction = r.rating();
double truePref = truePrefs[r.product() * users + r.user()];
double confidence = 1.0 +
/* alpha = 1.0 * ... */ Math.abs(trueRatings[r.product() * users + r.user()]);
double err = confidence * (truePref - prediction) * (truePref - prediction);
sqErr += err;
denom += confidence;
}
double rmse = Math.sqrt(sqErr / denom);
Assert.assertTrue(String.format("Confidence-weighted RMSE=%2.4f above threshold of %2.2f",
rmse, matchThreshold), rmse < matchThreshold);
}
}
@Test
public void runALSUsingStaticMethods() {
int features = 1;
int iterations = 15;
int users = 50;
int products = 100;
Tuple3<List<Rating>, double[], double[]> testData =
ALSSuite.generateRatingsAsJava(users, products, features, 0.7, false, false);
JavaRDD<Rating> data = jsc.parallelize(testData._1());
MatrixFactorizationModel model = ALS.train(data.rdd(), features, iterations);
validatePrediction(model, users, products, testData._2(), 0.3, false, testData._3());
}
@Test
public void runALSUsingConstructor() {
int features = 2;
int iterations = 15;
int users = 100;
int products = 200;
Tuple3<List<Rating>, double[], double[]> testData =
ALSSuite.generateRatingsAsJava(users, products, features, 0.7, false, false);
JavaRDD<Rating> data = jsc.parallelize(testData._1());
MatrixFactorizationModel model = new ALS().setRank(features)
.setIterations(iterations)
.run(data);
validatePrediction(model, users, products, testData._2(), 0.3, false, testData._3());
}
@Test
public void runImplicitALSUsingStaticMethods() {
int features = 1;
int iterations = 15;
int users = 80;
int products = 160;
Tuple3<List<Rating>, double[], double[]> testData =
ALSSuite.generateRatingsAsJava(users, products, features, 0.7, true, false);
JavaRDD<Rating> data = jsc.parallelize(testData._1());
MatrixFactorizationModel model = ALS.trainImplicit(data.rdd(), features, iterations);
validatePrediction(model, users, products, testData._2(), 0.4, true, testData._3());
}
@Test
public void runImplicitALSUsingConstructor() {
int features = 2;
int iterations = 15;
int users = 100;
int products = 200;
Tuple3<List<Rating>, double[], double[]> testData =
ALSSuite.generateRatingsAsJava(users, products, features, 0.7, true, false);
JavaRDD<Rating> data = jsc.parallelize(testData._1());
MatrixFactorizationModel model = new ALS().setRank(features)
.setIterations(iterations)
.setImplicitPrefs(true)
.run(data.rdd());
validatePrediction(model, users, products, testData._2(), 0.4, true, testData._3());
}
@Test
public void runImplicitALSWithNegativeWeight() {
int features = 2;
int iterations = 15;
int users = 80;
int products = 160;
Tuple3<List<Rating>, double[], double[]> testData =
ALSSuite.generateRatingsAsJava(users, products, features, 0.7, true, true);
JavaRDD<Rating> data = jsc.parallelize(testData._1());
MatrixFactorizationModel model = new ALS().setRank(features)
.setIterations(iterations)
.setImplicitPrefs(true)
.setSeed(8675309L)
.run(data.rdd());
validatePrediction(model, users, products, testData._2(), 0.4, true, testData._3());
}
@Test
public void runRecommend() {
int features = 5;
int iterations = 10;
int users = 200;
int products = 50;
List<Rating> testData = ALSSuite.generateRatingsAsJava(
users, products, features, 0.7, true, false)._1();
JavaRDD<Rating> data = jsc.parallelize(testData);
MatrixFactorizationModel model = new ALS().setRank(features)
.setIterations(iterations)
.setImplicitPrefs(true)
.setSeed(8675309L)
.run(data.rdd());
validateRecommendations(model.recommendProducts(1, 10), 10);
validateRecommendations(model.recommendUsers(1, 20), 20);
}
private static void validateRecommendations(Rating[] recommendations, int howMany) {
Assert.assertEquals(howMany, recommendations.length);
for (int i = 1; i < recommendations.length; i++) {
Assert.assertTrue(recommendations[i - 1].rating() >= recommendations[i].rating());
}
Assert.assertTrue(recommendations[0].rating() > 0.7);
}
}
| 9,698 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree;
import java.util.HashMap;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.tree.configuration.Algo;
import org.apache.spark.mllib.tree.configuration.Strategy;
import org.apache.spark.mllib.tree.impurity.Gini;
import org.apache.spark.mllib.tree.model.DecisionTreeModel;
public class JavaDecisionTreeSuite extends SharedSparkSession {
private static int validatePrediction(
List<LabeledPoint> validationData, DecisionTreeModel model) {
int numCorrect = 0;
for (LabeledPoint point : validationData) {
Double prediction = model.predict(point.features());
if (prediction == point.label()) {
numCorrect++;
}
}
return numCorrect;
}
@Test
public void runDTUsingConstructor() {
List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
JavaRDD<LabeledPoint> rdd = jsc.parallelize(arr);
HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
int maxDepth = 4;
int numClasses = 2;
int maxBins = 100;
Strategy strategy = new Strategy(Algo.Classification(), Gini.instance(), maxDepth, numClasses,
maxBins, categoricalFeaturesInfo);
DecisionTree learner = new DecisionTree(strategy);
DecisionTreeModel model = learner.run(rdd.rdd());
int numCorrect = validatePrediction(arr, model);
Assert.assertEquals(numCorrect, rdd.count());
}
@Test
public void runDTUsingStaticMethods() {
List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
JavaRDD<LabeledPoint> rdd = jsc.parallelize(arr);
HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
int maxDepth = 4;
int numClasses = 2;
int maxBins = 100;
Strategy strategy = new Strategy(Algo.Classification(), Gini.instance(), maxDepth, numClasses,
maxBins, categoricalFeaturesInfo);
DecisionTreeModel model = DecisionTree$.MODULE$.train(rdd.rdd(), strategy);
// Java API compatibility: the model should accept a JavaRDD of feature vectors for prediction.
JavaRDD<Double> predictions = model.predict(rdd.map(LabeledPoint::features));
int numCorrect = validatePrediction(arr, model);
Assert.assertEquals(numCorrect, rdd.count());
}
}
| 9,699 |