index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/DownloadFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.IOException;
/**
* A handle on the file used when fetching remote data to disk. Used to ensure the lifecycle of
* writing the data, reading it back, and then cleaning it up is followed. Specific implementations
* may also handle encryption. The data can be read only via DownloadFileWritableChannel,
* which ensures data is not read until after the writer is closed.
*/
public interface DownloadFile {
/**
* Delete the file.
*
* @return <code>true</code> if and only if the file or directory is
* successfully deleted; <code>false</code> otherwise
*/
boolean delete();
/**
* A channel for writing data to the file. This special channel allows access to the data for
* reading, after the channel is closed, via {@link DownloadFileWritableChannel#closeAndRead()}.
*
* @return a writable channel backed by this file
* @throws IOException if the file cannot be opened for writing
*/
DownloadFileWritableChannel openForWriting() throws IOException;
/**
* The path of the file, intended only for debug purposes.
*
* @return a human-readable path string; callers must not rely on it to locate the file
*/
String path();
}
| 9,900 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ShuffleIndexInformation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.LongBuffer;
import java.nio.file.Files;
/**
* Keeps the index information for a particular map output
* as an in-memory LongBuffer.
*/
public class ShuffleIndexInformation {
  /** Offsets into the shuffle data file, one big-endian long per reducer (plus a final end offset). */
  private final LongBuffer offsets;
  /** Length of the index file in bytes. */
  private final int size;

  /**
   * Eagerly reads the entire index file into an in-memory buffer.
   *
   * @param indexFile shuffle index file: a sequence of big-endian longs where entry i is the byte
   *                  offset of reducer i's partition in the corresponding data file
   * @throws IOException if the file cannot be opened or read in full
   */
  public ShuffleIndexInformation(File indexFile) throws IOException {
    size = (int) indexFile.length();
    ByteBuffer buffer = ByteBuffer.allocate(size);
    offsets = buffer.asLongBuffer();
    // try-with-resources guarantees the stream is closed even if readFully throws,
    // replacing the manual try/finally + null-check pattern.
    try (DataInputStream dis = new DataInputStream(Files.newInputStream(indexFile.toPath()))) {
      dis.readFully(buffer.array());
    }
  }

  /**
   * Size of the index file.
   * @return size in bytes
   */
  public int getSize() {
    return size;
  }

  /**
   * Get index offset for a particular reducer.
   * The record spans from offsets[reduceId] to offsets[reduceId + 1], so reduceId must be at
   * most (number of stored longs - 2) or an IndexOutOfBoundsException is thrown by the buffer.
   */
  public ShuffleIndexRecord getIndex(int reduceId) {
    long offset = offsets.get(reduceId);
    long nextOffset = offsets.get(reduceId + 1);
    return new ShuffleIndexRecord(offset, nextOffset - offset);
  }
}
| 9,901 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/DownloadFileWritableChannel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import org.apache.spark.network.buffer.ManagedBuffer;
import java.nio.channels.WritableByteChannel;
/**
* A channel for writing data which is fetched to disk, which allows access to the written data only
* after the writer has been closed. Used with DownloadFile and DownloadFileManager.
*/
public interface DownloadFileWritableChannel extends WritableByteChannel {
/**
* Closes the channel and returns a buffer over the data that was written to it.
* The returned buffer must only be obtained after writing is complete; this is the sole
* sanctioned way to read the downloaded data back.
*/
ManagedBuffer closeAndRead();
}
| 9,902 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/OpenBlocks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol;
import java.util.Arrays;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.protocol.Encoders;
// Needed by ScalaDoc. See SPARK-7726
import static org.apache.spark.network.shuffle.protocol.BlockTransferMessage.Type;
/** Request to read a set of blocks. Returns {@link StreamHandle}. */
public class OpenBlocks extends BlockTransferMessage {
  public final String appId;
  public final String execId;
  /** Ids of the blocks to open, in the order their chunks will be served. */
  public final String[] blockIds;

  public OpenBlocks(String appId, String execId, String[] blockIds) {
    this.appId = appId;
    this.execId = execId;
    this.blockIds = blockIds;
  }

  @Override
  protected Type type() { return Type.OPEN_BLOCKS; }

  @Override
  public int hashCode() {
    // Arrays use identity hashCode by default, so the array's content hash must be mixed in
    // explicitly via Arrays.hashCode to stay consistent with equals().
    return Objects.hashCode(appId, execId) * 41 + Arrays.hashCode(blockIds);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("appId", appId)
      .add("execId", execId)
      .add("blockIds", Arrays.toString(blockIds))
      .toString();
  }

  @Override
  public boolean equals(Object other) {
    // instanceof already evaluates to false for null, so a separate null check is redundant.
    if (other instanceof OpenBlocks) {
      OpenBlocks o = (OpenBlocks) other;
      return Objects.equal(appId, o.appId)
        && Objects.equal(execId, o.execId)
        && Arrays.equals(blockIds, o.blockIds);
    }
    return false;
  }

  @Override
  public int encodedLength() {
    return Encoders.Strings.encodedLength(appId)
      + Encoders.Strings.encodedLength(execId)
      + Encoders.StringArrays.encodedLength(blockIds);
  }

  @Override
  public void encode(ByteBuf buf) {
    Encoders.Strings.encode(buf, appId);
    Encoders.Strings.encode(buf, execId);
    Encoders.StringArrays.encode(buf, blockIds);
  }

  /** Decodes fields in exactly the order {@link #encode(ByteBuf)} wrote them. */
  public static OpenBlocks decode(ByteBuf buf) {
    String appId = Encoders.Strings.decode(buf);
    String execId = Encoders.Strings.decode(buf);
    String[] blockIds = Encoders.StringArrays.decode(buf);
    return new OpenBlocks(appId, execId, blockIds);
  }
}
| 9,903 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/StreamHandle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
// Needed by ScalaDoc. See SPARK-7726
import static org.apache.spark.network.shuffle.protocol.BlockTransferMessage.Type;
/**
* Identifier for a fixed number of chunks to read from a stream created by an "open blocks"
* message. This is used by {@link org.apache.spark.network.shuffle.OneForOneBlockFetcher}.
*/
public class StreamHandle extends BlockTransferMessage {
  public final long streamId;
  public final int numChunks;

  public StreamHandle(long streamId, int numChunks) {
    this.streamId = streamId;
    this.numChunks = numChunks;
  }

  @Override
  protected Type type() { return Type.STREAM_HANDLE; }

  @Override
  public int hashCode() {
    return Objects.hashCode(streamId, numChunks);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("streamId", streamId)
      .add("numChunks", numChunks)
      .toString();
  }

  @Override
  public boolean equals(Object other) {
    // instanceof is false for null, so no separate null check is needed. Primitive fields are
    // compared with == directly, avoiding the autoboxing that Objects.equal would cause.
    if (other instanceof StreamHandle) {
      StreamHandle o = (StreamHandle) other;
      return streamId == o.streamId && numChunks == o.numChunks;
    }
    return false;
  }

  @Override
  public int encodedLength() {
    return 8 + 4; // long streamId + int numChunks
  }

  @Override
  public void encode(ByteBuf buf) {
    buf.writeLong(streamId);
    buf.writeInt(numChunks);
  }

  /** Decodes fields in exactly the order {@link #encode(ByteBuf)} wrote them. */
  public static StreamHandle decode(ByteBuf buf) {
    long streamId = buf.readLong();
    int numChunks = buf.readInt();
    return new StreamHandle(streamId, numChunks);
  }
}
| 9,904 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/RegisterExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.protocol.Encoders;
// Needed by ScalaDoc. See SPARK-7726
import static org.apache.spark.network.shuffle.protocol.BlockTransferMessage.Type;
/**
* Initial registration message between an executor and its local shuffle server.
* Returns nothing (empty byte array).
*/
public class RegisterExecutor extends BlockTransferMessage {
  public final String appId;
  public final String execId;
  /** Locations and layout of this executor's shuffle files. */
  public final ExecutorShuffleInfo executorInfo;

  public RegisterExecutor(
      String appId,
      String execId,
      ExecutorShuffleInfo executorInfo) {
    this.appId = appId;
    this.execId = execId;
    this.executorInfo = executorInfo;
  }

  @Override
  protected Type type() { return Type.REGISTER_EXECUTOR; }

  @Override
  public int hashCode() {
    return Objects.hashCode(appId, execId, executorInfo);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("appId", appId)
      .add("execId", execId)
      .add("executorInfo", executorInfo)
      .toString();
  }

  @Override
  public boolean equals(Object other) {
    // instanceof already evaluates to false for null, so a separate null check is redundant.
    if (other instanceof RegisterExecutor) {
      RegisterExecutor o = (RegisterExecutor) other;
      return Objects.equal(appId, o.appId)
        && Objects.equal(execId, o.execId)
        && Objects.equal(executorInfo, o.executorInfo);
    }
    return false;
  }

  @Override
  public int encodedLength() {
    return Encoders.Strings.encodedLength(appId)
      + Encoders.Strings.encodedLength(execId)
      + executorInfo.encodedLength();
  }

  @Override
  public void encode(ByteBuf buf) {
    Encoders.Strings.encode(buf, appId);
    Encoders.Strings.encode(buf, execId);
    executorInfo.encode(buf);
  }

  /** Decodes fields in exactly the order {@link #encode(ByteBuf)} wrote them. */
  public static RegisterExecutor decode(ByteBuf buf) {
    String appId = Encoders.Strings.decode(buf);
    String execId = Encoders.Strings.decode(buf);
    ExecutorShuffleInfo executorShuffleInfo = ExecutorShuffleInfo.decode(buf);
    return new RegisterExecutor(appId, execId, executorShuffleInfo);
  }
}
| 9,905 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/UploadBlock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol;
import java.util.Arrays;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.protocol.Encoders;
// Needed by ScalaDoc. See SPARK-7726
import static org.apache.spark.network.shuffle.protocol.BlockTransferMessage.Type;
/** Request to upload a block with a certain StorageLevel. Returns nothing (empty byte array). */
public class UploadBlock extends BlockTransferMessage {
  public final String appId;
  public final String execId;
  public final String blockId;
  // TODO: StorageLevel is serialized separately in here because StorageLevel is not available in
  // this package. We should avoid this hack.
  public final byte[] metadata;
  public final byte[] blockData;

  /**
   * @param metadata Meta-information about block, typically StorageLevel.
   * @param blockData The actual block's bytes.
   */
  public UploadBlock(
      String appId,
      String execId,
      String blockId,
      byte[] metadata,
      byte[] blockData) {
    this.appId = appId;
    this.execId = execId;
    this.blockId = blockId;
    this.metadata = metadata;
    this.blockData = blockData;
  }

  @Override
  protected Type type() { return Type.UPLOAD_BLOCK; }

  @Override
  public int hashCode() {
    // Byte arrays use identity hashCode by default, so their content hashes are mixed in
    // explicitly to stay consistent with equals().
    int objectsHashCode = Objects.hashCode(appId, execId, blockId);
    return (objectsHashCode * 41 + Arrays.hashCode(metadata)) * 41 + Arrays.hashCode(blockData);
  }

  @Override
  public String toString() {
    // Only sizes are printed for the byte arrays; the payloads may be large.
    return Objects.toStringHelper(this)
      .add("appId", appId)
      .add("execId", execId)
      .add("blockId", blockId)
      .add("metadata size", metadata.length)
      .add("block size", blockData.length)
      .toString();
  }

  @Override
  public boolean equals(Object other) {
    // instanceof already evaluates to false for null, so a separate null check is redundant.
    if (other instanceof UploadBlock) {
      UploadBlock o = (UploadBlock) other;
      return Objects.equal(appId, o.appId)
        && Objects.equal(execId, o.execId)
        && Objects.equal(blockId, o.blockId)
        && Arrays.equals(metadata, o.metadata)
        && Arrays.equals(blockData, o.blockData);
    }
    return false;
  }

  @Override
  public int encodedLength() {
    return Encoders.Strings.encodedLength(appId)
      + Encoders.Strings.encodedLength(execId)
      + Encoders.Strings.encodedLength(blockId)
      + Encoders.ByteArrays.encodedLength(metadata)
      + Encoders.ByteArrays.encodedLength(blockData);
  }

  @Override
  public void encode(ByteBuf buf) {
    Encoders.Strings.encode(buf, appId);
    Encoders.Strings.encode(buf, execId);
    Encoders.Strings.encode(buf, blockId);
    Encoders.ByteArrays.encode(buf, metadata);
    Encoders.ByteArrays.encode(buf, blockData);
  }

  /** Decodes fields in exactly the order {@link #encode(ByteBuf)} wrote them. */
  public static UploadBlock decode(ByteBuf buf) {
    String appId = Encoders.Strings.decode(buf);
    String execId = Encoders.Strings.decode(buf);
    String blockId = Encoders.Strings.decode(buf);
    byte[] metadata = Encoders.ByteArrays.decode(buf);
    byte[] blockData = Encoders.ByteArrays.decode(buf);
    return new UploadBlock(appId, execId, blockId, metadata, blockData);
  }
}
| 9,906 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/ExecutorShuffleInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol;
import java.util.Arrays;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.protocol.Encodable;
import org.apache.spark.network.protocol.Encoders;
/** Contains all configuration necessary for locating the shuffle files of an executor. */
public class ExecutorShuffleInfo implements Encodable {
  /** The base set of local directories that the executor stores its shuffle files in. */
  public final String[] localDirs;
  /** Number of subdirectories created within each localDir. */
  public final int subDirsPerLocalDir;
  /** Shuffle manager (SortShuffleManager) that the executor is using. */
  public final String shuffleManager;

  @JsonCreator
  public ExecutorShuffleInfo(
      @JsonProperty("localDirs") String[] localDirs,
      @JsonProperty("subDirsPerLocalDir") int subDirsPerLocalDir,
      @JsonProperty("shuffleManager") String shuffleManager) {
    this.localDirs = localDirs;
    this.subDirsPerLocalDir = subDirsPerLocalDir;
    this.shuffleManager = shuffleManager;
  }

  @Override
  public int hashCode() {
    // Arrays use identity hashCode by default, so the array's content hash must be mixed in
    // explicitly via Arrays.hashCode to stay consistent with equals().
    return Objects.hashCode(subDirsPerLocalDir, shuffleManager) * 41 + Arrays.hashCode(localDirs);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("localDirs", Arrays.toString(localDirs))
      .add("subDirsPerLocalDir", subDirsPerLocalDir)
      .add("shuffleManager", shuffleManager)
      .toString();
  }

  @Override
  public boolean equals(Object other) {
    // instanceof is false for null, so no separate null check is needed. The int field is
    // compared with == directly, avoiding the autoboxing that Objects.equal would cause.
    if (other instanceof ExecutorShuffleInfo) {
      ExecutorShuffleInfo o = (ExecutorShuffleInfo) other;
      return Arrays.equals(localDirs, o.localDirs)
        && subDirsPerLocalDir == o.subDirsPerLocalDir
        && Objects.equal(shuffleManager, o.shuffleManager);
    }
    return false;
  }

  @Override
  public int encodedLength() {
    return Encoders.StringArrays.encodedLength(localDirs)
      + 4 // int subDirsPerLocalDir
      + Encoders.Strings.encodedLength(shuffleManager);
  }

  @Override
  public void encode(ByteBuf buf) {
    Encoders.StringArrays.encode(buf, localDirs);
    buf.writeInt(subDirsPerLocalDir);
    Encoders.Strings.encode(buf, shuffleManager);
  }

  /** Decodes fields in exactly the order {@link #encode(ByteBuf)} wrote them. */
  public static ExecutorShuffleInfo decode(ByteBuf buf) {
    String[] localDirs = Encoders.StringArrays.decode(buf);
    int subDirsPerLocalDir = buf.readInt();
    String shuffleManager = Encoders.Strings.decode(buf);
    return new ExecutorShuffleInfo(localDirs, subDirsPerLocalDir, shuffleManager);
  }
}
| 9,907 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/UploadBlockStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol;
import java.util.Arrays;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.protocol.Encoders;
// Needed by ScalaDoc. See SPARK-7726
import static org.apache.spark.network.shuffle.protocol.BlockTransferMessage.Type;
/**
* A request to Upload a block, which the destination should receive as a stream.
*
* The actual block data is not contained here. It will be passed to the StreamCallbackWithID
* that is returned from RpcHandler.receiveStream()
*/
public class UploadBlockStream extends BlockTransferMessage {
  public final String blockId;
  /** Meta-information about the block; the block data itself arrives separately as a stream. */
  public final byte[] metadata;

  public UploadBlockStream(String blockId, byte[] metadata) {
    this.blockId = blockId;
    this.metadata = metadata;
  }

  @Override
  protected Type type() { return Type.UPLOAD_BLOCK_STREAM; }

  @Override
  public int hashCode() {
    // Byte arrays use identity hashCode by default, so the content hash is mixed in explicitly
    // to stay consistent with equals().
    int objectsHashCode = Objects.hashCode(blockId);
    return objectsHashCode * 41 + Arrays.hashCode(metadata);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("blockId", blockId)
      .add("metadata size", metadata.length)
      .toString();
  }

  @Override
  public boolean equals(Object other) {
    // instanceof already evaluates to false for null, so a separate null check is redundant.
    if (other instanceof UploadBlockStream) {
      UploadBlockStream o = (UploadBlockStream) other;
      return Objects.equal(blockId, o.blockId)
        && Arrays.equals(metadata, o.metadata);
    }
    return false;
  }

  @Override
  public int encodedLength() {
    return Encoders.Strings.encodedLength(blockId)
      + Encoders.ByteArrays.encodedLength(metadata);
  }

  @Override
  public void encode(ByteBuf buf) {
    Encoders.Strings.encode(buf, blockId);
    Encoders.ByteArrays.encode(buf, metadata);
  }

  /** Decodes fields in exactly the order {@link #encode(ByteBuf)} wrote them. */
  public static UploadBlockStream decode(ByteBuf buf) {
    String blockId = Encoders.Strings.decode(buf);
    byte[] metadata = Encoders.ByteArrays.decode(buf);
    return new UploadBlockStream(blockId, metadata);
  }
}
| 9,908 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/BlockTransferMessage.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol;
import java.nio.ByteBuffer;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.protocol.Encodable;
import org.apache.spark.network.shuffle.protocol.mesos.RegisterDriver;
import org.apache.spark.network.shuffle.protocol.mesos.ShuffleServiceHeartbeat;
/**
* Messages handled by the {@link org.apache.spark.network.shuffle.ExternalShuffleBlockHandler}, or
* by Spark's NettyBlockTransferService.
*
* At a high level:
* - OpenBlock is handled by both services, but only services shuffle files for the external
* shuffle service. It returns a StreamHandle.
* - UploadBlock is only handled by the NettyBlockTransferService.
* - RegisterExecutor is only handled by the external shuffle service.
*/
public abstract class BlockTransferMessage implements Encodable {
/** The concrete message's type; its byte id is written as the first byte of the wire format. */
protected abstract Type type();
/** Preceding every serialized message is its type, which allows us to deserialize it. */
public enum Type {
OPEN_BLOCKS(0), UPLOAD_BLOCK(1), REGISTER_EXECUTOR(2), STREAM_HANDLE(3), REGISTER_DRIVER(4),
HEARTBEAT(5), UPLOAD_BLOCK_STREAM(6);
private final byte id;
Type(int id) {
// The id is stored in a single signed byte on the wire, hence the 128 limit.
assert id < 128 : "Cannot have more than 128 message types";
this.id = (byte) id;
}
public byte id() { return id; }
}
// NB: Java does not support static methods in interfaces, so we must put this in a static class.
public static class Decoder {
/** Deserializes the 'type' byte followed by the message itself. */
public static BlockTransferMessage fromByteBuffer(ByteBuffer msg) {
ByteBuf buf = Unpooled.wrappedBuffer(msg);
byte type = buf.readByte();
// Each case label must stay in sync with the corresponding Type enum id above; adding a
// new message type requires updating both places.
switch (type) {
case 0: return OpenBlocks.decode(buf);
case 1: return UploadBlock.decode(buf);
case 2: return RegisterExecutor.decode(buf);
case 3: return StreamHandle.decode(buf);
case 4: return RegisterDriver.decode(buf);
case 5: return ShuffleServiceHeartbeat.decode(buf);
case 6: return UploadBlockStream.decode(buf);
default: throw new IllegalArgumentException("Unknown message type: " + type);
}
}
}
/** Serializes the 'type' byte followed by the message itself. */
public ByteBuffer toByteBuffer() {
// Allow room for encoded message, plus the type byte
ByteBuf buf = Unpooled.buffer(encodedLength() + 1);
buf.writeByte(type().id);
encode(buf);
// If this assertion fires, a subclass's encodedLength() disagrees with what encode() wrote.
assert buf.writableBytes() == 0 : "Writable bytes remain: " + buf.writableBytes();
return buf.nioBuffer();
}
}
| 9,909 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/mesos/ShuffleServiceHeartbeat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol.mesos;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.protocol.Encoders;
import org.apache.spark.network.shuffle.protocol.BlockTransferMessage;
// Needed by ScalaDoc. See SPARK-7726
import static org.apache.spark.network.shuffle.protocol.BlockTransferMessage.Type;
/**
* A heartbeat sent from the driver to the MesosExternalShuffleService.
*/
public class ShuffleServiceHeartbeat extends BlockTransferMessage {
// Identifies which application's driver this heartbeat is from; the only payload.
private final String appId;
public ShuffleServiceHeartbeat(String appId) {
this.appId = appId;
}
public String getAppId() { return appId; }
@Override
protected Type type() { return Type.HEARTBEAT; }
@Override
public int encodedLength() { return Encoders.Strings.encodedLength(appId); }
@Override
public void encode(ByteBuf buf) {
Encoders.Strings.encode(buf, appId);
}
/** Decodes the single appId field written by {@link #encode(ByteBuf)}. */
public static ShuffleServiceHeartbeat decode(ByteBuf buf) {
return new ShuffleServiceHeartbeat(Encoders.Strings.decode(buf));
}
}
| 9,910 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/mesos/RegisterDriver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.protocol.mesos;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.protocol.Encoders;
import org.apache.spark.network.shuffle.protocol.BlockTransferMessage;
// Needed by ScalaDoc. See SPARK-7726
import static org.apache.spark.network.shuffle.protocol.BlockTransferMessage.Type;
/**
* A message sent from the driver to register with the MesosExternalShuffleService.
*/
public class RegisterDriver extends BlockTransferMessage {
  private final String appId;
  private final long heartbeatTimeoutMs;

  public RegisterDriver(String appId, long heartbeatTimeoutMs) {
    this.appId = appId;
    this.heartbeatTimeoutMs = heartbeatTimeoutMs;
  }

  public String getAppId() { return appId; }

  public long getHeartbeatTimeoutMs() { return heartbeatTimeoutMs; }

  @Override
  protected Type type() { return Type.REGISTER_DRIVER; }

  @Override
  public int encodedLength() {
    return Encoders.Strings.encodedLength(appId) + Long.SIZE / Byte.SIZE;
  }

  @Override
  public void encode(ByteBuf buf) {
    Encoders.Strings.encode(buf, appId);
    buf.writeLong(heartbeatTimeoutMs);
  }

  @Override
  public int hashCode() {
    // equals() below considers only appId, so hashCode must not incorporate
    // heartbeatTimeoutMs: including it made two "equal" drivers (same appId, different
    // timeout) hash differently, violating the Object.hashCode contract.
    return Objects.hashCode(appId);
  }

  @Override
  public boolean equals(Object o) {
    // Driver identity is the application id; the heartbeat timeout is deliberately ignored.
    if (!(o instanceof RegisterDriver)) {
      return false;
    }
    return Objects.equal(appId, ((RegisterDriver) o).appId);
  }

  /** Decodes fields in exactly the order {@link #encode(ByteBuf)} wrote them. */
  public static RegisterDriver decode(ByteBuf buf) {
    String appId = Encoders.Strings.decode(buf);
    long heartbeatTimeout = buf.readLong();
    return new RegisterDriver(appId, heartbeatTimeout);
  }
}
| 9,911 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/mesos/MesosExternalShuffleClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle.mesos;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.spark.network.shuffle.protocol.mesos.ShuffleServiceHeartbeat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.shuffle.ExternalShuffleClient;
import org.apache.spark.network.shuffle.protocol.mesos.RegisterDriver;
import org.apache.spark.network.util.TransportConf;
/**
* A client for talking to the external shuffle service in Mesos coarse-grained mode.
*
* This is used by the Spark driver to register with each external shuffle service on the cluster.
* The reason why the driver has to talk to the service is for cleaning up shuffle files reliably
* after the application exits. Mesos does not provide a great alternative to do this, so Spark
* has to detect this itself.
*/
public class MesosExternalShuffleClient extends ExternalShuffleClient {
  private static final Logger logger = LoggerFactory.getLogger(MesosExternalShuffleClient.class);

  // Single daemon thread that periodically pings each registered shuffle
  // service so it knows this driver is still alive.
  private final ScheduledExecutorService heartbeaterThread =
      Executors.newSingleThreadScheduledExecutor(
        new ThreadFactoryBuilder()
          .setDaemon(true)
          .setNameFormat("mesos-external-shuffle-client-heartbeater")
          .build());

  /**
   * Creates an Mesos external shuffle client that wraps the {@link ExternalShuffleClient}.
   * Please refer to docs on {@link ExternalShuffleClient} for more information.
   */
  public MesosExternalShuffleClient(
      TransportConf conf,
      SecretKeyHolder secretKeyHolder,
      boolean authEnabled,
      long registrationTimeoutMs) {
    super(conf, secretKeyHolder, authEnabled, registrationTimeoutMs);
  }

  /**
   * Registers this driver with the external shuffle service at host:port and,
   * on success, starts sending heartbeats every heartbeatIntervalMs.
   */
  public void registerDriverWithShuffleService(
      String host,
      int port,
      long heartbeatTimeoutMs,
      long heartbeatIntervalMs) throws IOException, InterruptedException {
    checkInit();
    ByteBuffer registerDriver = new RegisterDriver(appId, heartbeatTimeoutMs).toByteBuffer();
    TransportClient client = clientFactory.createClient(host, port);
    client.sendRpc(registerDriver, new RegisterDriverCallback(client, heartbeatIntervalMs));
  }

  private class RegisterDriverCallback implements RpcResponseCallback {
    private final TransportClient client;
    private final long heartbeatIntervalMs;

    private RegisterDriverCallback(TransportClient client, long heartbeatIntervalMs) {
      this.client = client;
      this.heartbeatIntervalMs = heartbeatIntervalMs;
    }

    @Override
    public void onSuccess(ByteBuffer response) {
      heartbeaterThread.scheduleAtFixedRate(
        new Heartbeater(client), 0, heartbeatIntervalMs, TimeUnit.MILLISECONDS);
      // Parameterized logging avoids eager string concatenation.
      logger.info("Successfully registered app {} with external shuffle service.", appId);
    }

    @Override
    public void onFailure(Throwable e) {
      // Pass the Throwable as the final argument so SLF4J logs the full stack
      // trace instead of just e.toString().
      logger.warn("Unable to register app {} with external shuffle service. " +
        "Please manually remove shuffle data after driver exit.", appId, e);
    }
  }

  @Override
  public void close() {
    heartbeaterThread.shutdownNow();
    super.close();
  }

  private class Heartbeater implements Runnable {
    private final TransportClient client;

    private Heartbeater(TransportClient client) {
      this.client = client;
    }

    @Override
    public void run() {
      // TODO: Stop sending heartbeats if the shuffle service has lost the app due to timeout
      client.send(new ShuffleServiceHeartbeat(appId).toByteBuffer());
    }
  }
}
| 9,912 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.util.JavaUtils;
/**
* A class that manages shuffle secret used by the external shuffle service.
*/
public class ShuffleSecretManager implements SecretKeyHolder {
  private static final Logger logger = LoggerFactory.getLogger(ShuffleSecretManager.class);

  // Spark user used for authenticating SASL connections
  // Note that this must match the value in org.apache.spark.SecurityManager
  private static final String SPARK_SASL_USER = "sparkSaslUser";

  // Maps an application id to the shuffle secret registered for it.
  private final ConcurrentHashMap<String, String> secretsByApp;

  public ShuffleSecretManager() {
    this.secretsByApp = new ConcurrentHashMap<>();
  }

  /**
   * Register an application with its secret.
   * Executors need to first authenticate themselves with the same secret before
   * fetching shuffle files written by other executors in this application.
   */
  public void registerApp(String appId, String shuffleSecret) {
    // Always put the new secret information to make sure it's the most up to date.
    // Otherwise we have to specifically look at the application attempt in addition
    // to the applicationId since the secrets change between application attempts on yarn.
    secretsByApp.put(appId, shuffleSecret);
    logger.info("Registered shuffle secret for application {}", appId);
  }

  /**
   * Register an application with its secret specified as a byte buffer.
   */
  public void registerApp(String appId, ByteBuffer shuffleSecret) {
    registerApp(appId, JavaUtils.bytesToString(shuffleSecret));
  }

  /**
   * Unregister an application along with its secret.
   * This is called when the application terminates.
   */
  public void unregisterApp(String appId) {
    secretsByApp.remove(appId);
    logger.info("Unregistered shuffle secret for application {}", appId);
  }

  /**
   * Return the Spark user for authenticating SASL connections.
   */
  @Override
  public String getSaslUser(String appId) {
    return SPARK_SASL_USER;
  }

  /**
   * Return the secret key registered with the given application.
   * This key is used to authenticate the executors before they can fetch shuffle files
   * written by this application from the external shuffle service. If the specified
   * application is not registered, return null.
   */
  @Override
  public String getSecretKey(String appId) {
    return secretsByApp.get(appId);
  }
}
| 9,913 |
0 | Create_ds/spark/common/tags/src/test/java/org/apache/spark | Create_ds/spark/common/tags/src/test/java/org/apache/spark/tags/DockerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.tags;
import java.lang.annotation.*;
import org.scalatest.TagAnnotation;
/**
 * ScalaTest tag annotation, retained at runtime and applicable to classes and
 * methods, used to group tests so they can be included or excluded at run time.
 * Per the name, it presumably marks tests that require a Docker environment.
 */
@TagAnnotation
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD, ElementType.TYPE})
public @interface DockerTest { }
| 9,914 |
0 | Create_ds/spark/common/tags/src/test/java/org/apache/spark | Create_ds/spark/common/tags/src/test/java/org/apache/spark/tags/ExtendedHiveTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.tags;
import java.lang.annotation.*;
import org.scalatest.TagAnnotation;
/**
 * ScalaTest tag annotation, retained at runtime and applicable to classes and
 * methods, used to group tests so they can be included or excluded at run time.
 * Per the name, it presumably marks extended (long-running) Hive tests.
 */
@TagAnnotation
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD, ElementType.TYPE})
public @interface ExtendedHiveTest { }
| 9,915 |
0 | Create_ds/spark/common/tags/src/test/java/org/apache/spark | Create_ds/spark/common/tags/src/test/java/org/apache/spark/tags/ExtendedYarnTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.tags;
import java.lang.annotation.*;
import org.scalatest.TagAnnotation;
/**
 * ScalaTest tag annotation, retained at runtime and applicable to classes and
 * methods, used to group tests so they can be included or excluded at run time.
 * Per the name, it presumably marks extended (long-running) YARN tests.
 */
@TagAnnotation
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD, ElementType.TYPE})
public @interface ExtendedYarnTest { }
| 9,916 |
0 | Create_ds/spark/common/tags/src/main/java/org/apache/spark | Create_ds/spark/common/tags/src/main/java/org/apache/spark/annotation/AlphaComponent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.annotation;
import java.lang.annotation.*;
/**
* A new component of Spark which may have unstable API's.
*
* NOTE: If there exists a Scaladoc comment that immediately precedes this annotation, the first
* line of the comment must be ":: AlphaComponent ::" with no trailing blank line. This is because
* of the known issue that Scaladoc displays only either the annotation or the comment, whichever
* comes first.
*/
// Marker annotation with no elements; runtime retention lets tooling detect it
// reflectively, and it can be applied to nearly any program element.
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
  ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @interface AlphaComponent {}
| 9,917 |
0 | Create_ds/spark/common/tags/src/main/java/org/apache/spark | Create_ds/spark/common/tags/src/main/java/org/apache/spark/annotation/InterfaceStability.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.annotation;
import java.lang.annotation.Documented;
/**
* Annotation to inform users of how much to rely on a particular package,
* class or method not changing over time.
*/
/**
 * Container for the stability marker annotations. Each nested annotation is a
 * pure marker (no elements) applied to packages, classes or methods.
 */
public class InterfaceStability {

  /**
   * Stable APIs that retain source and binary compatibility within a major release.
   * These interfaces can change from one major release to another major release
   * (e.g. from 1.0 to 2.0).
   */
  @Documented
  public @interface Stable {}

  /**
   * APIs that are meant to evolve towards becoming stable APIs, but are not stable APIs yet.
   * Evolving interfaces can change from one feature release to another release (i.e. 2.1 to 2.2).
   */
  @Documented
  public @interface Evolving {}

  /**
   * Unstable APIs, with no guarantee on stability.
   * Classes that are unannotated are considered Unstable.
   */
  @Documented
  public @interface Unstable {}
}
| 9,918 |
0 | Create_ds/spark/common/tags/src/main/java/org/apache/spark | Create_ds/spark/common/tags/src/main/java/org/apache/spark/annotation/DeveloperApi.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.annotation;
import java.lang.annotation.*;
/**
* A lower-level, unstable API intended for developers.
*
* Developer API's might change or be removed in minor versions of Spark.
*
* NOTE: If there exists a Scaladoc comment that immediately precedes this annotation, the first
* line of the comment must be ":: DeveloperApi ::" with no trailing blank line. This is because
* of the known issue that Scaladoc displays only either the annotation or the comment, whichever
* comes first.
*/
// Marker annotation with no elements; runtime retention lets tooling detect it
// reflectively, and it can be applied to nearly any program element.
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
  ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @interface DeveloperApi {}
| 9,919 |
0 | Create_ds/spark/common/tags/src/main/java/org/apache/spark | Create_ds/spark/common/tags/src/main/java/org/apache/spark/annotation/Experimental.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.annotation;
import java.lang.annotation.*;
/**
* An experimental user-facing API.
*
* Experimental API's might change or be removed in minor versions of Spark, or be adopted as
* first-class Spark API's.
*
* NOTE: If there exists a Scaladoc comment that immediately precedes this annotation, the first
* line of the comment must be ":: Experimental ::" with no trailing blank line. This is because
* of the known issue that Scaladoc displays only either the annotation or the comment, whichever
* comes first.
*/
// Marker annotation with no elements; runtime retention lets tooling detect it
// reflectively, and it can be applied to nearly any program element.
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
  ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @interface Experimental {}
| 9,920 |
0 | Create_ds/spark/common/tags/src/main/java/org/apache/spark | Create_ds/spark/common/tags/src/main/java/org/apache/spark/annotation/Private.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* A class that is considered private to the internals of Spark -- there is a high-likelihood
* they will be changed in future versions of Spark.
*
* This should be used only when the standard Scala / Java means of protecting classes are
* insufficient. In particular, Java has no equivalent of private[spark], so we use this annotation
* in its place.
*
* NOTE: If there exists a Scaladoc comment that immediately precedes this annotation, the first
* line of the comment must be ":: Private ::" with no trailing blank line. This is because
* of the known issue that Scaladoc displays only either the annotation or the comment, whichever
* comes first.
*/
// Marker annotation with no elements; runtime retention lets tooling detect it
// reflectively, and it can be applied to nearly any program element.
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
  ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @interface Private {}
| 9,921 |
0 | Create_ds/spark/common/tags/src/main/scala/org/apache/spark | Create_ds/spark/common/tags/src/main/scala/org/apache/spark/annotation/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Spark annotations to mark an API experimental or intended only for advanced usages by developers.
* This package consist of these annotations, which are used project wide and are reflected in
* Scala and Java docs.
*/
package org.apache.spark.annotation;
| 9,922 |
0 | Create_ds/spark/common/unsafe/src/test/java/org/apache/spark | Create_ds/spark/common/unsafe/src/test/java/org/apache/spark/unsafe/PlatformUtilSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe;
import org.apache.spark.unsafe.memory.HeapMemoryAllocator;
import org.apache.spark.unsafe.memory.MemoryAllocator;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests for Platform memory utilities and the heap/off-heap memory allocators.
 *
 * Fix: several assertEquals calls passed arguments as (actual, expected) —
 * reversed from JUnit's (expected, actual) contract — which produces misleading
 * failure messages. Argument order is corrected; assertion outcomes are unchanged.
 */
public class PlatformUtilSuite {

  @Test
  public void overlappingCopyMemory() {
    byte[] data = new byte[3 * 1024 * 1024];
    int size = 2 * 1024 * 1024;
    for (int i = 0; i < data.length; ++i) {
      data[i] = (byte)i;
    }
    // Fully overlapping copy (src == dst) must leave the data unchanged.
    Platform.copyMemory(data, Platform.BYTE_ARRAY_OFFSET, data, Platform.BYTE_ARRAY_OFFSET, size);
    for (int i = 0; i < data.length; ++i) {
      Assert.assertEquals((byte)i, data[i]);
    }
    // Overlapping copy with source one byte ahead of destination.
    Platform.copyMemory(
      data,
      Platform.BYTE_ARRAY_OFFSET + 1,
      data,
      Platform.BYTE_ARRAY_OFFSET,
      size);
    for (int i = 0; i < size; ++i) {
      Assert.assertEquals((byte)(i + 1), data[i]);
    }
    for (int i = 0; i < data.length; ++i) {
      data[i] = (byte)i;
    }
    // Overlapping copy with destination one byte ahead of source.
    Platform.copyMemory(
      data,
      Platform.BYTE_ARRAY_OFFSET,
      data,
      Platform.BYTE_ARRAY_OFFSET + 1,
      size);
    for (int i = 0; i < size; ++i) {
      Assert.assertEquals((byte)i, data[i + 1]);
    }
  }

  @Test
  public void onHeapMemoryAllocatorPoolingReUsesLongArrays() {
    MemoryBlock block1 = MemoryAllocator.HEAP.allocate(1024 * 1024);
    Object baseObject1 = block1.getBaseObject();
    MemoryAllocator.HEAP.free(block1);
    MemoryBlock block2 = MemoryAllocator.HEAP.allocate(1024 * 1024);
    Object baseObject2 = block2.getBaseObject();
    // Pooling should hand back the very same backing array.
    Assert.assertSame(baseObject1, baseObject2);
    MemoryAllocator.HEAP.free(block2);
  }

  @Test
  public void freeingOnHeapMemoryBlockResetsBaseObjectAndOffset() {
    MemoryBlock block = MemoryAllocator.HEAP.allocate(1024);
    Assert.assertNotNull(block.getBaseObject());
    MemoryAllocator.HEAP.free(block);
    Assert.assertNull(block.getBaseObject());
    Assert.assertEquals(0, block.getBaseOffset());
    Assert.assertEquals(MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER, block.pageNumber);
  }

  @Test
  public void freeingOffHeapMemoryBlockResetsOffset() {
    MemoryBlock block = MemoryAllocator.UNSAFE.allocate(1024);
    Assert.assertNull(block.getBaseObject());
    Assert.assertNotEquals(0, block.getBaseOffset());
    MemoryAllocator.UNSAFE.free(block);
    Assert.assertNull(block.getBaseObject());
    Assert.assertEquals(0, block.getBaseOffset());
    Assert.assertEquals(MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER, block.pageNumber);
  }

  @Test(expected = AssertionError.class)
  public void onHeapMemoryAllocatorThrowsAssertionErrorOnDoubleFree() {
    MemoryBlock block = MemoryAllocator.HEAP.allocate(1024);
    MemoryAllocator.HEAP.free(block);
    MemoryAllocator.HEAP.free(block);
  }

  @Test(expected = AssertionError.class)
  public void offHeapMemoryAllocatorThrowsAssertionErrorOnDoubleFree() {
    MemoryBlock block = MemoryAllocator.UNSAFE.allocate(1024);
    MemoryAllocator.UNSAFE.free(block);
    MemoryAllocator.UNSAFE.free(block);
  }

  @Test
  public void memoryDebugFillEnabledInTest() {
    Assert.assertTrue(MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED);
    MemoryBlock onheap = MemoryAllocator.HEAP.allocate(1);
    // Freshly allocated memory is filled with the CLEAN debug byte.
    Assert.assertEquals(
      MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE,
      Platform.getByte(onheap.getBaseObject(), onheap.getBaseOffset()));
    MemoryBlock onheap1 = MemoryAllocator.HEAP.allocate(1024 * 1024);
    Object onheap1BaseObject = onheap1.getBaseObject();
    long onheap1BaseOffset = onheap1.getBaseOffset();
    MemoryAllocator.HEAP.free(onheap1);
    // Freed memory is overwritten with the FREED debug byte.
    Assert.assertEquals(
      MemoryAllocator.MEMORY_DEBUG_FILL_FREED_VALUE,
      Platform.getByte(onheap1BaseObject, onheap1BaseOffset));
    MemoryBlock onheap2 = MemoryAllocator.HEAP.allocate(1024 * 1024);
    // A pooled block handed back out must be re-filled with the CLEAN byte.
    Assert.assertEquals(
      MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE,
      Platform.getByte(onheap2.getBaseObject(), onheap2.getBaseOffset()));
    MemoryBlock offheap = MemoryAllocator.UNSAFE.allocate(1);
    Assert.assertEquals(
      MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE,
      Platform.getByte(offheap.getBaseObject(), offheap.getBaseOffset()));
    MemoryAllocator.UNSAFE.free(offheap);
  }

  @Test
  public void heapMemoryReuse() {
    MemoryAllocator heapMem = new HeapMemoryAllocator();
    // The size is less than `HeapMemoryAllocator.POOLING_THRESHOLD_BYTES`,
    // allocate new memory every time.
    MemoryBlock onheap1 = heapMem.allocate(513);
    Object obj1 = onheap1.getBaseObject();
    heapMem.free(onheap1);
    MemoryBlock onheap2 = heapMem.allocate(514);
    Assert.assertNotEquals(obj1, onheap2.getBaseObject());
    // The size is greater than `HeapMemoryAllocator.POOLING_THRESHOLD_BYTES`,
    // reuse the previous memory which has released.
    MemoryBlock onheap3 = heapMem.allocate(1024 * 1024 + 1);
    Assert.assertEquals(1024 * 1024 + 1, onheap3.size());
    Object obj3 = onheap3.getBaseObject();
    heapMem.free(onheap3);
    MemoryBlock onheap4 = heapMem.allocate(1024 * 1024 + 7);
    Assert.assertEquals(1024 * 1024 + 7, onheap4.size());
    Assert.assertEquals(obj3, onheap4.getBaseObject());
  }
}
| 9,923 |
0 | Create_ds/spark/common/unsafe/src/test/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/test/java/org/apache/spark/unsafe/types/CalendarIntervalSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.types;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.apache.spark.unsafe.types.CalendarInterval.*;
public class CalendarIntervalSuite {
@Test
public void equalsTest() {
CalendarInterval i1 = new CalendarInterval(3, 123);
CalendarInterval i2 = new CalendarInterval(3, 321);
CalendarInterval i3 = new CalendarInterval(1, 123);
CalendarInterval i4 = new CalendarInterval(3, 123);
assertNotSame(i1, i2);
assertNotSame(i1, i3);
assertNotSame(i2, i3);
assertEquals(i1, i4);
}
  // Covers toString() for: the zero interval, month-only intervals (positive
  // and negative), time-only intervals (positive and negative), and a combined
  // month+time interval.
  @Test
  public void toStringTest() {
    CalendarInterval i;
    i = new CalendarInterval(0, 0);
    assertEquals("interval 0 microseconds", i.toString());
    i = new CalendarInterval(34, 0);
    assertEquals("interval 2 years 10 months", i.toString());
    i = new CalendarInterval(-34, 0);
    assertEquals("interval -2 years -10 months", i.toString());
    i = new CalendarInterval(0, 3 * MICROS_PER_WEEK + 13 * MICROS_PER_HOUR + 123);
    assertEquals("interval 3 weeks 13 hours 123 microseconds", i.toString());
    i = new CalendarInterval(0, -3 * MICROS_PER_WEEK - 13 * MICROS_PER_HOUR - 123);
    assertEquals("interval -3 weeks -13 hours -123 microseconds", i.toString());
    i = new CalendarInterval(34, 3 * MICROS_PER_WEEK + 13 * MICROS_PER_HOUR + 123);
    assertEquals("interval 2 years 10 months 3 weeks 13 hours 123 microseconds", i.toString());
  }
  // Exercises fromString() over every supported unit (via the testSingleUnit
  // helper defined elsewhere in this class), multi-unit inputs with leading and
  // trailing whitespace, and malformed inputs that must yield null.
  @Test
  public void fromStringTest() {
    testSingleUnit("year", 3, 36, 0);
    testSingleUnit("month", 3, 3, 0);
    testSingleUnit("week", 3, 0, 3 * MICROS_PER_WEEK);
    testSingleUnit("day", 3, 0, 3 * MICROS_PER_DAY);
    testSingleUnit("hour", 3, 0, 3 * MICROS_PER_HOUR);
    testSingleUnit("minute", 3, 0, 3 * MICROS_PER_MINUTE);
    testSingleUnit("second", 3, 0, 3 * MICROS_PER_SECOND);
    testSingleUnit("millisecond", 3, 0, 3 * MICROS_PER_MILLI);
    testSingleUnit("microsecond", 3, 0, 3);
    String input;
    input = "interval -5 years 23 month";
    // -5 years + 23 months = -37 months total.
    CalendarInterval result = new CalendarInterval(-5 * 12 + 23, 0);
    assertEquals(fromString(input), result);
    input = "interval -5 years 23 month ";
    assertEquals(fromString(input), result);
    input = " interval -5 years 23 month ";
    assertEquals(fromString(input), result);
    // Error cases
    input = "interval 3month 1 hour";
    assertNull(fromString(input));
    input = "interval 3 moth 1 hour";
    assertNull(fromString(input));
    input = "interval";
    assertNull(fromString(input));
    input = "int";
    assertNull(fromString(input));
    input = "";
    assertNull(fromString(input));
    input = null;
    assertNull(fromString(input));
  }
  // fromCaseInsensitiveString() must parse unit names regardless of case,
  // reject null/blank input with "cannot be null or blank", and reject
  // malformed input with an "Invalid interval" message.
  @Test
  public void fromCaseInsensitiveStringTest() {
    for (String input : new String[]{"5 MINUTES", "5 minutes", "5 Minutes"}) {
      assertEquals(fromCaseInsensitiveString(input), new CalendarInterval(0, 5L * 60 * 1_000_000));
    }
    for (String input : new String[]{null, "", " "}) {
      try {
        fromCaseInsensitiveString(input);
        fail("Expected to throw an exception for the invalid input");
      } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("cannot be null or blank"));
      }
    }
    for (String input : new String[]{"interval", "interval1 day", "foo", "foo 1 day"}) {
      try {
        fromCaseInsensitiveString(input);
        fail("Expected to throw an exception for the invalid input");
      } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("Invalid interval"));
      }
    }
  }
  // fromYearMonthString() parses "Y-M" (months 0-11) into total months,
  // supports a leading sign for negative intervals, and rejects an
  // out-of-range month component.
  @Test
  public void fromYearMonthStringTest() {
    String input;
    CalendarInterval i;
    input = "99-10";
    i = new CalendarInterval(99 * 12 + 10, 0L);
    assertEquals(fromYearMonthString(input), i);
    input = "-8-10";
    i = new CalendarInterval(-8 * 12 - 10, 0L);
    assertEquals(fromYearMonthString(input), i);
    try {
      input = "99-15";
      fromYearMonthString(input);
      fail("Expected to throw an exception for the invalid input");
    } catch (IllegalArgumentException e) {
      assertTrue(e.getMessage().contains("month 15 outside range"));
    }
  }
@Test
public void fromDayTimeStringTest() {
String input;
CalendarInterval i;
input = "5 12:40:30.999999999";
i = new CalendarInterval(0, 5 * MICROS_PER_DAY + 12 * MICROS_PER_HOUR +
40 * MICROS_PER_MINUTE + 30 * MICROS_PER_SECOND + 999999L);
assertEquals(fromDayTimeString(input), i);
input = "10 0:12:0.888";
i = new CalendarInterval(0, 10 * MICROS_PER_DAY + 12 * MICROS_PER_MINUTE +
888 * MICROS_PER_MILLI);
assertEquals(fromDayTimeString(input), i);
input = "-3 0:0:0";
i = new CalendarInterval(0, -3 * MICROS_PER_DAY);
assertEquals(fromDayTimeString(input), i);
try {
input = "5 30:12:20";
fromDayTimeString(input);
fail("Expected to throw an exception for the invalid input");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("hour 30 outside range"));
}
try {
input = "5 30-12";
fromDayTimeString(input);
fail("Expected to throw an exception for the invalid input");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("not match day-time format"));
}
}
@Test
public void fromSingleUnitStringTest() {
String input;
CalendarInterval i;
input = "12";
i = new CalendarInterval(12 * 12, 0L);
assertEquals(fromSingleUnitString("year", input), i);
input = "100";
i = new CalendarInterval(0, 100 * MICROS_PER_DAY);
assertEquals(fromSingleUnitString("day", input), i);
input = "1999.38888";
i = new CalendarInterval(0, 1999 * MICROS_PER_SECOND + 38);
assertEquals(fromSingleUnitString("second", input), i);
try {
input = String.valueOf(Integer.MAX_VALUE);
fromSingleUnitString("year", input);
fail("Expected to throw an exception for the invalid input");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("outside range"));
}
try {
input = String.valueOf(Long.MAX_VALUE / MICROS_PER_HOUR + 1);
fromSingleUnitString("hour", input);
fail("Expected to throw an exception for the invalid input");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("outside range"));
}
}
@Test
public void addTest() {
String input = "interval 3 month 1 hour";
String input2 = "interval 2 month 100 hour";
CalendarInterval interval = fromString(input);
CalendarInterval interval2 = fromString(input2);
assertEquals(interval.add(interval2), new CalendarInterval(5, 101 * MICROS_PER_HOUR));
input = "interval -10 month -81 hour";
input2 = "interval 75 month 200 hour";
interval = fromString(input);
interval2 = fromString(input2);
assertEquals(interval.add(interval2), new CalendarInterval(65, 119 * MICROS_PER_HOUR));
}
@Test
public void subtractTest() {
String input = "interval 3 month 1 hour";
String input2 = "interval 2 month 100 hour";
CalendarInterval interval = fromString(input);
CalendarInterval interval2 = fromString(input2);
assertEquals(interval.subtract(interval2), new CalendarInterval(1, -99 * MICROS_PER_HOUR));
input = "interval -10 month -81 hour";
input2 = "interval 75 month 200 hour";
interval = fromString(input);
interval2 = fromString(input2);
assertEquals(interval.subtract(interval2), new CalendarInterval(-85, -281 * MICROS_PER_HOUR));
}
private static void testSingleUnit(String unit, int number, int months, long microseconds) {
String input1 = "interval " + number + " " + unit;
String input2 = "interval " + number + " " + unit + "s";
CalendarInterval result = new CalendarInterval(months, microseconds);
assertEquals(fromString(input1), result);
assertEquals(fromString(input2), result);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.types;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.*;
import com.google.common.collect.ImmutableMap;
import org.apache.spark.unsafe.Platform;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.apache.spark.unsafe.Platform.BYTE_ARRAY_OFFSET;
import static org.apache.spark.unsafe.types.UTF8String.*;
public class UTF8StringSuite {
private static void checkBasic(String str, int len) {
UTF8String s1 = fromString(str);
UTF8String s2 = fromBytes(str.getBytes(StandardCharsets.UTF_8));
assertEquals(s1.numChars(), len);
assertEquals(s2.numChars(), len);
assertEquals(s1.toString(), str);
assertEquals(s2.toString(), str);
assertEquals(s1, s2);
assertEquals(s1.hashCode(), s2.hashCode());
assertEquals(0, s1.compareTo(s2));
assertTrue(s1.contains(s2));
assertTrue(s2.contains(s1));
assertTrue(s1.startsWith(s2));
assertTrue(s1.endsWith(s2));
}
  /** Runs checkBasic over 1-, 2-, 3- and 4-byte UTF-8 encoded code points. */
  @Test
  public void basicTest() {
    checkBasic("", 0);
    checkBasic("¡", 1); // 2 bytes char
    checkBasic("ку", 2); // 2 * 2 bytes chars
    checkBasic("hello", 5); // 5 * 1 byte chars
    checkBasic("大 千 世 界", 7); // 4 * 3 bytes chars interleaved with 3 spaces
    checkBasic("︽﹋%", 3); // 2 * 3 bytes chars + 1 ascii char
    checkBasic("\uD83E\uDD19", 1); // 4 bytes char (surrogate pair counts as one char)
  }
@Test
public void emptyStringTest() {
assertEquals(EMPTY_UTF8, fromString(""));
assertEquals(EMPTY_UTF8, fromBytes(new byte[0]));
assertEquals(0, EMPTY_UTF8.numChars());
assertEquals(0, EMPTY_UTF8.numBytes());
}
  /**
   * getPrefix() must order strings consistently with their byte-wise comparison, and
   * identical leading bytes must produce identical prefixes.
   */
  @Test
  public void prefix() {
    // Comparisons are deliberately written as subtraction on the 64-bit prefix value,
    // matching how a prefix comparator consumes it.
    assertTrue(fromString("a").getPrefix() - fromString("b").getPrefix() < 0);
    assertTrue(fromString("ab").getPrefix() - fromString("b").getPrefix() < 0);
    assertTrue(
      fromString("abbbbbbbbbbbasdf").getPrefix() - fromString("bbbbbbbbbbbbasdf").getPrefix() < 0);
    assertTrue(fromString("").getPrefix() - fromString("a").getPrefix() < 0);
    assertTrue(fromString("你好").getPrefix() - fromString("世界").getPrefix() > 0);
    byte[] buf1 = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    byte[] buf2 = {1, 2, 3};
    UTF8String str1 = fromBytes(buf1, 0, 3);
    UTF8String str2 = fromBytes(buf1, 0, 8);
    UTF8String str3 = fromBytes(buf2);
    // A proper byte prefix sorts before the longer string...
    assertTrue(str1.getPrefix() - str2.getPrefix() < 0);
    // ...and equal bytes yield equal prefixes regardless of the backing buffer.
    assertEquals(str1.getPrefix(), str3.getPrefix());
  }
  /** Ordering is byte-wise and unsigned, hence case-sensitive and stable for multi-byte text. */
  @Test
  public void compareTo() {
    assertTrue(fromString("").compareTo(fromString("a")) < 0);
    // Lowercase ASCII has higher byte values than uppercase.
    assertTrue(fromString("abc").compareTo(fromString("ABC")) > 0);
    // The longer string sorts after its proper prefix.
    assertTrue(fromString("abc0").compareTo(fromString("abc")) > 0);
    assertTrue(fromString("abcabcabc").compareTo(fromString("abcabcabc")) == 0);
    assertTrue(fromString("aBcabcabc").compareTo(fromString("Abcabcabc")) > 0);
    assertTrue(fromString("Abcabcabc").compareTo(fromString("abcabcabC")) < 0);
    assertTrue(fromString("abcabcabc").compareTo(fromString("abcabcabC")) > 0);
    // CJK bytes are > 0x7F; a signed byte comparison would invert this result.
    assertTrue(fromString("abc").compareTo(fromString("世界")) < 0);
    assertTrue(fromString("你好").compareTo(fromString("世界")) > 0);
    assertTrue(fromString("你好123").compareTo(fromString("你好122")) > 0);
  }
protected static void testUpperandLower(String upper, String lower) {
UTF8String us = fromString(upper);
UTF8String ls = fromString(lower);
assertEquals(ls, us.toLowerCase());
assertEquals(us, ls.toUpperCase());
assertEquals(us, us.toUpperCase());
assertEquals(ls, ls.toLowerCase());
}
  /** Case conversion across ASCII, non-Latin cased letters, and caseless CJK text. */
  @Test
  public void upperAndLower() {
    testUpperandLower("", "");
    testUpperandLower("0123456", "0123456"); // digits have no case
    testUpperandLower("ABCXYZ", "abcxyz");
    testUpperandLower("ЀЁЂѺΏỀ", "ѐёђѻώề"); // Cyrillic, Greek and Vietnamese letters
    testUpperandLower("大千世界 数据砖头", "大千世界 数据砖头"); // CJK has no case
  }
  /** toTitleCase() uppercases the first letter of each space-separated word. */
  @Test
  public void titleCase() {
    assertEquals(fromString(""), fromString("").toTitleCase());
    assertEquals(fromString("Ab Bc Cd"), fromString("ab bc cd").toTitleCase());
    // Non-ASCII cased letters are title-cased too.
    assertEquals(fromString("Ѐ Ё Ђ Ѻ Ώ Ề"), fromString("ѐ ё ђ ѻ ώ ề").toTitleCase())
;
    // Caseless scripts are returned unchanged.
    assertEquals(fromString("大千世界 数据砖头"), fromString("大千世界 数据砖头").toTitleCase());
  }
@Test
public void concatTest() {
assertEquals(EMPTY_UTF8, concat());
assertNull(concat((UTF8String) null));
assertEquals(EMPTY_UTF8, concat(EMPTY_UTF8));
assertEquals(fromString("ab"), concat(fromString("ab")));
assertEquals(fromString("ab"), concat(fromString("a"), fromString("b")));
assertEquals(fromString("abc"), concat(fromString("a"), fromString("b"), fromString("c")));
assertNull(concat(fromString("a"), null, fromString("c")));
assertNull(concat(fromString("a"), null, null));
assertNull(concat(null, null, null));
assertEquals(fromString("数据砖头"), concat(fromString("数据"), fromString("砖头")));
}
  /** concatWs() joins non-null inputs with a separator; a null separator yields null. */
  @Test
  public void concatWsTest() {
    // Returns null if the separator is null
    assertNull(concatWs(null, (UTF8String) null));
    assertNull(concatWs(null, fromString("a")));
    // If the separator is not null, concatWs should skip all null inputs and never return null.
    UTF8String sep = fromString("哈哈");
    assertEquals(
      EMPTY_UTF8,
      concatWs(sep, EMPTY_UTF8));
    assertEquals(
      fromString("ab"),
      concatWs(sep, fromString("ab")));
    assertEquals(
      fromString("a哈哈b"),
      concatWs(sep, fromString("a"), fromString("b")));
    assertEquals(
      fromString("a哈哈b哈哈c"),
      concatWs(sep, fromString("a"), fromString("b"), fromString("c")));
    assertEquals(
      fromString("a哈哈c"),
      concatWs(sep, fromString("a"), null, fromString("c")));
    assertEquals(
      fromString("a"),
      concatWs(sep, fromString("a"), null, null));
    assertEquals(
      EMPTY_UTF8,
      concatWs(sep, null, null, null));
    assertEquals(
      fromString("数据哈哈砖头"),
      concatWs(sep, fromString("数据"), fromString("砖头")));
  }
  /** contains() is substring containment, for ASCII and multi-byte characters alike. */
  @Test
  public void contains() {
    assertTrue(EMPTY_UTF8.contains(EMPTY_UTF8));
    assertTrue(fromString("hello").contains(fromString("ello")));
    assertFalse(fromString("hello").contains(fromString("vello")));
    // A probe longer than the string can never be contained.
    assertFalse(fromString("hello").contains(fromString("hellooo")));
    assertTrue(fromString("大千世界").contains(fromString("千世界")));
    assertFalse(fromString("大千世界").contains(fromString("世千")));
    assertFalse(fromString("大千世界").contains(fromString("大千世界好")));
  }
  /** startsWith() matches only at the very beginning, never mid-string. */
  @Test
  public void startsWith() {
    assertTrue(EMPTY_UTF8.startsWith(EMPTY_UTF8))
;
    assertTrue(fromString("hello").startsWith(fromString("hell")));
    // An interior substring is not a prefix.
    assertFalse(fromString("hello").startsWith(fromString("ell")));
    // A probe longer than the string can never be a prefix.
    assertFalse(fromString("hello").startsWith(fromString("hellooo")));
    assertTrue(fromString("数据砖头").startsWith(fromString("数据")));
    assertFalse(fromString("大千世界").startsWith(fromString("千")));
    assertFalse(fromString("大千世界").startsWith(fromString("大千世界好")));
  }
  /** endsWith() matches only at the very end, never mid-string. */
  @Test
  public void endsWith() {
    assertTrue(EMPTY_UTF8.endsWith(EMPTY_UTF8));
    assertTrue(fromString("hello").endsWith(fromString("ello")));
    // A near-miss suffix and a probe longer than the string both fail.
    assertFalse(fromString("hello").endsWith(fromString("ellov")));
    assertFalse(fromString("hello").endsWith(fromString("hhhello")));
    assertTrue(fromString("大千世界").endsWith(fromString("世界")));
    assertFalse(fromString("大千世界").endsWith(fromString("世")));
    assertFalse(fromString("数据砖头").endsWith(fromString("我的数据砖头")));
  }
  /** substring(start, until) indexes by character (not byte); an end past the string is clamped. */
  @Test
  public void substring() {
    assertEquals(EMPTY_UTF8, fromString("hello").substring(0, 0));
    assertEquals(fromString("el"), fromString("hello").substring(1, 3));
    assertEquals(fromString("数"), fromString("数据砖头").substring(0, 1));
    assertEquals(fromString("据砖"), fromString("数据砖头").substring(1, 3));
    // until = 5 exceeds the 4-char string and is clamped to its end.
    assertEquals(fromString("头"), fromString("数据砖头").substring(3, 5));
    // Mixed 2-byte and 3-byte characters.
    assertEquals(fromString("ߵ梷"), fromString("ߵ梷").substring(0, 2));
  }
  /** trim/trimLeft/trimRight strip only the ASCII space (0x20), from both/left/right ends. */
  @Test
  public void trims() {
    assertEquals(fromString("1"), fromString("1").trim());
    assertEquals(fromString("hello"), fromString(" hello ").trim());
    assertEquals(fromString("hello "), fromString(" hello ").trimLeft());
    assertEquals(fromString(" hello"), fromString(" hello ").trimRight());
    assertEquals(EMPTY_UTF8, EMPTY_UTF8.trim());
    assertEquals(EMPTY_UTF8, fromString(" ").trim());
    assertEquals(EMPTY_UTF8, fromString(" ").trimLeft());
    assertEquals(EMPTY_UTF8, fromString(" ").trimRight());
    assertEquals(fromString("数据砖头"), fromString(" 数据砖头 ").trim());
    assertEquals(fromString("数据砖头 "), fromString(" 数据砖头 ").trimLeft());
    assertEquals(fromString(" 数据砖头"), fromString(" 数据砖头 ").trimRight());
    assertEquals(fromString("数据砖头"), fromString("数据砖头").trim());
    assertEquals(fromString("数据砖头"), fromString("数据砖头").trimLeft());
    assertEquals(fromString("数据砖头"), fromString("数据砖头").trimRight());
    // Control characters below 0x20 are NOT trimmed (unlike java.lang.String.trim).
    char[] charsLessThan0x20 = new char[10];
    Arrays.fill(charsLessThan0x20, (char)(' ' - 1));
    String stringStartingWithSpace =
        new String(charsLessThan0x20) + "hello" + new String(charsLessThan0x20);
    assertEquals(fromString(stringStartingWithSpace), fromString(stringStartingWithSpace).trim());
    assertEquals(fromString(stringStartingWithSpace),
        fromString(stringStartingWithSpace).trimLeft());
    assertEquals(fromString(stringStartingWithSpace),
        fromString(stringStartingWithSpace).trimRight());
  }
  /** indexOf(target, start) takes and returns character positions, not byte offsets. */
  @Test
  public void indexOf() {
    assertEquals(0, EMPTY_UTF8.indexOf(EMPTY_UTF8, 0));
    assertEquals(-1, EMPTY_UTF8.indexOf(fromString("l"), 0));
    // The empty string is found at the start position.
    assertEquals(0, fromString("hello").indexOf(EMPTY_UTF8, 0));
    assertEquals(2, fromString("hello").indexOf(fromString("l"), 0));
    // The start argument skips earlier matches.
    assertEquals(3, fromString("hello").indexOf(fromString("l"), 3));
    assertEquals(-1, fromString("hello").indexOf(fromString("a"), 0));
    assertEquals(2, fromString("hello").indexOf(fromString("ll"), 0));
    assertEquals(-1, fromString("hello").indexOf(fromString("ll"), 4));
    // Multi-byte characters: positions count characters ("据砖" is at char 1, not byte 3).
    assertEquals(1, fromString("数据砖头").indexOf(fromString("据砖"), 0));
    assertEquals(-1, fromString("数据砖头").indexOf(fromString("数"), 3));
    assertEquals(0, fromString("数据砖头").indexOf(fromString("数"), 0));
    assertEquals(3, fromString("数据砖头").indexOf(fromString("头"), 0));
  }
  /**
   * subStringIndex(delim, count) returns everything before the count-th delimiter;
   * a negative count selects everything after the count-th delimiter from the end.
   */
  @Test
  public void substring_index() {
    // count >= number of delimiters returns the whole string.
    assertEquals(fromString("www.apache.org"),
      fromString("www.apache.org").subStringIndex(fromString("."), 3));
    assertEquals(fromString("www.apache"),
      fromString("www.apache.org").subStringIndex(fromString("."), 2));
    assertEquals(fromString("www"),
      fromString("www.apache.org").subStringIndex(fromString("."), 1));
    // count == 0 yields the empty string.
    assertEquals(fromString(""),
      fromString("www.apache.org").subStringIndex(fromString("."), 0));
    assertEquals(fromString("org"),
      fromString("www.apache.org").subStringIndex(fromString("."), -1));
    assertEquals(fromString("apache.org"),
      fromString("www.apache.org").subStringIndex(fromString("."), -2));
    assertEquals(fromString("www.apache.org"),
      fromString("www.apache.org").subStringIndex(fromString("."), -3));
    // str is empty string
    assertEquals(fromString(""),
      fromString("").subStringIndex(fromString("."), 1));
    // empty string delim
    assertEquals(fromString(""),
      fromString("www.apache.org").subStringIndex(fromString(""), 1));
    // delim does not exist in str
    assertEquals(fromString("www.apache.org"),
      fromString("www.apache.org").subStringIndex(fromString("#"), 2));
    // delim is 2 chars
    assertEquals(fromString("www||apache"),
      fromString("www||apache||org").subStringIndex(fromString("||"), 2));
    assertEquals(fromString("apache||org"),
      fromString("www||apache||org").subStringIndex(fromString("||"), -2));
    // non ascii chars
    assertEquals(fromString("大千世界大"),
      fromString("大千世界大千世界").subStringIndex(fromString("千"), 2));
    // overlapped delim
    assertEquals(fromString("||"), fromString("||||||").subStringIndex(fromString("|||"), 3));
    assertEquals(fromString("|||"), fromString("||||||").subStringIndex(fromString("|||"), -4));
  }
  /** reverse() reverses by character, keeping each multi-byte character's bytes intact. */
  @Test
  public void reverse() {
    assertEquals(fromString("olleh"), fromString("hello").reverse());
    assertEquals(EMPTY_UTF8, EMPTY_UTF8.reverse());
    assertEquals(fromString("者行孙"), fromString("孙行者").reverse());
    assertEquals(fromString("者行孙 olleh"), fromString("hello 孙行者").reverse());
  }
  /** repeat(n) concatenates n copies; a non-positive count yields the empty string. */
  @Test
  public void repeat() {
    assertEquals(fromString("数d数d数d数d数d"), fromString("数d").repeat(5));
    assertEquals(fromString("数d"), fromString("数d").repeat(1));
    assertEquals(EMPTY_UTF8, fromString("数d").repeat(-1));
  }
  /**
   * lpad/rpad truncate to the target length when the input is longer, otherwise fill from
   * the pad string (repeated and cut as needed). Lengths count characters, not bytes.
   * An empty pad string can only shorten, never extend.
   */
  @Test
  public void pad() {
    // ASCII input, ASCII pad.
    assertEquals(fromString("hel"), fromString("hello").lpad(3, fromString("????")));
    assertEquals(fromString("hello"), fromString("hello").lpad(5, fromString("????")));
    assertEquals(fromString("?hello"), fromString("hello").lpad(6, fromString("????")));
    assertEquals(fromString("???????hello"), fromString("hello").lpad(12, fromString("????")));
    assertEquals(fromString("?????hello"), fromString("hello").lpad(10, fromString("?????")));
    assertEquals(fromString("???????"), EMPTY_UTF8.lpad(7, fromString("?????")));
    assertEquals(fromString("hel"), fromString("hello").rpad(3, fromString("????")));
    assertEquals(fromString("hello"), fromString("hello").rpad(5, fromString("????")));
    assertEquals(fromString("hello?"), fromString("hello").rpad(6, fromString("????")));
    assertEquals(fromString("hello???????"), fromString("hello").rpad(12, fromString("????")));
    assertEquals(fromString("hello?????"), fromString("hello").rpad(10, fromString("?????")));
    assertEquals(fromString("???????"), EMPTY_UTF8.rpad(7, fromString("?????")));
    // Multi-byte input and pad strings.
    assertEquals(fromString("数据砖"), fromString("数据砖头").lpad(3, fromString("????")));
    assertEquals(fromString("?数据砖头"), fromString("数据砖头").lpad(5, fromString("????")));
    assertEquals(fromString("??数据砖头"), fromString("数据砖头").lpad(6, fromString("????")));
    assertEquals(fromString("孙行数据砖头"), fromString("数据砖头").lpad(6, fromString("孙行者")));
    assertEquals(fromString("孙行者数据砖头"), fromString("数据砖头").lpad(7, fromString("孙行者")));
    assertEquals(
      fromString("孙行者孙行者孙行数据砖头"),
      fromString("数据砖头").lpad(12, fromString("孙行者")));
    assertEquals(fromString("数据砖"), fromString("数据砖头").rpad(3, fromString("????")));
    assertEquals(fromString("数据砖头?"), fromString("数据砖头").rpad(5, fromString("????")));
    assertEquals(fromString("数据砖头??"), fromString("数据砖头").rpad(6, fromString("????")));
    assertEquals(fromString("数据砖头孙行"), fromString("数据砖头").rpad(6, fromString("孙行者")));
    assertEquals(fromString("数据砖头孙行者"), fromString("数据砖头").rpad(7, fromString("孙行者")));
    assertEquals(
      fromString("数据砖头孙行者孙行者孙行"),
      fromString("数据砖头").rpad(12, fromString("孙行者")));
    // Negative target length, or empty pad string.
    assertEquals(EMPTY_UTF8, fromString("数据砖头").lpad(-10, fromString("孙行者")));
    assertEquals(EMPTY_UTF8, fromString("数据砖头").lpad(-10, EMPTY_UTF8));
    assertEquals(fromString("数据砖头"), fromString("数据砖头").lpad(5, EMPTY_UTF8));
    assertEquals(fromString("数据砖"), fromString("数据砖头").lpad(3, EMPTY_UTF8));
    assertEquals(EMPTY_UTF8, EMPTY_UTF8.lpad(3, EMPTY_UTF8));
    assertEquals(EMPTY_UTF8, fromString("数据砖头").rpad(-10, fromString("孙行者")));
    assertEquals(EMPTY_UTF8, fromString("数据砖头").rpad(-10, EMPTY_UTF8));
    assertEquals(fromString("数据砖头"), fromString("数据砖头").rpad(5, EMPTY_UTF8));
    assertEquals(fromString("数据砖"), fromString("数据砖头").rpad(3, EMPTY_UTF8));
    assertEquals(EMPTY_UTF8, EMPTY_UTF8.rpad(3, EMPTY_UTF8));
  }
  /** substringSQL uses SQL's 1-based (start, length) semantics; position 0 behaves like 1. */
  @Test
  public void substringSQL() {
    UTF8String e = fromString("example");
    assertEquals(e.substringSQL(0, 2), fromString("ex"));
    assertEquals(e.substringSQL(1, 2), fromString("ex"));
    assertEquals(e.substringSQL(0, 7), fromString("example"));
    // NOTE(review): duplicate of the (1, 2) assertion above — one of them was probably
    // intended to cover a different start/length combination.
    assertEquals(e.substringSQL(1, 2), fromString("ex"));
    // Lengths past the end of the string are clamped.
    assertEquals(e.substringSQL(0, 100), fromString("example"));
    assertEquals(e.substringSQL(1, 100), fromString("example"));
    assertEquals(e.substringSQL(2, 2), fromString("xa"));
    assertEquals(e.substringSQL(1, 6), fromString("exampl"));
    assertEquals(e.substringSQL(2, 100), fromString("xample"));
    assertEquals(e.substringSQL(0, 0), fromString(""));
    // A start position past the end yields the empty string.
    assertEquals(e.substringSQL(100, 4), EMPTY_UTF8);
    // Integer.MAX_VALUE length must not overflow the end-position arithmetic.
    assertEquals(e.substringSQL(0, Integer.MAX_VALUE), fromString("example"));
    assertEquals(e.substringSQL(1, Integer.MAX_VALUE), fromString("example"));
    assertEquals(e.substringSQL(2, Integer.MAX_VALUE), fromString("xample"));
  }
  /** split(sep, limit): limit -1 means no limit; a positive limit caps the number of pieces. */
  @Test
  public void split() {
    assertTrue(Arrays.equals(fromString("ab,def,ghi").split(fromString(","), -1),
      new UTF8String[]{fromString("ab"), fromString("def"), fromString("ghi")}));
    assertTrue(Arrays.equals(fromString("ab,def,ghi").split(fromString(","), 2),
      new UTF8String[]{fromString("ab"), fromString("def,ghi")}));
    // NOTE(review): exact duplicate of the assertion above — likely one was meant to use a
    // different limit (e.g. a limit larger than the number of pieces).
    assertTrue(Arrays.equals(fromString("ab,def,ghi").split(fromString(","), 2),
      new UTF8String[]{fromString("ab"), fromString("def,ghi")}));
  }
  /** Edit distance over characters (not bytes), including empty strings and multi-byte text. */
  @Test
  public void levenshteinDistance() {
    assertEquals(0, EMPTY_UTF8.levenshteinDistance(EMPTY_UTF8));
    // Distance to/from empty is the other string's length.
    assertEquals(1, EMPTY_UTF8.levenshteinDistance(fromString("a")));
    assertEquals(7, fromString("aaapppp").levenshteinDistance(EMPTY_UTF8));
    assertEquals(1, fromString("frog").levenshteinDistance(fromString("fog")));
    assertEquals(3, fromString("fly").levenshteinDistance(fromString("ant")));
    // The distance is symmetric.
    assertEquals(7, fromString("elephant").levenshteinDistance(fromString("hippo")));
    assertEquals(7, fromString("hippo").levenshteinDistance(fromString("elephant")));
    assertEquals(8, fromString("hippo").levenshteinDistance(fromString("zzzzzzzz")));
    assertEquals(1, fromString("hello").levenshteinDistance(fromString("hallo")));
    // Multi-byte characters count as single edits.
    assertEquals(4, fromString("世界千世").levenshteinDistance(fromString("千a世b")));
  }
  /**
   * translate() maps each character through the given table; mapping a character to '\0'
   * deletes it, and an empty table leaves the string unchanged.
   */
  @Test
  public void translate() {
    // 'r'->'1', 'n'->'2', 'l'->'3'; both 't's are deleted via the '\0' mapping.
    assertEquals(
      fromString("1a2s3ae"),
      fromString("translate").translate(ImmutableMap.of(
        'r', '1',
        'n', '2',
        'l', '3',
        't', '\0'
      )));
    assertEquals(
      fromString("translate"),
      fromString("translate").translate(new HashMap<Character, Character>()));
    assertEquals(
      fromString("asae"),
      fromString("translate").translate(ImmutableMap.of(
        'r', '\0',
        'n', '\0',
        'l', '\0',
        't', '\0'
      )));
    // Multi-byte characters can be mapped as well.
    assertEquals(
      fromString("aa世b"),
      fromString("花花世界").translate(ImmutableMap.of(
        '花', 'a',
        '界', 'b'
      )));
  }
@Test
public void createBlankString() {
assertEquals(fromString(" "), blankString(1));
assertEquals(fromString(" "), blankString(2));
assertEquals(fromString(" "), blankString(3));
assertEquals(fromString(""), blankString(0));
}
  /** findInSet returns the 1-based position of str among comma-separated elements. */
  @Test
  public void findInSet() {
    assertTrue:
    assertEquals(1, fromString("ab").findInSet(fromString("ab")));
    assertEquals(2, fromString("a,b").findInSet(fromString("b")));
    // Only whole elements match ("ab" is element 3, not a prefix of element 1).
    assertEquals(3, fromString("abc,b,ab,c,def").findInSet(fromString("ab")));
    assertEquals(1, fromString("ab,abc,b,ab,c,def").findInSet(fromString("ab")));
    assertEquals(4, fromString(",,,ab,abc,b,ab,c,def").findInSet(fromString("ab")));
    // The empty string matches the first empty element.
    assertEquals(1, fromString(",ab,abc,b,ab,c,def").findInSet(fromString("")));
    assertEquals(4, fromString("数据砖头,abc,b,ab,c,def").findInSet(fromString("ab")));
    assertEquals(6, fromString("数据砖头,abc,b,ab,c,def").findInSet(fromString("def")));
  }
  /**
   * soundex() produces the classic 4-character phonetic code (letter + 3 digits);
   * strings not starting with an ASCII letter are returned unchanged.
   */
  @Test
  public void soundex() {
    // Names that sound alike must share a code.
    assertEquals(fromString("Robert").soundex(), fromString("R163"));
    assertEquals(fromString("Rupert").soundex(), fromString("R163"));
    assertEquals(fromString("Rubin").soundex(), fromString("R150"));
    assertEquals(fromString("Ashcraft").soundex(), fromString("A261"));
    assertEquals(fromString("Ashcroft").soundex(), fromString("A261"));
    assertEquals(fromString("Burroughs").soundex(), fromString("B620"));
    assertEquals(fromString("Burrows").soundex(), fromString("B620"));
    assertEquals(fromString("Ekzampul").soundex(), fromString("E251"))
;
    assertEquals(fromString("Example").soundex(), fromString("E251"));
    assertEquals(fromString("Ellery").soundex(), fromString("E460"));
    assertEquals(fromString("Euler").soundex(), fromString("E460"));
    assertEquals(fromString("Ghosh").soundex(), fromString("G200"));
    assertEquals(fromString("Gauss").soundex(), fromString("G200"));
    assertEquals(fromString("Gutierrez").soundex(), fromString("G362"));
    assertEquals(fromString("Heilbronn").soundex(), fromString("H416"));
    assertEquals(fromString("Hilbert").soundex(), fromString("H416"));
    assertEquals(fromString("Jackson").soundex(), fromString("J250"));
    assertEquals(fromString("Kant").soundex(), fromString("K530"));
    assertEquals(fromString("Knuth").soundex(), fromString("K530"));
    assertEquals(fromString("Lee").soundex(), fromString("L000"));
    assertEquals(fromString("Lukasiewicz").soundex(), fromString("L222"));
    assertEquals(fromString("Lissajous").soundex(), fromString("L222"));
    assertEquals(fromString("Ladd").soundex(), fromString("L300"));
    assertEquals(fromString("Lloyd").soundex(), fromString("L300"));
    assertEquals(fromString("Moses").soundex(), fromString("M220"));
    assertEquals(fromString("O'Hara").soundex(), fromString("O600"));
    assertEquals(fromString("Pfister").soundex(), fromString("P236"));
    assertEquals(fromString("Rubin").soundex(), fromString("R150"));
    assertEquals(fromString("Robert").soundex(), fromString("R163"));
    assertEquals(fromString("Rupert").soundex(), fromString("R163"));
    assertEquals(fromString("Soundex").soundex(), fromString("S532"));
    assertEquals(fromString("Sownteks").soundex(), fromString("S532"));
    assertEquals(fromString("Tymczak").soundex(), fromString("T522"));
    assertEquals(fromString("VanDeusen").soundex(), fromString("V532"));
    assertEquals(fromString("Washington").soundex(), fromString("W252"));
    assertEquals(fromString("Wheaton").soundex(), fromString("W350"));
    // Short inputs are zero-padded to 4 characters.
    assertEquals(fromString("a").soundex(), fromString("A000"));
    assertEquals(fromString("ab").soundex(), fromString("A100"));
    assertEquals(fromString("abc").soundex(), fromString("A120"));
    assertEquals(fromString("abcd").soundex(), fromString("A123"));
    // Empty, numeric and non-ASCII inputs come back unchanged.
    assertEquals(fromString("").soundex(), fromString(""));
    assertEquals(fromString("123").soundex(), fromString("123"));
    assertEquals(fromString("世界千世").soundex(), fromString("世界千世"));
  }
  /**
   * writeTo() with a base offset pointing i bytes BEFORE the array's first element:
   * the stream then receives i leading garbage bytes followed by the intact payload.
   */
  @Test
  public void writeToOutputStreamUnderflow() throws IOException {
    // offset underflow is apparently supported?
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    final byte[] test = "01234567".getBytes(StandardCharsets.UTF_8);
    for (int i = 1; i <= Platform.BYTE_ARRAY_OFFSET; ++i) {
      UTF8String.fromAddress(test, Platform.BYTE_ARRAY_OFFSET - i, test.length + i)
        .writeTo(outputStream);
      // Skip the i garbage bytes and verify the payload survived.
      final ByteBuffer buffer = ByteBuffer.wrap(outputStream.toByteArray(), i, test.length);
      assertEquals("01234567", StandardCharsets.UTF_8.decode(buffer).toString());
      outputStream.reset();
    }
  }
  /** writeTo() of every (start, length) slice of a backing array emits exactly those bytes. */
  @Test
  public void writeToOutputStreamSlice() throws IOException {
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    final byte[] test = "01234567".getBytes(StandardCharsets.UTF_8);
    // i = slice start, j = slice length; together they cover all in-bounds slices.
    for (int i = 0; i < test.length; ++i) {
      for (int j = 0; j < test.length - i; ++j) {
        UTF8String.fromAddress(test, Platform.BYTE_ARRAY_OFFSET + i, j)
          .writeTo(outputStream);
        assertArrayEquals(Arrays.copyOfRange(test, i, i + j), outputStream.toByteArray());
        outputStream.reset();
      }
    }
  }
  /**
   * writeTo() with offsets far past the backing array (near Integer.MAX_VALUE,
   * Long.MAX_VALUE and every power of two) must fail with
   * ArrayIndexOutOfBoundsException rather than read out of bounds.
   */
  @Test
  public void writeToOutputStreamOverflow() throws IOException {
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    final byte[] test = "01234567".getBytes(StandardCharsets.UTF_8);
    final HashSet<Long> offsets = new HashSet<>();
    for (int i = 0; i < 16; ++i) {
      // touch more points around MAX_VALUE
      offsets.add((long) Integer.MAX_VALUE - i);
      // subtract off BYTE_ARRAY_OFFSET to avoid wrapping around to a negative value,
      // which will hit the slower copy path instead of the optimized one
      offsets.add(Long.MAX_VALUE - BYTE_ARRAY_OFFSET - i);
    }
    for (long i = 1; i > 0L; i <<= 1) {
      for (long j = 0; j < 32L; ++j) {
        offsets.add(i + j);
      }
    }
    for (final long offset : offsets) {
      try {
        fromAddress(test, BYTE_ARRAY_OFFSET + offset, test.length)
          .writeTo(outputStream);
        // Reaching here means no exception was thrown; carry the offset in the failure.
        throw new IllegalStateException(Long.toString(offset));
      } catch (ArrayIndexOutOfBoundsException e) {
        // ignore
      } finally {
        outputStream.reset();
      }
    }
  }
  /** writeTo() round-trips the raw UTF-8 bytes, for empty and multi-byte strings. */
  @Test
  public void writeToOutputStream() throws IOException {
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    EMPTY_UTF8.writeTo(outputStream);
    assertEquals("", outputStream.toString("UTF-8"));
    outputStream.reset();

    fromString("数据砖很重").writeTo(outputStream);
    assertEquals(
      "数据砖很重",
      outputStream.toString("UTF-8"));
    outputStream.reset();
  }
  /** writeTo() must work when the base object is an int[] rather than a byte[]. */
  @Test
  public void writeToOutputStreamIntArray() throws IOException {
    // verify that writes work on objects that are not byte arrays
    final ByteBuffer buffer = StandardCharsets.UTF_8.encode("大千世界");
    buffer.position(0);
    // Native order so the ints hold the UTF-8 bytes in memory layout order.
    buffer.order(ByteOrder.nativeOrder());

    final int length = buffer.limit();
    assertEquals(12, length); // 4 chars * 3 bytes each

    final int ints = length / 4;
    final int[] array = new int[ints];

    for (int i = 0; i < ints; ++i) {
      array[i] = buffer.getInt();
    }

    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    fromAddress(array, Platform.INT_ARRAY_OFFSET, length)
      .writeTo(outputStream);
    assertEquals("大千世界", outputStream.toString("UTF-8"));
  }
  /** toShort() parses signed decimal strings (ignoring a fractional tail) and flags bad input. */
  @Test
  public void testToShort() throws IOException {
    Map<String, Short> inputToExpectedOutput = new HashMap<>();
    inputToExpectedOutput.put("1", (short) 1);
    inputToExpectedOutput.put("+1", (short) 1);
    inputToExpectedOutput.put("-1", (short) -1);
    inputToExpectedOutput.put("0", (short) 0);
    // The fractional part is ignored for integral conversion.
    inputToExpectedOutput.put("1111.12345678901234567890", (short) 1111);
    inputToExpectedOutput.put(String.valueOf(Short.MAX_VALUE), Short.MAX_VALUE);
    inputToExpectedOutput.put(String.valueOf(Short.MIN_VALUE), Short.MIN_VALUE);

    Random rand = new Random();
    for (int i = 0; i < 10; i++) {
      short value = (short) rand.nextInt();
      inputToExpectedOutput.put(String.valueOf(value), value);
    }

    // toShort reports success via its boolean return and writes the result into the wrapper.
    IntWrapper wrapper = new IntWrapper();
    for (Map.Entry<String, Short> entry : inputToExpectedOutput.entrySet()) {
      assertTrue(entry.getKey(), UTF8String.fromString(entry.getKey()).toShort(wrapper));
      assertEquals((short) entry.getValue(), wrapper.value);
    }

    // Blank, non-numeric and out-of-range ("3276700") inputs must report failure.
    List<String> negativeInputs =
        Arrays.asList("", " ", "null", "NULL", "\n", "~1212121", "3276700");
    for (String negativeInput : negativeInputs) {
      assertFalse(negativeInput, UTF8String.fromString(negativeInput).toShort(wrapper));
    }
  }
  /** toByte() parses signed decimal strings (ignoring a fractional tail) and flags bad input. */
  @Test
  public void testToByte() throws IOException {
    Map<String, Byte> inputToExpectedOutput = new HashMap<>();
    inputToExpectedOutput.put("1", (byte) 1);
    inputToExpectedOutput.put("+1",(byte) 1);
    inputToExpectedOutput.put("-1", (byte) -1);
    inputToExpectedOutput.put("0", (byte) 0);
    // The fractional part is ignored for integral conversion.
    inputToExpectedOutput.put("111.12345678901234567890", (byte) 111);
    inputToExpectedOutput.put(String.valueOf(Byte.MAX_VALUE), Byte.MAX_VALUE);
    inputToExpectedOutput.put(String.valueOf(Byte.MIN_VALUE), Byte.MIN_VALUE);

    Random rand = new Random();
    for (int i = 0; i < 10; i++) {
      byte value = (byte) rand.nextInt();
      inputToExpectedOutput.put(String.valueOf(value), value);
    }

    // toByte reports success via its boolean return and writes the result into the wrapper.
    IntWrapper intWrapper = new IntWrapper();
    for (Map.Entry<String, Byte> entry : inputToExpectedOutput.entrySet()) {
      assertTrue(entry.getKey(), UTF8String.fromString(entry.getKey()).toByte(intWrapper));
      assertEquals((byte) entry.getValue(), intWrapper.value);
    }

    // Blank, non-numeric and out-of-range inputs must report failure.
    List<String> negativeInputs =
        Arrays.asList("", " ", "null", "NULL", "\n", "~1212121", "12345678901234567890");
    for (String negativeInput : negativeInputs) {
      assertFalse(negativeInput, UTF8String.fromString(negativeInput).toByte(intWrapper));
    }
  }
  /** toInt() parses signed decimal strings (ignoring a fractional tail) and flags bad input. */
  @Test
  public void testToInt() throws IOException {
    Map<String, Integer> inputToExpectedOutput = new HashMap<>();
    inputToExpectedOutput.put("1", 1);
    inputToExpectedOutput.put("+1", 1);
    inputToExpectedOutput.put("-1", -1);
    inputToExpectedOutput.put("0", 0);
    // The fractional part is ignored for integral conversion.
    inputToExpectedOutput.put("11111.1234567", 11111);
    inputToExpectedOutput.put(String.valueOf(Integer.MAX_VALUE), Integer.MAX_VALUE);
    inputToExpectedOutput.put(String.valueOf(Integer.MIN_VALUE), Integer.MIN_VALUE);

    Random rand = new Random();
    for (int i = 0; i < 10; i++) {
      int value = rand.nextInt();
      inputToExpectedOutput.put(String.valueOf(value), value);
    }

    // toInt reports success via its boolean return and writes the result into the wrapper.
    IntWrapper intWrapper = new IntWrapper();
    for (Map.Entry<String, Integer> entry : inputToExpectedOutput.entrySet()) {
      assertTrue(entry.getKey(), UTF8String.fromString(entry.getKey()).toInt(intWrapper));
      assertEquals((int) entry.getValue(), intWrapper.value);
    }

    // Blank, non-numeric and out-of-range inputs must report failure.
    List<String> negativeInputs =
        Arrays.asList("", " ", "null", "NULL", "\n", "~1212121", "12345678901234567890");
    for (String negativeInput : negativeInputs) {
      assertFalse(negativeInput, UTF8String.fromString(negativeInput).toInt(intWrapper));
    }
  }
  /** toLong() parses signed decimal strings (ignoring a fractional tail) and flags bad input. */
  @Test
  public void testToLong() throws IOException {
    Map<String, Long> inputToExpectedOutput = new HashMap<>();
    inputToExpectedOutput.put("1", 1L);
    inputToExpectedOutput.put("+1", 1L);
    inputToExpectedOutput.put("-1", -1L);
    inputToExpectedOutput.put("0", 0L);
    // The fractional part is ignored for integral conversion.
    inputToExpectedOutput.put("1076753423.12345678901234567890", 1076753423L);
    inputToExpectedOutput.put(String.valueOf(Long.MAX_VALUE), Long.MAX_VALUE);
    inputToExpectedOutput.put(String.valueOf(Long.MIN_VALUE), Long.MIN_VALUE);

    Random rand = new Random();
    for (int i = 0; i < 10; i++) {
      long value = rand.nextLong();
      inputToExpectedOutput.put(String.valueOf(value), value);
    }

    // toLong reports success via its boolean return and writes the result into the wrapper.
    LongWrapper wrapper = new LongWrapper();
    for (Map.Entry<String, Long> entry : inputToExpectedOutput.entrySet()) {
      assertTrue(entry.getKey(), UTF8String.fromString(entry.getKey()).toLong(wrapper));
      assertEquals((long) entry.getValue(), wrapper.value);
    }

    // Blank, non-numeric and out-of-range inputs must report failure.
    List<String> negativeInputs = Arrays.asList("", " ", "null", "NULL", "\n", "~1212121",
        "1234567890123456789012345678901234");
    for (String negativeInput : negativeInputs) {
      assertFalse(negativeInput, UTF8String.fromString(negativeInput).toLong(wrapper));
    }
  }
/** Verifies two-sided trim with an explicit set of trim characters. */
@Test
public void trimBothWithTrimString() {
  // ASCII inputs: every leading/trailing char drawn from the trim set is removed.
  assertEquals(fromString("hello"), fromString(" hello ").trim(fromString(" ")));
  assertEquals(fromString("o"), fromString(" hello ").trim(fromString(" hle")));
  assertEquals(fromString("h e"), fromString("ooh e ooo").trim(fromString("o ")));
  assertEquals(fromString(""), fromString("ooo...oooo").trim(fromString("o.")));
  assertEquals(fromString("b"), fromString("%^b[]@").trim(fromString("][@^%")));
  assertEquals(EMPTY_UTF8, fromString(" ").trim(fromString(" ")));
  // Inputs with multi-byte (Chinese) characters mixed into both the string
  // and the trim set.
  assertEquals(fromString("数据砖头"), fromString(" 数据砖头 ").trim());
  assertEquals(fromString("数"), fromString("a数b").trim(fromString("ab")));
  assertEquals(fromString(""), fromString("a").trim(fromString("a数b")));
  assertEquals(fromString(""), fromString("数数 数数数").trim(fromString("数 ")));
  assertEquals(fromString("据砖头"), fromString("数]数[数据砖头#数数").trim(fromString("[数]#")));
  assertEquals(fromString("据砖头数数 "), fromString("数数数据砖头数数 ").trim(fromString("数")));
}
/** Verifies left-side-only trim with an explicit set of trim characters. */
@Test
public void trimLeftWithTrimString() {
  // An empty trim set removes nothing.
  assertEquals(fromString(" hello "), fromString(" hello ").trimLeft(fromString("")));
  assertEquals(fromString(""), fromString("a").trimLeft(fromString("a")));
  assertEquals(fromString("b"), fromString("b").trimLeft(fromString("a")));
  // Trailing characters from the trim set must be preserved.
  assertEquals(fromString("ba"), fromString("ba").trimLeft(fromString("a")));
  assertEquals(fromString(""), fromString("aaaaaaa").trimLeft(fromString("a")));
  assertEquals(fromString("trim"), fromString("oabtrim").trimLeft(fromString("bao")));
  assertEquals(fromString("rim "), fromString("ooootrim ").trimLeft(fromString("otm")));
  assertEquals(EMPTY_UTF8, fromString(" ").trimLeft(fromString(" ")));
  // Inputs with multi-byte (Chinese) characters mixed into both the string
  // and the trim set.
  assertEquals(fromString("数据砖头 "), fromString(" 数据砖头 ").trimLeft(fromString(" ")));
  assertEquals(fromString("数"), fromString("数").trimLeft(fromString("a")));
  assertEquals(fromString("a"), fromString("a").trimLeft(fromString("数")));
  assertEquals(fromString("砖头数数"), fromString("数数数据砖头数数").trimLeft(fromString("据数")));
  assertEquals(fromString("据砖头数数"), fromString(" 数数数据砖头数数").trimLeft(fromString("数 ")));
  assertEquals(fromString("据砖头数数"), fromString("aa数数数据砖头数数").trimLeft(fromString("a数砖")));
  assertEquals(fromString("$S,.$BR"), fromString(",,,,%$S,.$BR").trimLeft(fromString("%,")));
}
/** Verifies right-side-only trim with an explicit set of trim characters. */
@Test
public void trimRightWithTrimString() {
  // An empty trim set removes nothing.
  assertEquals(fromString(" hello "), fromString(" hello ").trimRight(fromString("")));
  assertEquals(fromString(""), fromString("a").trimRight(fromString("a")));
  // Leading characters from the trim set must be preserved.
  assertEquals(fromString("cc"), fromString("ccbaaaa").trimRight(fromString("ba")));
  assertEquals(fromString(""), fromString("aabbbbaaa").trimRight(fromString("ab")));
  assertEquals(fromString(" he"), fromString(" hello ").trimRight(fromString(" ol")));
  assertEquals(fromString("oohell"),
    fromString("oohellooo../*&").trimRight(fromString("./,&%*o")));
  assertEquals(EMPTY_UTF8, fromString(" ").trimRight(fromString(" ")));
  // Inputs with multi-byte (Chinese) characters mixed into both the string
  // and the trim set.
  assertEquals(fromString(" 数据砖头"), fromString(" 数据砖头 ").trimRight(fromString(" ")));
  assertEquals(fromString("数数砖头"), fromString("数数砖头数aa数").trimRight(fromString("a数")));
  assertEquals(fromString(""), fromString("数数数据砖ab").trimRight(fromString("数据砖ab")));
  assertEquals(fromString("头"), fromString("头a???/").trimRight(fromString("数?/*&^%a")));
  assertEquals(fromString("头"), fromString("头数b数数 [").trimRight(fromString(" []数b")));
}
/**
 * Bytes that can never begin a well-formed UTF-8 code point must still be
 * counted as exactly one character by numChars(), so malformed input does not
 * derail string traversal.
 */
@Test
public void skipWrongFirstByte() {
  int[] wrongFirstBytes = {
    0x80, 0x9F, 0xBF, // Continuation bytes (never valid as a first byte)
    0xC0, 0xC2, // 0xC0..0xC1 - disallowed in UTF-8; 0xC2 alone is a truncated sequence
    // 0xF5..0xFF - disallowed in UTF-8
    0xF5, 0xF6, 0xF7, 0xF8, 0xF9,
    0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
  };
  byte[] c = new byte[1];
  for (int wrongByte : wrongFirstBytes) {
    c[0] = (byte) wrongByte;
    // Fixed argument order: JUnit's assertEquals takes (expected, actual);
    // the original had them swapped, producing misleading failure messages.
    assertEquals(1, fromBytes(c).numChars());
  }
}
}
| 9,925 |
0 | Create_ds/spark/common/unsafe/src/test/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/test/java/org/apache/spark/unsafe/hash/Murmur3_x86_32Suite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.hash;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import scala.util.hashing.MurmurHash3$;
import org.apache.spark.unsafe.Platform;
import org.junit.Assert;
import org.junit.Test;
/**
* Test file based on Guava's Murmur3Hash32Test.
*/
public class Murmur3_x86_32Suite {

  // All golden values below assume a hash seed of 0.
  private static final Murmur3_x86_32 hasher = new Murmur3_x86_32(0);

  /** Golden-value checks for hashInt, including both int extremes. */
  @Test
  public void testKnownIntegerInputs() {
    Assert.assertEquals(593689054, hasher.hashInt(0));
    Assert.assertEquals(-189366624, hasher.hashInt(-42));
    Assert.assertEquals(-1134849565, hasher.hashInt(42));
    Assert.assertEquals(-1718298732, hasher.hashInt(Integer.MIN_VALUE));
    Assert.assertEquals(-1653689534, hasher.hashInt(Integer.MAX_VALUE));
  }

  /** Golden-value checks for hashLong, including both long extremes. */
  @Test
  public void testKnownLongInputs() {
    Assert.assertEquals(1669671676, hasher.hashLong(0L));
    Assert.assertEquals(-846261623, hasher.hashLong(-42L));
    Assert.assertEquals(1871679806, hasher.hashLong(42L));
    Assert.assertEquals(1366273829, hasher.hashLong(Long.MIN_VALUE));
    Assert.assertEquals(-2106506049, hasher.hashLong(Long.MAX_VALUE));
  }

  // SPARK-23381 Check whether the hash of the byte array is the same as another implementations
  @Test
  public void testKnownBytesInputs() {
    // Inputs of length 4, 5, 2 and 3 cover every tail-length branch of the
    // byte-wise algorithm; each is cross-checked against Scala's MurmurHash3.
    byte[] test = "test".getBytes(StandardCharsets.UTF_8);
    Assert.assertEquals(MurmurHash3$.MODULE$.bytesHash(test, 0),
      Murmur3_x86_32.hashUnsafeBytes2(test, Platform.BYTE_ARRAY_OFFSET, test.length, 0));
    byte[] test1 = "test1".getBytes(StandardCharsets.UTF_8);
    Assert.assertEquals(MurmurHash3$.MODULE$.bytesHash(test1, 0),
      Murmur3_x86_32.hashUnsafeBytes2(test1, Platform.BYTE_ARRAY_OFFSET, test1.length, 0));
    byte[] te = "te".getBytes(StandardCharsets.UTF_8);
    Assert.assertEquals(MurmurHash3$.MODULE$.bytesHash(te, 0),
      Murmur3_x86_32.hashUnsafeBytes2(te, Platform.BYTE_ARRAY_OFFSET, te.length, 0));
    byte[] tes = "tes".getBytes(StandardCharsets.UTF_8);
    Assert.assertEquals(MurmurHash3$.MODULE$.bytesHash(tes, 0),
      Murmur3_x86_32.hashUnsafeBytes2(tes, Platform.BYTE_ARRAY_OFFSET, tes.length, 0));
  }

  /** Determinism plus a loose collision bound over random int/long inputs. */
  @Test
  public void randomizedStressTest() {
    int size = 65536;
    Random rand = new Random();
    // A set used to track collision rate.
    Set<Integer> hashcodes = new HashSet<>();
    for (int i = 0; i < size; i++) {
      int vint = rand.nextInt();
      long lint = rand.nextLong();
      // Same input must hash to the same value on repeated calls.
      Assert.assertEquals(hasher.hashInt(vint), hasher.hashInt(vint));
      Assert.assertEquals(hasher.hashLong(lint), hasher.hashLong(lint));
      hashcodes.add(hasher.hashLong(lint));
    }
    // A very loose bound.
    Assert.assertTrue(hashcodes.size() > size * 0.95);
  }

  /** Determinism plus a loose collision bound over random word-aligned byte arrays. */
  @Test
  public void randomizedStressTestBytes() {
    int size = 65536;
    Random rand = new Random();
    // A set used to track collision rate.
    Set<Integer> hashcodes = new HashSet<>();
    for (int i = 0; i < size; i++) {
      // Array length is a multiple of 8, as hashUnsafeWords requires whole words.
      int byteArrSize = rand.nextInt(100) * 8;
      byte[] bytes = new byte[byteArrSize];
      rand.nextBytes(bytes);
      Assert.assertEquals(
        hasher.hashUnsafeWords(bytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize),
        hasher.hashUnsafeWords(bytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize));
      hashcodes.add(hasher.hashUnsafeWords(
        bytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize));
    }
    // A very loose bound.
    Assert.assertTrue(hashcodes.size() > size * 0.95);
  }

  /** Collision bound over short numeric strings zero-padded into one 8-byte word. */
  @Test
  public void randomizedStressTestPaddedStrings() {
    int size = 64000;
    // A set used to track collision rate.
    Set<Integer> hashcodes = new HashSet<>();
    for (int i = 0; i < size; i++) {
      int byteArrSize = 8;
      byte[] strBytes = String.valueOf(i).getBytes(StandardCharsets.UTF_8);
      byte[] paddedBytes = new byte[byteArrSize];
      System.arraycopy(strBytes, 0, paddedBytes, 0, strBytes.length);
      Assert.assertEquals(
        hasher.hashUnsafeWords(paddedBytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize),
        hasher.hashUnsafeWords(paddedBytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize));
      hashcodes.add(hasher.hashUnsafeWords(
        paddedBytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize));
    }
    // A very loose bound.
    Assert.assertTrue(hashcodes.size() > size * 0.95);
  }
}
| 9,926 |
0 | Create_ds/spark/common/unsafe/src/test/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/test/java/org/apache/spark/unsafe/array/LongArraySuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.array;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.unsafe.memory.MemoryBlock;
public class LongArraySuite {

  /** Exercises set/get, overwrite semantics, size() and zeroOut() on a two-word array. */
  @Test
  public void basicTest() {
    long[] backing = new long[2];
    LongArray array = new LongArray(MemoryBlock.fromLongArray(backing));
    array.set(0, 1L);
    array.set(1, 2L);
    // A second write to the same slot overwrites the first.
    array.set(1, 3L);
    Assert.assertEquals(2, array.size());
    Assert.assertEquals(1L, array.get(0));
    Assert.assertEquals(3L, array.get(1));
    // zeroOut must clear every slot.
    array.zeroOut();
    for (int slot = 0; slot < array.size(); slot++) {
      Assert.assertEquals(0L, array.get(slot));
    }
  }
}
| 9,927 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/UnsafeAlignedOffset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe;
/**
* Class to make changes to record length offsets uniform through out
* various areas of Apache Spark core and unsafe. The SPARC platform
* requires this because using a 4 byte Int for record lengths causes
* the entire record of 8 byte Items to become misaligned by 4 bytes.
* Using a 8 byte long for record length keeps things 8 byte aligned.
*/
public class UnsafeAlignedOffset {

  // Record-length slots are 4 bytes on platforms that tolerate unaligned
  // access, and widened to 8 bytes elsewhere to keep records word-aligned.
  private static final int UAO_SIZE = Platform.unaligned() ? 4 : 8;

  /** Size in bytes of a record-length field on this platform (4 or 8). */
  public static int getUaoSize() {
    return UAO_SIZE;
  }

  /** Reads a record length stored at {@code offset}, regardless of slot width. */
  public static int getSize(Object object, long offset) {
    if (UAO_SIZE == 4) {
      return Platform.getInt(object, offset);
    }
    if (UAO_SIZE == 8) {
      return (int) Platform.getLong(object, offset);
    }
    throw new AssertionError("Illegal UAO_SIZE");
  }

  /** Writes a record length at {@code offset} using the platform's slot width. */
  public static void putSize(Object object, long offset, int value) {
    if (UAO_SIZE == 4) {
      Platform.putInt(object, offset, value);
    } else if (UAO_SIZE == 8) {
      Platform.putLong(object, offset, value);
    } else {
      throw new AssertionError("Illegal UAO_SIZE");
    }
  }
}
| 9,928 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/Platform.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import sun.misc.Cleaner;
import sun.misc.Unsafe;
public final class Platform {

  // Reflectively obtained sun.misc.Unsafe handle; null when unavailable
  // (see the static initializer at the bottom of this class).
  private static final Unsafe _UNSAFE;

  // Base offsets of the first element of each primitive array type, used to
  // address array contents through Unsafe. All zero when Unsafe is unavailable.
  public static final int BOOLEAN_ARRAY_OFFSET;

  public static final int BYTE_ARRAY_OFFSET;

  public static final int SHORT_ARRAY_OFFSET;

  public static final int INT_ARRAY_OFFSET;

  public static final int LONG_ARRAY_OFFSET;

  public static final int FLOAT_ARRAY_OFFSET;

  public static final int DOUBLE_ARRAY_OFFSET;

  // Whether this platform supports unaligned memory access; computed once below.
  private static final boolean unaligned;
  static {
    boolean _unaligned;
    String arch = System.getProperty("os.arch", "");
    if (arch.equals("ppc64le") || arch.equals("ppc64")) {
      // Since java.nio.Bits.unaligned() doesn't return true on ppc (See JDK-8165231), but
      // ppc64 and ppc64le support it
      _unaligned = true;
    } else {
      // Ask the JDK-internal java.nio.Bits.unaligned() via reflection.
      try {
        Class<?> bitsClass =
          Class.forName("java.nio.Bits", false, ClassLoader.getSystemClassLoader());
        Method unalignedMethod = bitsClass.getDeclaredMethod("unaligned");
        unalignedMethod.setAccessible(true);
        _unaligned = Boolean.TRUE.equals(unalignedMethod.invoke(null));
      } catch (Throwable t) {
        // We at least know x86 and x64 support unaligned access.
        //noinspection DynamicRegexReplaceableByCompiledPattern
        _unaligned = arch.matches("^(i[3-6]86|x86(_64)?|x64|amd64|aarch64)$");
      }
    }
    unaligned = _unaligned;
  }

  /**
   * @return true when running JVM is having sun's Unsafe package available in it and underlying
   *         system having unaligned-access capability.
   */
  public static boolean unaligned() {
    return unaligned;
  }

  // --- Primitive accessors: thin, unchecked wrappers over Unsafe. The caller
  // --- is responsible for supplying a valid (object, offset) pair.

  public static int getInt(Object object, long offset) {
    return _UNSAFE.getInt(object, offset);
  }

  public static void putInt(Object object, long offset, int value) {
    _UNSAFE.putInt(object, offset, value);
  }

  public static boolean getBoolean(Object object, long offset) {
    return _UNSAFE.getBoolean(object, offset);
  }

  public static void putBoolean(Object object, long offset, boolean value) {
    _UNSAFE.putBoolean(object, offset, value);
  }

  public static byte getByte(Object object, long offset) {
    return _UNSAFE.getByte(object, offset);
  }

  public static void putByte(Object object, long offset, byte value) {
    _UNSAFE.putByte(object, offset, value);
  }

  public static short getShort(Object object, long offset) {
    return _UNSAFE.getShort(object, offset);
  }

  public static void putShort(Object object, long offset, short value) {
    _UNSAFE.putShort(object, offset, value);
  }

  public static long getLong(Object object, long offset) {
    return _UNSAFE.getLong(object, offset);
  }

  public static void putLong(Object object, long offset, long value) {
    _UNSAFE.putLong(object, offset, value);
  }

  public static float getFloat(Object object, long offset) {
    return _UNSAFE.getFloat(object, offset);
  }

  public static void putFloat(Object object, long offset, float value) {
    _UNSAFE.putFloat(object, offset, value);
  }

  public static double getDouble(Object object, long offset) {
    return _UNSAFE.getDouble(object, offset);
  }

  public static void putDouble(Object object, long offset, double value) {
    _UNSAFE.putDouble(object, offset, value);
  }

  public static Object getObjectVolatile(Object object, long offset) {
    return _UNSAFE.getObjectVolatile(object, offset);
  }

  public static void putObjectVolatile(Object object, long offset, Object value) {
    _UNSAFE.putObjectVolatile(object, offset, value);
  }

  // --- Off-heap memory management ---

  public static long allocateMemory(long size) {
    return _UNSAFE.allocateMemory(size);
  }

  public static void freeMemory(long address) {
    _UNSAFE.freeMemory(address);
  }

  // Allocates a fresh block, copies oldSize bytes from the old address into it,
  // frees the old block, and returns the new address.
  public static long reallocateMemory(long address, long oldSize, long newSize) {
    long newMemory = _UNSAFE.allocateMemory(newSize);
    copyMemory(null, address, null, newMemory, oldSize);
    freeMemory(address);
    return newMemory;
  }

  /**
   * Uses internal JDK APIs to allocate a DirectByteBuffer while ignoring the JVM's
   * MaxDirectMemorySize limit (the default limit is too low and we do not want to require users
   * to increase it).
   */
  @SuppressWarnings("unchecked")
  public static ByteBuffer allocateDirectBuffer(int size) {
    try {
      // Construct the buffer around manually allocated memory, then install a
      // Cleaner so that memory is freed when the buffer is garbage-collected.
      Class<?> cls = Class.forName("java.nio.DirectByteBuffer");
      Constructor<?> constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE);
      constructor.setAccessible(true);
      Field cleanerField = cls.getDeclaredField("cleaner");
      cleanerField.setAccessible(true);
      long memory = allocateMemory(size);
      ByteBuffer buffer = (ByteBuffer) constructor.newInstance(memory, size);
      Cleaner cleaner = Cleaner.create(buffer, () -> freeMemory(memory));
      cleanerField.set(buffer, cleaner);
      return buffer;
    } catch (Exception e) {
      throwException(e);
    }
    throw new IllegalStateException("unreachable");
  }

  public static void setMemory(Object object, long offset, long size, byte value) {
    _UNSAFE.setMemory(object, offset, size, value);
  }

  // NOTE: parameter order here is (address, value, size), while the underlying
  // Unsafe.setMemory takes (address, size, value) — the wrapper swaps them.
  public static void setMemory(long address, byte value, long size) {
    _UNSAFE.setMemory(address, size, value);
  }

  public static void copyMemory(
    Object src, long srcOffset, Object dst, long dstOffset, long length) {
    // Check if dstOffset is before or after srcOffset to determine if we should copy
    // forward or backwards. This is necessary in case src and dst overlap.
    if (dstOffset < srcOffset) {
      while (length > 0) {
        // Copy in bounded chunks so the JVM can reach a safepoint mid-copy.
        long size = Math.min(length, UNSAFE_COPY_THRESHOLD);
        _UNSAFE.copyMemory(src, srcOffset, dst, dstOffset, size);
        length -= size;
        srcOffset += size;
        dstOffset += size;
      }
    } else {
      // Copy backwards, starting from the end of both regions.
      srcOffset += length;
      dstOffset += length;
      while (length > 0) {
        long size = Math.min(length, UNSAFE_COPY_THRESHOLD);
        srcOffset -= size;
        dstOffset -= size;
        _UNSAFE.copyMemory(src, srcOffset, dst, dstOffset, size);
        length -= size;
      }
    }
  }

  /**
   * Raises an exception bypassing compiler checks for checked exceptions.
   */
  public static void throwException(Throwable t) {
    _UNSAFE.throwException(t);
  }

  /**
   * Limits the number of bytes to copy per {@link Unsafe#copyMemory(long, long, long)} to
   * allow safepoint polling during a large copy.
   */
  private static final long UNSAFE_COPY_THRESHOLD = 1024L * 1024L;

  static {
    // Grab the singleton Unsafe instance reflectively; fall back to null (and
    // zeroed array offsets) if the field is inaccessible on this JVM.
    sun.misc.Unsafe unsafe;
    try {
      Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
      unsafeField.setAccessible(true);
      unsafe = (sun.misc.Unsafe) unsafeField.get(null);
    } catch (Throwable cause) {
      unsafe = null;
    }
    _UNSAFE = unsafe;

    if (_UNSAFE != null) {
      BOOLEAN_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(boolean[].class);
      BYTE_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(byte[].class);
      SHORT_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(short[].class);
      INT_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(int[].class);
      LONG_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(long[].class);
      FLOAT_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(float[].class);
      DOUBLE_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(double[].class);
    } else {
      BOOLEAN_ARRAY_OFFSET = 0;
      BYTE_ARRAY_OFFSET = 0;
      SHORT_ARRAY_OFFSET = 0;
      INT_ARRAY_OFFSET = 0;
      LONG_ARRAY_OFFSET = 0;
      FLOAT_ARRAY_OFFSET = 0;
      DOUBLE_ARRAY_OFFSET = 0;
    }
  }
}
| 9,929 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/KVIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe;
import java.io.IOException;
/**
 * An iterator over key/value pairs whose underlying resources must be
 * released by calling {@link #close()} when iteration is finished.
 */
public abstract class KVIterator<K, V> {

  // Advances the iterator; presumably returns whether another pair is
  // available — TODO confirm against implementations. May perform I/O.
  public abstract boolean next() throws IOException;

  // Key of the pair the iterator is currently positioned at.
  public abstract K getKey();

  // Value of the pair the iterator is currently positioned at.
  public abstract V getValue();

  // Releases any resources held by this iterator.
  public abstract void close();
}
| 9,930 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/bitset/BitSetMethods.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.bitset;
import org.apache.spark.unsafe.Platform;
/**
* Methods for working with fixed-size uncompressed bitsets.
*
* We assume that the bitset data is word-aligned (that is, a multiple of 8 bytes in length).
*
* Each bit occupies exactly one bit of storage.
*/
public final class BitSetMethods {

  // Size in bytes of one 64-bit word of the bitset.
  private static final long WORD_SIZE = 8;

  private BitSetMethods() {
    // Make the default constructor private, since this only holds static methods.
  }

  /**
   * Sets the bit at the specified index to {@code true}.
   */
  public static void set(Object baseObject, long baseOffset, int index) {
    assert index >= 0 : "index (" + index + ") should >= 0";
    final long mask = 1L << (index & 0x3f);  // mod 64 and shift
    final long wordOffset = baseOffset + (index >> 6) * WORD_SIZE;  // index / 64 words in
    final long word = Platform.getLong(baseObject, wordOffset);
    Platform.putLong(baseObject, wordOffset, word | mask);
  }

  /**
   * Sets the bit at the specified index to {@code false}.
   */
  public static void unset(Object baseObject, long baseOffset, int index) {
    assert index >= 0 : "index (" + index + ") should >= 0";
    final long mask = 1L << (index & 0x3f);  // mod 64 and shift
    final long wordOffset = baseOffset + (index >> 6) * WORD_SIZE;
    final long word = Platform.getLong(baseObject, wordOffset);
    Platform.putLong(baseObject, wordOffset, word & ~mask);
  }

  /**
   * Returns {@code true} if the bit is set at the specified index.
   */
  public static boolean isSet(Object baseObject, long baseOffset, int index) {
    assert index >= 0 : "index (" + index + ") should >= 0";
    final long mask = 1L << (index & 0x3f);  // mod 64 and shift
    final long wordOffset = baseOffset + (index >> 6) * WORD_SIZE;
    final long word = Platform.getLong(baseObject, wordOffset);
    return (word & mask) != 0;
  }

  /**
   * Returns {@code true} if any bit is set.
   */
  public static boolean anySet(Object baseObject, long baseOffset, long bitSetWidthInWords) {
    long addr = baseOffset;
    for (int i = 0; i < bitSetWidthInWords; i++, addr += WORD_SIZE) {
      if (Platform.getLong(baseObject, addr) != 0) {
        return true;
      }
    }
    return false;
  }

  /**
   * Returns the index of the first bit that is set to true that occurs on or after the
   * specified starting index. If no such bit exists then {@code -1} is returned.
   * <p>
   * To iterate over the true bits in a BitSet, use the following loop:
   * <pre>
   * <code>
   *  for (long i = bs.nextSetBit(0, sizeInWords); i &gt;= 0;
   *    i = bs.nextSetBit(i + 1, sizeInWords)) {
   *    // operate on index i here
   *  }
   * </code>
   * </pre>
   *
   * @param fromIndex the index to start checking from (inclusive)
   * @param bitsetSizeInWords the size of the bitset, measured in 8-byte words
   * @return the index of the next set bit, or -1 if there is no such bit
   */
  public static int nextSetBit(
      Object baseObject,
      long baseOffset,
      int fromIndex,
      int bitsetSizeInWords) {
    int wi = fromIndex >> 6;
    if (wi >= bitsetSizeInWords) {
      return -1;
    }

    // Try to find the next set bit in the current word
    final int subIndex = fromIndex & 0x3f;
    // The arithmetic shift ">>" sign-extends only when bit 63 is set, and
    // bit 63 being set is itself a real set bit, so the result stays correct:
    // numberOfTrailingZeros still finds the lowest genuinely-set bit.
    long word = Platform.getLong(baseObject, baseOffset + wi * WORD_SIZE) >> subIndex;
    if (word != 0) {
      return (wi << 6) + subIndex + java.lang.Long.numberOfTrailingZeros(word);
    }

    // Find the next set bit in the rest of the words
    wi += 1;
    while (wi < bitsetSizeInWords) {
      word = Platform.getLong(baseObject, baseOffset + wi * WORD_SIZE);
      if (word != 0) {
        return (wi << 6) + java.lang.Long.numberOfTrailingZeros(word);
      }
      wi += 1;
    }

    return -1;
  }
}
| 9,931 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.types;
import java.io.Serializable;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* The internal representation of interval type.
*/
public final class CalendarInterval implements Serializable {
// Microsecond conversion factors used throughout interval arithmetic; each
// factor is derived from the previous one to make the chain self-evident.
public static final long MICROS_PER_MILLI = 1000L;
public static final long MICROS_PER_SECOND = MICROS_PER_MILLI * 1000;
public static final long MICROS_PER_MINUTE = MICROS_PER_SECOND * 60;
public static final long MICROS_PER_HOUR = MICROS_PER_MINUTE * 60;
public static final long MICROS_PER_DAY = MICROS_PER_HOUR * 24;
public static final long MICROS_PER_WEEK = MICROS_PER_DAY * 7;
/**
 * Builds the regex fragment that matches one interval unit term such as
 * "3 years": leading whitespace, a capturing group for the (possibly
 * negative) integer value, the unit name, and an optional plural "s" — all
 * wrapped in an optional non-capturing group so the unit may be omitted
 * from the interval string entirely. Only the value group is captured,
 * since that is the part consumed later by {@code fromString}.
 */
private static String unitRegex(String unit) {
  StringBuilder regex = new StringBuilder("(?:\\s+(-?\\d+)\\s+");
  regex.append(unit).append("s?)?");
  return regex.toString();
}
// Full interval pattern: "interval" followed by optional unit terms,
// largest unit first; group numbers 1..9 follow this order.
private static Pattern p = Pattern.compile("interval" + unitRegex("year") + unitRegex("month") +
  unitRegex("week") + unitRegex("day") + unitRegex("hour") + unitRegex("minute") +
  unitRegex("second") + unitRegex("millisecond") + unitRegex("microsecond"));

// [-]YYYY-MM, optionally wrapped in single or double quotes.
private static Pattern yearMonthPattern =
  Pattern.compile("^(?:['|\"])?([+|-])?(\\d+)-(\\d+)(?:['|\"])?$");

// [-]d HH:mm:ss[.n...], optionally wrapped in single or double quotes.
private static Pattern dayTimePattern =
  Pattern.compile("^(?:['|\"])?([+|-])?(\\d+) (\\d+):(\\d+):(\\d+)(\\.(\\d+))?(?:['|\"])?$");

// Strips one optional layer of surrounding single or double quotes.
private static Pattern quoteTrimPattern = Pattern.compile("^(?:['|\"])?(.*?)(?:['|\"])?$");
// Parses a captured regex group, treating a missing (null) group as zero.
private static long toLong(String s) {
  return s == null ? 0 : Long.parseLong(s);
}
/**
 * Convert a string to CalendarInterval. Return null if the input string is not a valid interval.
 * This method is case-sensitive and all characters in the input string should be in lower case.
 */
public static CalendarInterval fromString(String s) {
  if (s == null) {
    return null;
  }
  String trimmed = s.trim();
  Matcher m = p.matcher(trimmed);
  // A bare "interval" with no units is not a valid interval.
  if (!m.matches() || trimmed.equals("interval")) {
    return null;
  }
  // Groups 1..9 line up with the unit order declared in pattern `p`.
  long months = toLong(m.group(1)) * 12 + toLong(m.group(2));
  long microseconds = toLong(m.group(3)) * MICROS_PER_WEEK;
  microseconds += toLong(m.group(4)) * MICROS_PER_DAY;
  microseconds += toLong(m.group(5)) * MICROS_PER_HOUR;
  microseconds += toLong(m.group(6)) * MICROS_PER_MINUTE;
  microseconds += toLong(m.group(7)) * MICROS_PER_SECOND;
  microseconds += toLong(m.group(8)) * MICROS_PER_MILLI;
  microseconds += toLong(m.group(9));
  return new CalendarInterval((int) months, microseconds);
}
/**
 * Case-insensitive variant of {@link #fromString}: lower-cases the input and
 * prepends the "interval " prefix when missing. Unlike fromString, it throws
 * IllegalArgumentException when the input string is not a valid interval.
 *
 * @throws IllegalArgumentException if the string is not a valid interval.
 */
public static CalendarInterval fromCaseInsensitiveString(String s) {
  if (s == null || s.trim().isEmpty()) {
    throw new IllegalArgumentException("Interval cannot be null or blank.");
  }
  String normalized = s.trim().toLowerCase(Locale.ROOT);
  if (!normalized.startsWith("interval ")) {
    normalized = "interval " + normalized;
  }
  CalendarInterval parsed = fromString(normalized);
  if (parsed == null) {
    throw new IllegalArgumentException("Invalid interval: " + s);
  }
  return parsed;
}
/**
 * Parses {@code s} as a long and validates it lies within
 * [{@code minValue}, {@code maxValue}]; a null input yields 0.
 *
 * @throws IllegalArgumentException when the parsed value is out of range
 */
public static long toLongWithRange(String fieldName,
    String s, long minValue, long maxValue) throws IllegalArgumentException {
  if (s == null) {
    return 0;
  }
  long parsed = Long.parseLong(s);
  if (parsed < minValue || parsed > maxValue) {
    throw new IllegalArgumentException(String.format("%s %d outside range [%d, %d]",
      fieldName, parsed, minValue, maxValue));
  }
  return parsed;
}
/**
 * Parse YearMonth string in form: [-]YYYY-MM
 *
 * adapted from HiveIntervalYearMonth.valueOf
 *
 * @throws IllegalArgumentException when the input is null or malformed
 */
public static CalendarInterval fromYearMonthString(String s) throws IllegalArgumentException {
  if (s == null) {
    throw new IllegalArgumentException("Interval year-month string was null");
  }
  String trimmed = s.trim();
  Matcher m = yearMonthPattern.matcher(trimmed);
  if (!m.matches()) {
    throw new IllegalArgumentException(
      "Interval string does not match year-month format of 'y-m': " + trimmed);
  }
  try {
    // Group 1 is the optional sign; "-".equals(...) is null-safe.
    int sign = "-".equals(m.group(1)) ? -1 : 1;
    int years = (int) toLongWithRange("year", m.group(2), 0, Integer.MAX_VALUE);
    int months = (int) toLongWithRange("month", m.group(3), 0, 11);
    return new CalendarInterval(sign * (years * 12 + months), 0);
  } catch (Exception e) {
    throw new IllegalArgumentException(
      "Error parsing interval year-month string: " + e.getMessage(), e);
  }
}
/**
* Parse dayTime string in form: [-]d HH:mm:ss.nnnnnnnnn
*
* adapted from HiveIntervalDayTime.valueOf
*/
public static CalendarInterval fromDayTimeString(String s) throws IllegalArgumentException {
CalendarInterval result = null;
if (s == null) {
throw new IllegalArgumentException("Interval day-time string was null");
}
s = s.trim();
Matcher m = dayTimePattern.matcher(s);
if (!m.matches()) {
throw new IllegalArgumentException(
"Interval string does not match day-time format of 'd h:m:s.n': " + s);
} else {
try {
int sign = m.group(1) != null && m.group(1).equals("-") ? -1 : 1;
long days = toLongWithRange("day", m.group(2), 0, Integer.MAX_VALUE);
long hours = toLongWithRange("hour", m.group(3), 0, 23);
long minutes = toLongWithRange("minute", m.group(4), 0, 59);
long seconds = toLongWithRange("second", m.group(5), 0, 59);
// Hive allow nanosecond precision interval
String nanoStr = m.group(7) == null ? null : (m.group(7) + "000000000").substring(0, 9);
long nanos = toLongWithRange("nanosecond", nanoStr, 0L, 999999999L);
result = new CalendarInterval(0, sign * (
days * MICROS_PER_DAY + hours * MICROS_PER_HOUR + minutes * MICROS_PER_MINUTE +
seconds * MICROS_PER_SECOND + nanos / 1000L));
} catch (Exception e) {
throw new IllegalArgumentException(
"Error parsing interval day-time string: " + e.getMessage(), e);
}
}
return result;
}
  /**
   * Parses a value string for a single interval unit ("year", "month", "week", "day", "hour",
   * "minute", "second", "millisecond" or "microsecond") into a CalendarInterval.
   *
   * NOTE(review): an unrecognized unit falls through the switch and returns null — confirm
   * callers treat a null result as a parse failure.
   *
   * @throws IllegalArgumentException if the value string is null, malformed, or out of range.
   */
  public static CalendarInterval fromSingleUnitString(String unit, String s)
      throws IllegalArgumentException {
    CalendarInterval result = null;
    if (s == null) {
      throw new IllegalArgumentException(String.format("Interval %s string was null", unit));
    }
    s = s.trim();
    Matcher m = quoteTrimPattern.matcher(s);
    if (!m.matches()) {
      // NOTE(review): this message mentions the day-time format, but this method parses a
      // single-unit value; the text appears copy-pasted from fromDayTimeString.
      throw new IllegalArgumentException(
        "Interval string does not match day-time format of 'd h:m:s.n': " + s);
    } else {
      try {
        switch (unit) {
          case "year":
            // Bounds chosen so that year * 12 cannot overflow the int month field.
            int year = (int) toLongWithRange("year", m.group(1),
              Integer.MIN_VALUE / 12, Integer.MAX_VALUE / 12);
            result = new CalendarInterval(year * 12, 0L);
            break;
          case "month":
            int month = (int) toLongWithRange("month", m.group(1),
              Integer.MIN_VALUE, Integer.MAX_VALUE);
            result = new CalendarInterval(month, 0L);
            break;
          case "week":
            // Bounds chosen so that week * MICROS_PER_WEEK cannot overflow long.
            long week = toLongWithRange("week", m.group(1),
              Long.MIN_VALUE / MICROS_PER_WEEK, Long.MAX_VALUE / MICROS_PER_WEEK);
            result = new CalendarInterval(0, week * MICROS_PER_WEEK);
            break;
          case "day":
            long day = toLongWithRange("day", m.group(1),
              Long.MIN_VALUE / MICROS_PER_DAY, Long.MAX_VALUE / MICROS_PER_DAY);
            result = new CalendarInterval(0, day * MICROS_PER_DAY);
            break;
          case "hour":
            long hour = toLongWithRange("hour", m.group(1),
              Long.MIN_VALUE / MICROS_PER_HOUR, Long.MAX_VALUE / MICROS_PER_HOUR);
            result = new CalendarInterval(0, hour * MICROS_PER_HOUR);
            break;
          case "minute":
            long minute = toLongWithRange("minute", m.group(1),
              Long.MIN_VALUE / MICROS_PER_MINUTE, Long.MAX_VALUE / MICROS_PER_MINUTE);
            result = new CalendarInterval(0, minute * MICROS_PER_MINUTE);
            break;
          case "second": {
            // Seconds may carry a fractional nanosecond part ("ss.nnnnnnnnn").
            long micros = parseSecondNano(m.group(1));
            result = new CalendarInterval(0, micros);
            break;
          }
          case "millisecond":
            long millisecond = toLongWithRange("millisecond", m.group(1),
              Long.MIN_VALUE / MICROS_PER_MILLI, Long.MAX_VALUE / MICROS_PER_MILLI);
            result = new CalendarInterval(0, millisecond * MICROS_PER_MILLI);
            break;
          case "microsecond": {
            long micros = Long.parseLong(m.group(1));
            result = new CalendarInterval(0, micros);
            break;
          }
        }
      } catch (Exception e) {
        throw new IllegalArgumentException("Error parsing interval string: " + e.getMessage(), e);
      }
    }
    return result;
  }
/**
* Parse second_nano string in ss.nnnnnnnnn format to microseconds
*/
public static long parseSecondNano(String secondNano) throws IllegalArgumentException {
String[] parts = secondNano.split("\\.");
if (parts.length == 1) {
return toLongWithRange("second", parts[0], Long.MIN_VALUE / MICROS_PER_SECOND,
Long.MAX_VALUE / MICROS_PER_SECOND) * MICROS_PER_SECOND;
} else if (parts.length == 2) {
long seconds = parts[0].equals("") ? 0L : toLongWithRange("second", parts[0],
Long.MIN_VALUE / MICROS_PER_SECOND, Long.MAX_VALUE / MICROS_PER_SECOND);
long nanos = toLongWithRange("nanosecond", parts[1], 0L, 999999999L);
return seconds * MICROS_PER_SECOND + nanos / 1000L;
} else {
throw new IllegalArgumentException(
"Interval string does not match second-nano format of ss.nnnnnnnnn");
}
}
  // Total months component of the interval (years are folded into months).
  public final int months;
  // Remaining duration of the interval in microseconds (weeks/days/hours... folded in).
  public final long microseconds;
  /** Returns the microsecond component truncated to whole milliseconds. */
  public long milliseconds() {
    return this.microseconds / MICROS_PER_MILLI;
  }
  /** Creates an interval from a month count and a microsecond duration. */
  public CalendarInterval(int months, long microseconds) {
    this.months = months;
    this.microseconds = microseconds;
  }
public CalendarInterval add(CalendarInterval that) {
int months = this.months + that.months;
long microseconds = this.microseconds + that.microseconds;
return new CalendarInterval(months, microseconds);
}
public CalendarInterval subtract(CalendarInterval that) {
int months = this.months - that.months;
long microseconds = this.microseconds - that.microseconds;
return new CalendarInterval(months, microseconds);
}
public CalendarInterval negate() {
return new CalendarInterval(-this.months, -this.microseconds);
}
@Override
public boolean equals(Object other) {
if (this == other) return true;
if (other == null || !(other instanceof CalendarInterval)) return false;
CalendarInterval o = (CalendarInterval) other;
return this.months == o.months && this.microseconds == o.microseconds;
}
@Override
public int hashCode() {
return 31 * months + (int) microseconds;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("interval");
if (months != 0) {
appendUnit(sb, months / 12, "year");
appendUnit(sb, months % 12, "month");
}
if (microseconds != 0) {
long rest = microseconds;
appendUnit(sb, rest / MICROS_PER_WEEK, "week");
rest %= MICROS_PER_WEEK;
appendUnit(sb, rest / MICROS_PER_DAY, "day");
rest %= MICROS_PER_DAY;
appendUnit(sb, rest / MICROS_PER_HOUR, "hour");
rest %= MICROS_PER_HOUR;
appendUnit(sb, rest / MICROS_PER_MINUTE, "minute");
rest %= MICROS_PER_MINUTE;
appendUnit(sb, rest / MICROS_PER_SECOND, "second");
rest %= MICROS_PER_SECOND;
appendUnit(sb, rest / MICROS_PER_MILLI, "millisecond");
rest %= MICROS_PER_MILLI;
appendUnit(sb, rest, "microsecond");
} else if (months == 0) {
sb.append(" 0 microseconds");
}
return sb.toString();
}
private void appendUnit(StringBuilder sb, long value, String unit) {
if (value != 0) {
sb.append(' ').append(value).append(' ').append(unit).append('s');
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.types;
import java.util.Arrays;
import com.google.common.primitives.Ints;
import org.apache.spark.unsafe.Platform;
public final class ByteArray {
  public static final byte[] EMPTY_BYTE = new byte[0];

  /**
   * Writes the content of a byte array into a memory address, identified by an object and an
   * offset. The target memory must already be allocated and large enough to hold {@code src}.
   */
  public static void writeToMemory(byte[] src, Object target, long targetOffset) {
    Platform.copyMemory(src, Platform.BYTE_ARRAY_OFFSET, target, targetOffset, src.length);
  }

  /**
   * Packs up to the first 8 bytes of {@code bytes} into a big-endian long usable as a
   * sorting prefix. A null input maps to 0.
   */
  public static long getPrefix(byte[] bytes) {
    if (bytes == null) {
      return 0L;
    }
    final int n = Math.min(bytes.length, 8);
    long prefix = 0;
    for (int i = 0; i < n; ++i) {
      long b = (long) Platform.getByte(bytes, Platform.BYTE_ARRAY_OFFSET + i) & 0xff;
      prefix |= b << (56 - 8 * i);
    }
    return prefix;
  }

  /**
   * SQL-style substring over raw bytes: pos is 1-based, 0 behaves like 1, and negative
   * values count back from the end (pos calculation matches UTF8String#subStringSQL).
   */
  public static byte[] subStringSQL(byte[] bytes, int pos, int len) {
    if (pos > bytes.length) {
      return EMPTY_BYTE;
    }
    int start;
    if (pos > 0) {
      start = pos - 1;
    } else if (pos < 0) {
      start = bytes.length + pos;
    } else {
      start = 0;
    }
    // end is computed against the (possibly negative) start before clamping, matching the
    // original semantics.
    int end = (bytes.length - start) < len ? bytes.length : start + len;
    start = Math.max(start, 0); // clamp a negative start (underflow) to the beginning
    if (start >= end) {
      return EMPTY_BYTE;
    }
    return Arrays.copyOfRange(bytes, start, end);
  }

  /**
   * Concatenates the inputs into a single new array; returns null if any input is null.
   */
  public static byte[] concat(byte[]... inputs) {
    // First pass: total length (as long, to detect int overflow via checkedCast below).
    long totalLength = 0;
    for (byte[] input : inputs) {
      if (input == null) {
        return null;
      }
      totalLength += (long) input.length;
    }
    // Second pass: copy each input into the result, back to back.
    final byte[] result = new byte[Ints.checkedCast(totalLength)];
    int offset = 0;
    for (byte[] input : inputs) {
      int len = input.length;
      Platform.copyMemory(
        input, Platform.BYTE_ARRAY_OFFSET,
        result, Platform.BYTE_ARRAY_OFFSET + offset,
        len);
      offset += len;
    }
    return result;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.types;
import javax.annotation.Nonnull;
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Map;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.KryoSerializable;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import com.google.common.primitives.Ints;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.array.ByteArrayMethods;
import org.apache.spark.unsafe.hash.Murmur3_x86_32;
import static org.apache.spark.unsafe.Platform.*;
/**
* A UTF-8 String for internal Spark use.
* <p>
* A String encoded in UTF-8 as an Array[Byte], which can be used for comparison,
* search, see http://en.wikipedia.org/wiki/UTF-8 for details.
* <p>
* Note: This is not designed for general use cases, should not be used outside SQL.
*/
public final class UTF8String implements Comparable<UTF8String>, Externalizable, KryoSerializable,
Cloneable {
  // These are only updated by readExternal() or read()
  @Nonnull
  private Object base;   // backing object; a byte[] when on-heap (see getByteBuffer)
  private long offset;   // byte offset of this string's first byte within/relative to `base`
  private int numBytes;  // length of this string in bytes (not code points)
  public Object getBaseObject() { return base; }
  public long getBaseOffset() { return offset; }
/**
* A char in UTF-8 encoding can take 1-4 bytes depending on the first byte which
* indicates the size of the char. See Unicode standard in page 126, Table 3-6:
* http://www.unicode.org/versions/Unicode10.0.0/UnicodeStandard-10.0.pdf
*
* Binary Hex Comments
* 0xxxxxxx 0x00..0x7F Only byte of a 1-byte character encoding
* 10xxxxxx 0x80..0xBF Continuation bytes (1-3 continuation bytes)
* 110xxxxx 0xC0..0xDF First byte of a 2-byte character encoding
* 1110xxxx 0xE0..0xEF First byte of a 3-byte character encoding
* 11110xxx 0xF0..0xF7 First byte of a 4-byte character encoding
*
* As a consequence of the well-formedness conditions specified in
* Table 3-7 (page 126), the following byte values are disallowed in UTF-8:
* C0–C1, F5–FF.
*/
  // Index = first byte of a code point (0x00-0xFF); value = total bytes of that code point's
  // UTF-8 encoding. 0 marks byte values that cannot start a well-formed sequence.
  private static byte[] bytesOfCodePointInUTF8 = {
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x00..0x0F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x10..0x1F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x20..0x2F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x30..0x3F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x40..0x4F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x50..0x5F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x60..0x6F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x70..0x7F
    // Continuation bytes cannot appear as the first byte
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x80..0x8F
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x90..0x9F
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xA0..0xAF
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xB0..0xBF
    0, 0, // 0xC0..0xC1 - disallowed in UTF-8
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xC2..0xCF
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xD0..0xDF
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xE0..0xEF
    4, 4, 4, 4, 4, // 0xF0..0xF4
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 // 0xF5..0xFF - disallowed in UTF-8
  };
  // Cached platform endianness; getPrefix() assembles its 8-byte prefix per byte order.
  private static final boolean IS_LITTLE_ENDIAN =
    ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN;
  // Shared singletons for the common "," separator (findInSet) and the empty string.
  private static final UTF8String COMMA_UTF8 = UTF8String.fromString(",");
  public static final UTF8String EMPTY_UTF8 = UTF8String.fromString("");
/**
* Creates an UTF8String from byte array, which should be encoded in UTF-8.
*
* Note: `bytes` will be hold by returned UTF8String.
*/
public static UTF8String fromBytes(byte[] bytes) {
if (bytes != null) {
return new UTF8String(bytes, BYTE_ARRAY_OFFSET, bytes.length);
} else {
return null;
}
}
/**
* Creates an UTF8String from byte array, which should be encoded in UTF-8.
*
* Note: `bytes` will be hold by returned UTF8String.
*/
public static UTF8String fromBytes(byte[] bytes, int offset, int numBytes) {
if (bytes != null) {
return new UTF8String(bytes, BYTE_ARRAY_OFFSET + offset, numBytes);
} else {
return null;
}
}
/**
* Creates an UTF8String from given address (base and offset) and length.
*/
public static UTF8String fromAddress(Object base, long offset, int numBytes) {
return new UTF8String(base, offset, numBytes);
}
/**
* Creates an UTF8String from String.
*/
public static UTF8String fromString(String str) {
return str == null ? null : fromBytes(str.getBytes(StandardCharsets.UTF_8));
}
/**
* Creates an UTF8String that contains `length` spaces.
*/
public static UTF8String blankString(int length) {
byte[] spaces = new byte[length];
Arrays.fill(spaces, (byte) ' ');
return fromBytes(spaces);
}
  /** Constructs a string view over {@code numBytes} bytes at {@code base} + {@code offset}. */
  protected UTF8String(Object base, long offset, int numBytes) {
    this.base = base;
    this.offset = offset;
    this.numBytes = numBytes;
  }
  // for serialization
  /** No-arg constructor for Externalizable/Kryo; fields are filled by readExternal()/read(). */
  public UTF8String() {
    this(null, 0, 0);
  }
  /**
   * Writes the content of this string into a memory address, identified by an object and an offset.
   * The target memory address must already been allocated, and have enough space to hold all the
   * bytes in this string.
   */
  public void writeToMemory(Object target, long targetOffset) {
    Platform.copyMemory(base, offset, target, targetOffset, numBytes);
  }
  /** Writes this string's bytes into {@code buffer} and advances its position by numBytes. */
  public void writeTo(ByteBuffer buffer) {
    assert(buffer.hasArray());
    byte[] target = buffer.array();
    int offset = buffer.arrayOffset();
    int pos = buffer.position();
    writeToMemory(target, Platform.BYTE_ARRAY_OFFSET + offset + pos);
    buffer.position(pos + numBytes);
  }
  /**
   * Returns a {@link ByteBuffer} wrapping the base object if it is a byte array
   * or a copy of the data if the base object is not a byte array.
   *
   * Unlike getBytes this will not create a copy the array if this is a slice.
   */
  @Nonnull
  public ByteBuffer getByteBuffer() {
    if (base instanceof byte[] && offset >= BYTE_ARRAY_OFFSET) {
      final byte[] bytes = (byte[]) base;
      // the offset includes an object header... this is only needed for unsafe copies
      final long arrayOffset = offset - BYTE_ARRAY_OFFSET;
      // verify that the offset and length points somewhere inside the byte array
      // and that the offset can safely be truncated to a 32-bit integer
      if ((long) bytes.length < arrayOffset + numBytes) {
        throw new ArrayIndexOutOfBoundsException();
      }
      return ByteBuffer.wrap(bytes, (int) arrayOffset, numBytes);
    } else {
      return ByteBuffer.wrap(getBytes());
    }
  }
  /** Streams this string's bytes to {@code out} (heap-backed buffer guaranteed by assert). */
  public void writeTo(OutputStream out) throws IOException {
    final ByteBuffer bb = this.getByteBuffer();
    assert(bb.hasArray());
    // similar to Utils.writeByteBuffer but without the spark-core dependency
    out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining());
  }
/**
* Returns the number of bytes for a code point with the first byte as `b`
* @param b The first byte of a code point
*/
private static int numBytesForFirstByte(final byte b) {
final int offset = b & 0xFF;
byte numBytes = bytesOfCodePointInUTF8[offset];
return (numBytes == 0) ? 1: numBytes; // Skip the first byte disallowed in UTF-8
}
/**
* Returns the number of bytes
*/
public int numBytes() {
return numBytes;
}
/**
* Returns the number of code points in it.
*/
public int numChars() {
int len = 0;
for (int i = 0; i < numBytes; i += numBytesForFirstByte(getByte(i))) {
len += 1;
}
return len;
}
  /**
   * Returns a 64-bit integer that can be used as the prefix used in sorting.
   * The prefix holds the first up-to-8 bytes of the string, most significant byte first,
   * so that an unsigned comparison of prefixes orders like a byte-wise string comparison.
   */
  public long getPrefix() {
    // Since JVMs are either 4-byte aligned or 8-byte aligned, we check the size of the string.
    // If size is 0, just return 0.
    // If size is between 0 and 4 (inclusive), assume data is 4-byte aligned under the hood and
    // use a getInt to fetch the prefix.
    // If size is greater than 4, assume we have at least 8 bytes of data to fetch.
    // After getting the data, we use a mask to mask out data that is not part of the string.
    long p;
    long mask = 0;
    if (IS_LITTLE_ENDIAN) {
      if (numBytes >= 8) {
        p = Platform.getLong(base, offset);
      } else if (numBytes > 4) {
        p = Platform.getLong(base, offset);
        mask = (1L << (8 - numBytes) * 8) - 1;
      } else if (numBytes > 0) {
        p = (long) Platform.getInt(base, offset);
        mask = (1L << (8 - numBytes) * 8) - 1;
      } else {
        p = 0;
      }
      // Flip to big-endian so the first string byte lands in the most significant position.
      p = java.lang.Long.reverseBytes(p);
    } else {
      // byteOrder == ByteOrder.BIG_ENDIAN
      if (numBytes >= 8) {
        p = Platform.getLong(base, offset);
      } else if (numBytes > 4) {
        p = Platform.getLong(base, offset);
        mask = (1L << (8 - numBytes) * 8) - 1;
      } else if (numBytes > 0) {
        // The int read fills the high 32 bits directly on big-endian hardware.
        p = ((long) Platform.getInt(base, offset)) << 32;
        mask = (1L << (8 - numBytes) * 8) - 1;
      } else {
        p = 0;
      }
    }
    // Zero out the low-order bytes that were read past the end of the string.
    p &= ~mask;
    return p;
  }
/**
* Returns the underline bytes, will be a copy of it if it's part of another array.
*/
public byte[] getBytes() {
// avoid copy if `base` is `byte[]`
if (offset == BYTE_ARRAY_OFFSET && base instanceof byte[]
&& ((byte[]) base).length == numBytes) {
return (byte[]) base;
} else {
byte[] bytes = new byte[numBytes];
copyMemory(base, offset, bytes, BYTE_ARRAY_OFFSET, numBytes);
return bytes;
}
}
  /**
   * Returns a substring of this.
   * @param start the position of first code point
   * @param until the position after last code point, exclusive.
   */
  public UTF8String substring(final int start, final int until) {
    if (until <= start || start >= numBytes) {
      return EMPTY_UTF8;
    }
    // Walk forward to the byte offset of code point `start`.
    int i = 0;
    int c = 0;
    while (i < numBytes && c < start) {
      i += numBytesForFirstByte(getByte(i));
      c += 1;
    }
    // Keep walking to the byte offset just past code point `until - 1`.
    int j = i;
    while (i < numBytes && c < until) {
      i += numBytesForFirstByte(getByte(i));
      c += 1;
    }
    if (i > j) {
      byte[] bytes = new byte[i - j];
      copyMemory(base, offset + j, bytes, BYTE_ARRAY_OFFSET, i - j);
      return fromBytes(bytes);
    } else {
      return EMPTY_UTF8;
    }
  }
  /** SQL-style SUBSTR over code points; pos is 1-based, 0 acts like 1, negatives count from end. */
  public UTF8String substringSQL(int pos, int length) {
    // Information regarding the pos calculation:
    // Hive and SQL use one-based indexing for SUBSTR arguments but also accept zero and
    // negative indices for start positions. If a start index i is greater than 0, it
    // refers to element i-1 in the sequence. If a start index i is less than 0, it refers
    // to the -ith element before the end of the sequence. If a start index i is 0, it
    // refers to the first element.
    int len = numChars();
    int start = (pos > 0) ? pos -1 : ((pos < 0) ? len + pos : 0);
    // Integer.MAX_VALUE length means "to the end" and avoids start + length overflow.
    int end = (length == Integer.MAX_VALUE) ? len : start + length;
    return substring(start, end);
  }
/**
* Returns whether this contains `substring` or not.
*/
public boolean contains(final UTF8String substring) {
if (substring.numBytes == 0) {
return true;
}
byte first = substring.getByte(0);
for (int i = 0; i <= numBytes - substring.numBytes; i++) {
if (getByte(i) == first && matchAt(substring, i)) {
return true;
}
}
return false;
}
/**
* Returns the byte at position `i`.
*/
private byte getByte(int i) {
return Platform.getByte(base, offset + i);
}
private boolean matchAt(final UTF8String s, int pos) {
if (s.numBytes + pos > numBytes || pos < 0) {
return false;
}
return ByteArrayMethods.arrayEquals(base, offset + pos, s.base, s.offset, s.numBytes);
}
public boolean startsWith(final UTF8String prefix) {
return matchAt(prefix, 0);
}
public boolean endsWith(final UTF8String suffix) {
return matchAt(suffix, numBytes - suffix.numBytes);
}
/**
* Returns the upper case of this string
*/
public UTF8String toUpperCase() {
if (numBytes == 0) {
return EMPTY_UTF8;
}
byte[] bytes = new byte[numBytes];
bytes[0] = (byte) Character.toTitleCase(getByte(0));
for (int i = 0; i < numBytes; i++) {
byte b = getByte(i);
if (numBytesForFirstByte(b) != 1) {
// fallback
return toUpperCaseSlow();
}
int upper = Character.toUpperCase((int) b);
if (upper > 127) {
// fallback
return toUpperCaseSlow();
}
bytes[i] = (byte) upper;
}
return fromBytes(bytes);
}
private UTF8String toUpperCaseSlow() {
return fromString(toString().toUpperCase());
}
/**
* Returns the lower case of this string
*/
public UTF8String toLowerCase() {
if (numBytes == 0) {
return EMPTY_UTF8;
}
byte[] bytes = new byte[numBytes];
bytes[0] = (byte) Character.toTitleCase(getByte(0));
for (int i = 0; i < numBytes; i++) {
byte b = getByte(i);
if (numBytesForFirstByte(b) != 1) {
// fallback
return toLowerCaseSlow();
}
int lower = Character.toLowerCase((int) b);
if (lower > 127) {
// fallback
return toLowerCaseSlow();
}
bytes[i] = (byte) lower;
}
return fromBytes(bytes);
}
private UTF8String toLowerCaseSlow() {
return fromString(toString().toLowerCase());
}
/**
* Returns the title case of this string, that could be used as title.
*/
public UTF8String toTitleCase() {
if (numBytes == 0) {
return EMPTY_UTF8;
}
byte[] bytes = new byte[numBytes];
for (int i = 0; i < numBytes; i++) {
byte b = getByte(i);
if (i == 0 || getByte(i - 1) == ' ') {
if (numBytesForFirstByte(b) != 1) {
// fallback
return toTitleCaseSlow();
}
int upper = Character.toTitleCase(b);
if (upper > 127) {
// fallback
return toTitleCaseSlow();
}
bytes[i] = (byte) upper;
} else {
bytes[i] = b;
}
}
return fromBytes(bytes);
}
private UTF8String toTitleCaseSlow() {
StringBuffer sb = new StringBuffer();
String s = toString();
sb.append(s);
sb.setCharAt(0, Character.toTitleCase(sb.charAt(0)));
for (int i = 1; i < s.length(); i++) {
if (sb.charAt(i - 1) == ' ') {
sb.setCharAt(i, Character.toTitleCase(sb.charAt(i)));
}
}
return fromString(sb.toString());
}
  /*
   * Returns the index of the string `match` in this String. This string has to be a comma separated
   * list. If `match` contains a comma 0 will be returned. If the `match` isn't part of this String,
   * 0 will be returned, else the index of match (1-based index)
   */
  public int findInSet(UTF8String match) {
    if (match.contains(COMMA_UTF8)) {
      return 0;
    }
    // n: 1-based index of the current list element; lastComma: byte index of the previous ','.
    int n = 1, lastComma = -1;
    for (int i = 0; i < numBytes; i++) {
      if (getByte(i) == (byte) ',') {
        // Compare the element spanning (lastComma, i) against `match`.
        if (i - (lastComma + 1) == match.numBytes &&
          ByteArrayMethods.arrayEquals(base, offset + (lastComma + 1), match.base, match.offset,
            match.numBytes)) {
          return n;
        }
        lastComma = i;
        n++;
      }
    }
    // Check the final element (after the last comma through to the end of the string).
    if (numBytes - (lastComma + 1) == match.numBytes &&
      ByteArrayMethods.arrayEquals(base, offset + (lastComma + 1), match.base, match.offset,
        match.numBytes)) {
      return n;
    }
    return 0;
  }
  /**
   * Copy the bytes from the current UTF8String, and make a new UTF8String.
   * @param start the start position of the current UTF8String in bytes.
   * @param end the end position of the current UTF8String in bytes (inclusive).
   * @return a new UTF8String in the position of [start, end] of current UTF8String bytes.
   */
  private UTF8String copyUTF8String(int start, int end) {
    int len = end - start + 1;
    byte[] newBytes = new byte[len];
    copyMemory(base, offset + start, newBytes, BYTE_ARRAY_OFFSET, len);
    return UTF8String.fromBytes(newBytes);
  }
public UTF8String trim() {
int s = 0;
// skip all of the space (0x20) in the left side
while (s < this.numBytes && getByte(s) == 0x20) s++;
if (s == this.numBytes) {
// empty string
return EMPTY_UTF8;
}
// skip all of the space (0x20) in the right side
int e = this.numBytes - 1;
while (e > s && getByte(e) == 0x20) e--;
return copyUTF8String(s, e);
}
/**
* Based on the given trim string, trim this string starting from both ends
* This method searches for each character in the source string, removes the character if it is
* found in the trim string, stops at the first not found. It calls the trimLeft first, then
* trimRight. It returns a new string in which both ends trim characters have been removed.
* @param trimString the trim character string
*/
public UTF8String trim(UTF8String trimString) {
if (trimString != null) {
return trimLeft(trimString).trimRight(trimString);
} else {
return null;
}
}
public UTF8String trimLeft() {
int s = 0;
// skip all of the space (0x20) in the left side
while (s < this.numBytes && getByte(s) == 0x20) s++;
if (s == this.numBytes) {
// empty string
return EMPTY_UTF8;
} else {
return copyUTF8String(s, this.numBytes - 1);
}
}
  /**
   * Based on the given trim string, trim this string starting from left end
   * This method searches each character in the source string starting from the left end, removes
   * the character if it is in the trim string, stops at the first character which is not in the
   * trim string, returns the new string.
   * @param trimString the trim character string
   */
  public UTF8String trimLeft(UTF8String trimString) {
    if (trimString == null) return null;
    // the searching byte position in the source string
    int srchIdx = 0;
    // the first beginning byte position of a non-matching character
    int trimIdx = 0;
    while (srchIdx < numBytes) {
      // Extract the full (possibly multi-byte) code point starting at srchIdx.
      UTF8String searchChar = copyUTF8String(
        srchIdx, srchIdx + numBytesForFirstByte(this.getByte(srchIdx)) - 1);
      int searchCharBytes = searchChar.numBytes;
      // try to find the matching for the searchChar in the trimString set
      if (trimString.find(searchChar, 0) >= 0) {
        trimIdx += searchCharBytes;
      } else {
        // no matching, exit the search
        break;
      }
      srchIdx += searchCharBytes;
    }
    if (trimIdx >= numBytes) {
      // empty string
      return EMPTY_UTF8;
    } else {
      return copyUTF8String(trimIdx, numBytes - 1);
    }
  }
public UTF8String trimRight() {
int e = numBytes - 1;
// skip all of the space (0x20) in the right side
while (e >= 0 && getByte(e) == 0x20) e--;
if (e < 0) {
// empty string
return EMPTY_UTF8;
} else {
return copyUTF8String(0, e);
}
}
  /**
   * Based on the given trim string, trim this string starting from right end
   * This method searches each character in the source string starting from the right end,
   * removes the character if it is in the trim string, stops at the first character which is not
   * in the trim string, returns the new string.
   * @param trimString the trim character string
   */
  public UTF8String trimRight(UTF8String trimString) {
    if (trimString == null) return null;
    int charIdx = 0;
    // number of characters from the source string
    int numChars = 0;
    // array of character length for the source string
    int[] stringCharLen = new int[numBytes];
    // array of the first byte position for each character in the source string
    int[] stringCharPos = new int[numBytes];
    // build the position and length array (needed because UTF-8 cannot be scanned backwards)
    while (charIdx < numBytes) {
      stringCharPos[numChars] = charIdx;
      stringCharLen[numChars] = numBytesForFirstByte(getByte(charIdx));
      charIdx += stringCharLen[numChars];
      numChars ++;
    }
    // index trimEnd points to the first no matching byte position from the right side of
    // the source string.
    int trimEnd = numBytes - 1;
    while (numChars > 0) {
      // Extract the last remaining code point and test membership in the trim set.
      UTF8String searchChar = copyUTF8String(
        stringCharPos[numChars - 1],
        stringCharPos[numChars - 1] + stringCharLen[numChars - 1] - 1);
      if (trimString.find(searchChar, 0) >= 0) {
        trimEnd -= stringCharLen[numChars - 1];
      } else {
        break;
      }
      numChars --;
    }
    if (trimEnd < 0) {
      // empty string
      return EMPTY_UTF8;
    } else {
      return copyUTF8String(0, trimEnd);
    }
  }
  /**
   * Returns this string with its code points in reverse order. Bytes within each code point
   * keep their order, so the result remains valid UTF-8.
   */
  public UTF8String reverse() {
    byte[] result = new byte[this.numBytes];
    int i = 0; // position in byte
    while (i < numBytes) {
      int len = numBytesForFirstByte(getByte(i));
      // copy this code point to the mirrored position at the end of the output
      copyMemory(this.base, this.offset + i, result,
        BYTE_ARRAY_OFFSET + result.length - i - len, len);
      i += len;
    }
    return UTF8String.fromBytes(result);
  }
  /**
   * Returns this string repeated {@code times} times (empty for times <= 0).
   * Fills the output with doubling System.arraycopy passes (O(log times) copies).
   * NOTE(review): numBytes * times can overflow int for very large inputs — confirm callers
   * bound the result size.
   */
  public UTF8String repeat(int times) {
    if (times <= 0) {
      return EMPTY_UTF8;
    }
    byte[] newBytes = new byte[numBytes * times];
    copyMemory(this.base, this.offset, newBytes, BYTE_ARRAY_OFFSET, numBytes);
    int copied = 1;
    while (copied < times) {
      // double the filled region each pass, capped by what is still needed
      int toCopy = Math.min(copied, times - copied);
      System.arraycopy(newBytes, 0, newBytes, copied * numBytes, numBytes * toCopy);
      copied += toCopy;
    }
    return UTF8String.fromBytes(newBytes);
  }
  /**
   * Returns the position of the first occurrence of substr in
   * current string from the specified position (0-based index).
   *
   * @param v the string to be searched
   * @param start the start position of the current string for searching
   * @return the position of the first occurrence of substr, if not found, -1 returned.
   */
  public int indexOf(UTF8String v, int start) {
    if (v.numBytes() == 0) {
      return 0;
    }
    // locate to the start position.
    int i = 0; // position in byte
    int c = 0; // position in character
    while (i < numBytes && c < start) {
      i += numBytesForFirstByte(getByte(i));
      c += 1;
    }
    do {
      if (i + v.numBytes > numBytes) {
        return -1;
      }
      if (ByteArrayMethods.arrayEquals(base, offset + i, v.base, v.offset, v.numBytes)) {
        return c;
      }
      // advance one code point at a time so the returned index counts characters
      i += numBytesForFirstByte(getByte(i));
      c += 1;
    } while (i < numBytes);
    return -1;
  }
  /**
   * Find the `str` from left to right.
   * Returns the byte offset of the first match at or after {@code start}, or -1 if absent.
   */
  private int find(UTF8String str, int start) {
    assert (str.numBytes > 0);
    while (start <= numBytes - str.numBytes) {
      if (ByteArrayMethods.arrayEquals(base, offset + start, str.base, str.offset, str.numBytes)) {
        return start;
      }
      start += 1;
    }
    return -1;
  }
  /**
   * Find the `str` from right to left.
   * Returns the byte offset of the first match at or before {@code start}, or -1 if absent.
   */
  private int rfind(UTF8String str, int start) {
    assert (str.numBytes > 0);
    while (start >= 0) {
      if (ByteArrayMethods.arrayEquals(base, offset + start, str.base, str.offset, str.numBytes)) {
        return start;
      }
      start -= 1;
    }
    return -1;
  }
  /**
   * Returns the substring from string str before count occurrences of the delimiter delim.
   * If count is positive, everything the left of the final delimiter (counting from left) is
   * returned. If count is negative, every to the right of the final delimiter (counting from the
   * right) is returned. subStringIndex performs a case-sensitive match when searching for delim.
   */
  public UTF8String subStringIndex(UTF8String delim, int count) {
    if (delim.numBytes == 0 || count == 0) {
      return EMPTY_UTF8;
    }
    if (count > 0) {
      // scan left-to-right for the count-th delimiter occurrence
      int idx = -1;
      while (count > 0) {
        idx = find(delim, idx + 1);
        if (idx >= 0) {
          count --;
        } else {
          // can not find enough delim
          return this;
        }
      }
      if (idx == 0) {
        return EMPTY_UTF8;
      }
      byte[] bytes = new byte[idx];
      copyMemory(base, offset, bytes, BYTE_ARRAY_OFFSET, idx);
      return fromBytes(bytes);
    } else {
      // scan right-to-left for the |count|-th delimiter occurrence
      int idx = numBytes - delim.numBytes + 1;
      count = -count;
      while (count > 0) {
        idx = rfind(delim, idx - 1);
        if (idx >= 0) {
          count --;
        } else {
          // can not find enough delim
          return this;
        }
      }
      if (idx + delim.numBytes == numBytes) {
        return EMPTY_UTF8;
      }
      int size = numBytes - delim.numBytes - idx;
      byte[] bytes = new byte[size];
      copyMemory(base, offset + idx + delim.numBytes, bytes, BYTE_ARRAY_OFFSET, size);
      return fromBytes(bytes);
    }
  }
/**
* Returns str, right-padded with pad to a length of len
* For example:
* ('hi', 5, '??') => 'hi???'
* ('hi', 1, '??') => 'h'
*/
public UTF8String rpad(int len, UTF8String pad) {
int spaces = len - this.numChars(); // number of char need to pad
if (spaces <= 0 || pad.numBytes() == 0) {
// no padding at all, return the substring of the current string
return substring(0, len);
} else {
int padChars = pad.numChars();
int count = spaces / padChars; // how many padding string needed
// the partial string of the padding
UTF8String remain = pad.substring(0, spaces - padChars * count);
byte[] data = new byte[this.numBytes + pad.numBytes * count + remain.numBytes];
copyMemory(this.base, this.offset, data, BYTE_ARRAY_OFFSET, this.numBytes);
int offset = this.numBytes;
int idx = 0;
while (idx < count) {
copyMemory(pad.base, pad.offset, data, BYTE_ARRAY_OFFSET + offset, pad.numBytes);
++ idx;
offset += pad.numBytes;
}
copyMemory(remain.base, remain.offset, data, BYTE_ARRAY_OFFSET + offset, remain.numBytes);
return UTF8String.fromBytes(data);
}
}
/**
* Returns str, left-padded with pad to a length of len.
* For example:
* ('hi', 5, '??') => '???hi'
* ('hi', 1, '??') => 'h'
*/
public UTF8String lpad(int len, UTF8String pad) {
int spaces = len - this.numChars(); // number of char need to pad
if (spaces <= 0 || pad.numBytes() == 0) {
// no padding at all, return the substring of the current string
return substring(0, len);
} else {
int padChars = pad.numChars();
int count = spaces / padChars; // how many padding string needed
// the partial string of the padding
UTF8String remain = pad.substring(0, spaces - padChars * count);
byte[] data = new byte[this.numBytes + pad.numBytes * count + remain.numBytes];
int offset = 0;
int idx = 0;
while (idx < count) {
copyMemory(pad.base, pad.offset, data, BYTE_ARRAY_OFFSET + offset, pad.numBytes);
++ idx;
offset += pad.numBytes;
}
copyMemory(remain.base, remain.offset, data, BYTE_ARRAY_OFFSET + offset, remain.numBytes);
offset += remain.numBytes;
copyMemory(this.base, this.offset, data, BYTE_ARRAY_OFFSET + offset, numBytes());
return UTF8String.fromBytes(data);
}
}
/**
* Concatenates input strings together into a single string. Returns null if any input is null.
*/
public static UTF8String concat(UTF8String... inputs) {
// Compute the total length of the result.
long totalLength = 0;
for (int i = 0; i < inputs.length; i++) {
if (inputs[i] != null) {
totalLength += (long)inputs[i].numBytes;
} else {
return null;
}
}
// Allocate a new byte array, and copy the inputs one by one into it.
final byte[] result = new byte[Ints.checkedCast(totalLength)];
int offset = 0;
for (int i = 0; i < inputs.length; i++) {
int len = inputs[i].numBytes;
copyMemory(
inputs[i].base, inputs[i].offset,
result, BYTE_ARRAY_OFFSET + offset,
len);
offset += len;
}
return fromBytes(result);
}
/**
* Concatenates input strings together into a single string using the separator.
* A null input is skipped. For example, concat(",", "a", null, "c") would yield "a,c".
*/
public static UTF8String concatWs(UTF8String separator, UTF8String... inputs) {
if (separator == null) {
return null;
}
int numInputBytes = 0; // total number of bytes from the inputs
int numInputs = 0; // number of non-null inputs
for (int i = 0; i < inputs.length; i++) {
if (inputs[i] != null) {
numInputBytes += inputs[i].numBytes;
numInputs++;
}
}
if (numInputs == 0) {
// Return an empty string if there is no input, or all the inputs are null.
return EMPTY_UTF8;
}
// Allocate a new byte array, and copy the inputs one by one into it.
// The size of the new array is the size of all inputs, plus the separators.
final byte[] result = new byte[numInputBytes + (numInputs - 1) * separator.numBytes];
int offset = 0;
for (int i = 0, j = 0; i < inputs.length; i++) {
if (inputs[i] != null) {
int len = inputs[i].numBytes;
copyMemory(
inputs[i].base, inputs[i].offset,
result, BYTE_ARRAY_OFFSET + offset,
len);
offset += len;
j++;
// Add separator if this is not the last input.
if (j < numInputs) {
copyMemory(
separator.base, separator.offset,
result, BYTE_ARRAY_OFFSET + offset,
separator.numBytes);
offset += separator.numBytes;
}
}
}
return fromBytes(result);
}
public UTF8String[] split(UTF8String pattern, int limit) {
String[] splits = toString().split(pattern.toString(), limit);
UTF8String[] res = new UTF8String[splits.length];
for (int i = 0; i < res.length; i++) {
res[i] = fromString(splits[i]);
}
return res;
}
public UTF8String replace(UTF8String search, UTF8String replace) {
if (EMPTY_UTF8.equals(search)) {
return this;
}
String replaced = toString().replace(
search.toString(), replace.toString());
return fromString(replaced);
}
// TODO: Need to use `Code Point` here instead of Char in case the character longer than 2 bytes
public UTF8String translate(Map<Character, Character> dict) {
String srcStr = this.toString();
StringBuilder sb = new StringBuilder();
for(int k = 0; k< srcStr.length(); k++) {
if (null == dict.get(srcStr.charAt(k))) {
sb.append(srcStr.charAt(k));
} else if ('\0' != dict.get(srcStr.charAt(k))){
sb.append(dict.get(srcStr.charAt(k)));
}
}
return fromString(sb.toString());
}
  /**
   * Wrapper over `long` to allow result of parsing long from string to be accessed via reference.
   * This is done solely for better performance and is not expected to be used by end users.
   */
  public static class LongWrapper implements Serializable {
    // `transient`: this is scratch state for a single parse call and is not
    // meaningful across serialization.
    public transient long value = 0;
  }
  /**
   * Wrapper over `int` to allow result of parsing integer from string to be accessed via reference.
   * This is done solely for better performance and is not expected to be used by end users.
   *
   * {@link LongWrapper} could have been used here but using `int` directly save the extra cost of
   * conversion from `long` to `int`
   */
  public static class IntWrapper implements Serializable {
    // `transient`: this is scratch state for a single parse call and is not
    // meaningful across serialization.
    public transient int value = 0;
  }
  /**
   * Parses this UTF8String to long.
   *
   * Note that, in this method we accumulate the result in negative format, and convert it to
   * positive format at the end, if this string is not started with '-'. This is because min value
   * is bigger than max value in digits, e.g. Long.MAX_VALUE is '9223372036854775807' and
   * Long.MIN_VALUE is '-9223372036854775808'.
   *
   * This code is mostly copied from LazyLong.parseLong in Hive.
   *
   * @param toLongResult If a valid `long` was parsed from this UTF8String, then its value would
   *                     be set in `toLongResult`
   * @return true if the parsing was successful else false
   */
  public boolean toLong(LongWrapper toLongResult) {
    if (numBytes == 0) {
      return false;
    }

    byte b = getByte(0);
    final boolean negative = b == '-';
    int offset = 0;
    // Accept a single leading sign, but reject a bare "+" or "-".
    if (negative || b == '+') {
      offset++;
      if (numBytes == 1) {
        return false;
      }
    }

    final byte separator = '.';
    final int radix = 10;
    final long stopValue = Long.MIN_VALUE / radix;
    long result = 0;

    while (offset < numBytes) {
      b = getByte(offset);
      offset++;
      if (b == separator) {
        // We allow decimals and will return a truncated integral in that case.
        // Therefore we won't throw an exception here (checking the fractional
        // part happens below.)
        break;
      }

      int digit;
      if (b >= '0' && b <= '9') {
        digit = b - '0';
      } else {
        return false;
      }

      // We are going to process the new digit and accumulate the result. However, before doing
      // this, if the result is already smaller than the stopValue(Long.MIN_VALUE / radix), then
      // result * 10 will definitely be smaller than minValue, and we can stop.
      if (result < stopValue) {
        return false;
      }

      result = result * radix - digit;
      // Since the previous result is less than or equal to stopValue(Long.MIN_VALUE / radix), we
      // can just use `result > 0` to check overflow. If result overflows, we should stop.
      if (result > 0) {
        return false;
      }
    }

    // This is the case when we've encountered a decimal separator. The fractional
    // part will not change the number, but we will verify that the fractional part
    // is well formed.
    while (offset < numBytes) {
      byte currentByte = getByte(offset);
      if (currentByte < '0' || currentByte > '9') {
        return false;
      }
      offset++;
    }

    // The magnitude was accumulated as a negative number; negate it back unless
    // the input carried a '-' sign. A value still negative after negation means
    // the magnitude exceeded Long.MAX_VALUE.
    if (!negative) {
      result = -result;
      if (result < 0) {
        return false;
      }
    }

    toLongResult.value = result;
    return true;
  }
  /**
   * Parses this UTF8String to int.
   *
   * Note that, in this method we accumulate the result in negative format, and convert it to
   * positive format at the end, if this string is not started with '-'. This is because min value
   * is bigger than max value in digits, e.g. Integer.MAX_VALUE is '2147483647' and
   * Integer.MIN_VALUE is '-2147483648'.
   *
   * This code is mostly copied from LazyInt.parseInt in Hive.
   *
   * Note that, this method is almost same as `toLong`, but we leave it duplicated for performance
   * reasons, like Hive does.
   *
   * @param intWrapper If a valid `int` was parsed from this UTF8String, then its value would
   *                   be set in `intWrapper`
   * @return true if the parsing was successful else false
   */
  public boolean toInt(IntWrapper intWrapper) {
    if (numBytes == 0) {
      return false;
    }

    byte b = getByte(0);
    final boolean negative = b == '-';
    int offset = 0;
    // Accept a single leading sign, but reject a bare "+" or "-".
    if (negative || b == '+') {
      offset++;
      if (numBytes == 1) {
        return false;
      }
    }

    final byte separator = '.';
    final int radix = 10;
    final int stopValue = Integer.MIN_VALUE / radix;
    int result = 0;

    while (offset < numBytes) {
      b = getByte(offset);
      offset++;
      if (b == separator) {
        // We allow decimals and will return a truncated integral in that case.
        // Therefore we won't throw an exception here (checking the fractional
        // part happens below.)
        break;
      }

      int digit;
      if (b >= '0' && b <= '9') {
        digit = b - '0';
      } else {
        return false;
      }

      // We are going to process the new digit and accumulate the result. However, before doing
      // this, if the result is already smaller than the stopValue(Integer.MIN_VALUE / radix), then
      // result * 10 will definitely be smaller than minValue, and we can stop
      if (result < stopValue) {
        return false;
      }

      result = result * radix - digit;
      // Since the previous result is less than or equal to stopValue(Integer.MIN_VALUE / radix),
      // we can just use `result > 0` to check overflow. If result overflows, we should stop
      if (result > 0) {
        return false;
      }
    }

    // This is the case when we've encountered a decimal separator. The fractional
    // part will not change the number, but we will verify that the fractional part
    // is well formed.
    while (offset < numBytes) {
      byte currentByte = getByte(offset);
      if (currentByte < '0' || currentByte > '9') {
        return false;
      }
      offset++;
    }

    // The magnitude was accumulated as a negative number; negate it back unless
    // the input carried a '-' sign. A value still negative after negation means
    // the magnitude exceeded Integer.MAX_VALUE.
    if (!negative) {
      result = -result;
      if (result < 0) {
        return false;
      }
    }

    intWrapper.value = result;
    return true;
  }
public boolean toShort(IntWrapper intWrapper) {
if (toInt(intWrapper)) {
int intValue = intWrapper.value;
short result = (short) intValue;
if (result == intValue) {
return true;
}
}
return false;
}
public boolean toByte(IntWrapper intWrapper) {
if (toInt(intWrapper)) {
int intValue = intWrapper.value;
byte result = (byte) intValue;
if (result == intValue) {
return true;
}
}
return false;
}
  @Override
  public String toString() {
    // The backing bytes are UTF-8; decode them explicitly as such rather than
    // relying on the platform default charset.
    return new String(getBytes(), StandardCharsets.UTF_8);
  }
  @Override
  public UTF8String clone() {
    // NOTE(review): relies on getBytes() materializing the string's bytes into
    // an array that backs the new instance — confirm getBytes() copies when the
    // data is not already an exact on-heap array.
    return fromBytes(getBytes());
  }
public UTF8String copy() {
byte[] bytes = new byte[numBytes];
copyMemory(base, offset, bytes, BYTE_ARRAY_OFFSET, numBytes);
return fromBytes(bytes);
}
  @Override
  public int compareTo(@Nonnull final UTF8String other) {
    // Compare 8 bytes at a time for speed. Unsigned byte-wise comparison of
    // UTF-8 data yields the same ordering as comparing code points.
    int len = Math.min(numBytes, other.numBytes);
    int wordMax = (len / 8) * 8;
    long roffset = other.offset;
    Object rbase = other.base;
    for (int i = 0; i < wordMax; i += 8) {
      long left = getLong(base, offset + i);
      long right = getLong(rbase, roffset + i);
      if (left != right) {
        if (IS_LITTLE_ENDIAN) {
          // On little-endian hardware the bytes within each word are reversed
          // in the long, so normalize before the unsigned comparison.
          return Long.compareUnsigned(Long.reverseBytes(left), Long.reverseBytes(right));
        } else {
          return Long.compareUnsigned(left, right);
        }
      }
    }
    // Compare the remaining (< 8) tail bytes one at a time.
    for (int i = wordMax; i < len; i++) {
      // In UTF-8, the byte should be unsigned, so we should compare them as unsigned int.
      int res = (getByte(i) & 0xFF) - (Platform.getByte(rbase, roffset + i) & 0xFF);
      if (res != 0) {
        return res;
      }
    }
    // All shared bytes are equal: the shorter string sorts first.
    return numBytes - other.numBytes;
  }
  /** Alias for {@link #compareTo(UTF8String)}. */
  public int compare(final UTF8String other) {
    return compareTo(other);
  }
@Override
public boolean equals(final Object other) {
if (other instanceof UTF8String) {
UTF8String o = (UTF8String) other;
if (numBytes != o.numBytes) {
return false;
}
return ByteArrayMethods.arrayEquals(base, offset, o.base, o.offset, numBytes);
} else {
return false;
}
}
  /**
   * Levenshtein distance is a metric for measuring the distance of two strings. The distance is
   * defined by the minimum number of single-character edits (i.e. insertions, deletions or
   * substitutions) that are required to change one of the strings into the other.
   */
  public int levenshteinDistance(UTF8String other) {
    // Implementation adopted from org.apache.common.lang3.StringUtils.getLevenshteinDistance
    int n = numChars();
    int m = other.numChars();

    if (n == 0) {
      return m;
    } else if (m == 0) {
      return n;
    }

    // Make `s` the shorter string so the DP rows allocated below are minimal.
    UTF8String s, t;
    if (n <= m) {
      s = this;
      t = other;
    } else {
      s = other;
      t = this;
      int swap;
      swap = n;
      n = m;
      m = swap;
    }

    // Two-row dynamic program: `p` holds the previous row, `d` the current row.
    int[] p = new int[n + 1];
    int[] d = new int[n + 1];
    int[] swap;

    int i, i_bytes, j, j_bytes, num_bytes_j, cost;

    for (i = 0; i <= n; i++) {
      p[i] = i;
    }

    // `i_bytes`/`j_bytes` track byte offsets in parallel with the character
    // indexes, since UTF-8 characters are variable-width.
    for (j = 0, j_bytes = 0; j < m; j_bytes += num_bytes_j, j++) {
      num_bytes_j = numBytesForFirstByte(t.getByte(j_bytes));
      d[0] = j + 1;

      for (i = 0, i_bytes = 0; i < n; i_bytes += numBytesForFirstByte(s.getByte(i_bytes)), i++) {
        // Characters can only match when their first bytes and widths agree;
        // only then are the full byte sequences compared.
        if (s.getByte(i_bytes) != t.getByte(j_bytes) ||
            num_bytes_j != numBytesForFirstByte(s.getByte(i_bytes))) {
          cost = 1;
        } else {
          cost = (ByteArrayMethods.arrayEquals(t.base, t.offset + j_bytes, s.base,
              s.offset + i_bytes, num_bytes_j)) ? 0 : 1;
        }
        d[i + 1] = Math.min(Math.min(d[i] + 1, p[i + 1] + 1), p[i] + cost);
      }

      // Roll the rows: current becomes previous for the next iteration.
      swap = p;
      p = d;
      d = swap;
    }

    return p[n];
  }
  @Override
  public int hashCode() {
    // Murmur3 over the raw bytes with a constant seed, so strings that are
    // byte-wise equal (per equals()) always hash identically.
    return Murmur3_x86_32.hashUnsafeBytes(base, offset, numBytes, 42);
  }
  /**
   * Soundex mapping table
   */
  // Indexed by (letter - 'A'): '0' marks letters that produce no code (vowels
  // and Y), '7' marks H and W which are skipped entirely, and '1'-'6' are the
  // standard Soundex digit codes.
  private static final byte[] US_ENGLISH_MAPPING = {'0', '1', '2', '3', '0', '1', '2', '7',
    '0', '2', '2', '4', '5', '5', '0', '1', '2', '6', '2', '3', '0', '1', '7', '2', '0', '2'};

  /**
   * Encodes a string into a Soundex value. Soundex is an encoding used to relate similar names,
   * but can also be used as a general purpose scheme to find word with similar phonemes.
   * https://en.wikipedia.org/wiki/Soundex
   */
  public UTF8String soundex() {
    if (numBytes == 0) {
      return EMPTY_UTF8;
    }

    byte b = getByte(0);
    if ('a' <= b && b <= 'z') {
      // Uppercase an ASCII lowercase letter ('a' - 'A' == 32).
      b -= 32;
    } else if (b < 'A' || 'Z' < b) {
      // first character must be a letter
      return this;
    }
    // The code is always 4 bytes: the first letter followed by up to three
    // digit codes, zero-padded when fewer are produced.
    byte[] sx = {'0', '0', '0', '0'};
    sx[0] = b;
    int sxi = 1;
    int idx = b - 'A';
    byte lastCode = US_ENGLISH_MAPPING[idx];

    for (int i = 1; i < numBytes; i++) {
      b = getByte(i);
      if ('a' <= b && b <= 'z') {
        b -= 32;
      } else if (b < 'A' || 'Z' < b) {
        // not a letter, skip it
        lastCode = '0';
        continue;
      }
      idx = b - 'A';
      byte code = US_ENGLISH_MAPPING[idx];
      if (code == '7') {
        // ignore it ('7' letters do not emit a code and do not reset lastCode)
      } else {
        // Emit a code only for coded letters that differ from the previous
        // code (adjacent duplicates collapse).
        if (code != '0' && code != lastCode) {
          sx[sxi++] = code;
          if (sxi > 3) break;
        }
        lastCode = code;
      }
    }
    return UTF8String.fromBytes(sx);
  }
public void writeExternal(ObjectOutput out) throws IOException {
byte[] bytes = getBytes();
out.writeInt(bytes.length);
out.write(bytes);
}
  public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    // Rebuild this instance on-heap from the length-prefixed bytes written by
    // writeExternal.
    offset = BYTE_ARRAY_OFFSET;
    numBytes = in.readInt();
    base = new byte[numBytes];
    // readFully blocks until the whole array is populated (no partial reads).
    in.readFully((byte[]) base);
  }
@Override
public void write(Kryo kryo, Output out) {
byte[] bytes = getBytes();
out.writeInt(bytes.length);
out.write(bytes);
}
@Override
public void read(Kryo kryo, Input in) {
this.offset = BYTE_ARRAY_OFFSET;
this.numBytes = in.readInt();
this.base = new byte[numBytes];
in.read((byte[]) base);
}
}
| 9,934 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/MemoryBlock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.memory;
import javax.annotation.Nullable;
import org.apache.spark.unsafe.Platform;
/**
 * A consecutive block of memory, starting at a {@link MemoryLocation} with a fixed size.
 */
public class MemoryBlock extends MemoryLocation {

  /** Special `pageNumber` value for pages which were not allocated by TaskMemoryManagers */
  public static final int NO_PAGE_NUMBER = -1;

  /**
   * Special `pageNumber` value for marking pages that have been freed in the TaskMemoryManager.
   * We set `pageNumber` to this value in TaskMemoryManager.freePage() so that MemoryAllocator
   * can detect if pages which were allocated by TaskMemoryManager have been freed in the TMM
   * before being passed to MemoryAllocator.free() (it is an error to allocate a page in
   * TaskMemoryManager and then directly free it in a MemoryAllocator without going through
   * the TMM freePage() call).
   */
  public static final int FREED_IN_TMM_PAGE_NUMBER = -2;

  /**
   * Special `pageNumber` value for pages that have been freed by the MemoryAllocator. This allows
   * us to detect double-frees.
   */
  public static final int FREED_IN_ALLOCATOR_PAGE_NUMBER = -3;

  // Size of this block in bytes, fixed for the lifetime of the block.
  private final long length;

  /**
   * Optional page number; used when this MemoryBlock represents a page allocated by a
   * TaskMemoryManager. This field is public so that it can be modified by the TaskMemoryManager,
   * which lives in a different package.
   */
  public int pageNumber = NO_PAGE_NUMBER;

  /**
   * Creates a block of `length` bytes at `offset` within `obj`, or at absolute
   * address `offset` when `obj` is null (off-heap).
   */
  public MemoryBlock(@Nullable Object obj, long offset, long length) {
    super(obj, offset);
    this.length = length;
  }

  /**
   * Returns the size of the memory block.
   */
  public long size() {
    return length;
  }

  /**
   * Creates a memory block pointing to the memory used by the long array.
   */
  public static MemoryBlock fromLongArray(final long[] array) {
    return new MemoryBlock(array, Platform.LONG_ARRAY_OFFSET, array.length * 8L);
  }

  /**
   * Fills the memory block with the specified byte value.
   */
  public void fill(byte value) {
    Platform.setMemory(obj, offset, length, value);
  }
}
| 9,935 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.memory;
import javax.annotation.concurrent.GuardedBy;
import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import org.apache.spark.unsafe.Platform;
/**
 * A simple {@link MemoryAllocator} that can allocate up to 16GB using a JVM long primitive array.
 */
public class HeapMemoryAllocator implements MemoryAllocator {

  // Pools of previously allocated backing arrays, keyed by their 8-byte-aligned
  // size. WeakReferences let the GC reclaim pooled arrays under memory pressure.
  @GuardedBy("this")
  private final Map<Long, LinkedList<WeakReference<long[]>>> bufferPoolsBySize = new HashMap<>();

  private static final int POOLING_THRESHOLD_BYTES = 1024 * 1024;

  /**
   * Returns true if allocations of the given size should go through the pooling mechanism and
   * false otherwise.
   */
  private boolean shouldPool(long size) {
    // Very small allocations are less likely to benefit from pooling.
    return size >= POOLING_THRESHOLD_BYTES;
  }

  @Override
  public MemoryBlock allocate(long size) throws OutOfMemoryError {
    // Round the requested size up to a whole number of 8-byte words.
    int numWords = (int) ((size + 7) / 8);
    long alignedSize = numWords * 8L;
    assert (alignedSize >= size);
    if (shouldPool(alignedSize)) {
      synchronized (this) {
        final LinkedList<WeakReference<long[]>> pool = bufferPoolsBySize.get(alignedSize);
        if (pool != null) {
          while (!pool.isEmpty()) {
            final WeakReference<long[]> arrayReference = pool.pop();
            final long[] array = arrayReference.get();
            // The referent may have been garbage collected; skip cleared refs.
            if (array != null) {
              assert (array.length * 8L >= size);
              // Note: the block records the requested size, not the aligned size.
              MemoryBlock memory = new MemoryBlock(array, Platform.LONG_ARRAY_OFFSET, size);
              if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
                memory.fill(MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
              }
              return memory;
            }
          }
          // Every pooled reference was cleared: drop the empty pool entry.
          bufferPoolsBySize.remove(alignedSize);
        }
      }
    }
    long[] array = new long[numWords];
    MemoryBlock memory = new MemoryBlock(array, Platform.LONG_ARRAY_OFFSET, size);
    if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
      memory.fill(MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
    }
    return memory;
  }

  @Override
  public void free(MemoryBlock memory) {
    assert (memory.obj != null) :
      "baseObject was null; are you trying to use the on-heap allocator to free off-heap memory?";
    assert (memory.pageNumber != MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER) :
      "page has already been freed";
    assert ((memory.pageNumber == MemoryBlock.NO_PAGE_NUMBER)
            || (memory.pageNumber == MemoryBlock.FREED_IN_TMM_PAGE_NUMBER)) :
      "TMM-allocated pages must first be freed via TMM.freePage(), not directly in allocator " +
        "free()";

    final long size = memory.size();
    if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
      memory.fill(MemoryAllocator.MEMORY_DEBUG_FILL_FREED_VALUE);
    }

    // Mark the page as freed (so we can detect double-frees).
    memory.pageNumber = MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER;

    // As an additional layer of defense against use-after-free bugs, we mutate the
    // MemoryBlock to null out its reference to the long[] array.
    long[] array = (long[]) memory.obj;
    memory.setObjAndOffset(null, 0);

    // Return the backing array to the pool keyed by its aligned size, the same
    // key used at allocation time.
    long alignedSize = ((size + 7) / 8) * 8;
    if (shouldPool(alignedSize)) {
      synchronized (this) {
        LinkedList<WeakReference<long[]>> pool = bufferPoolsBySize.get(alignedSize);
        if (pool == null) {
          pool = new LinkedList<>();
          bufferPoolsBySize.put(alignedSize, pool);
        }
        pool.add(new WeakReference<>(array));
      }
    } else {
      // Do nothing
    }
  }
}
| 9,936 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/MemoryLocation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.memory;
import javax.annotation.Nullable;
/**
 * A memory location. Tracked either by a memory address (with off-heap allocation),
 * or by an offset from a JVM object (in-heap allocation).
 */
public class MemoryLocation {

  // Backing object for on-heap locations; null for off-heap locations, in which
  // case `offset` is an absolute memory address.
  @Nullable
  Object obj;

  // Offset within `obj` (on-heap), or absolute address (off-heap).
  long offset;

  public MemoryLocation(@Nullable Object obj, long offset) {
    this.obj = obj;
    this.offset = offset;
  }

  /** Creates an unset location (no base object, zero offset). */
  public MemoryLocation() {
    this(null, 0);
  }

  /** Re-points this location; allocators use this to invalidate freed blocks. */
  public void setObjAndOffset(Object newObj, long newOffset) {
    this.obj = newObj;
    this.offset = newOffset;
  }

  public final Object getBaseObject() {
    return obj;
  }

  public final long getBaseOffset() {
    return offset;
  }
}
| 9,937 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/UnsafeMemoryAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.memory;
import org.apache.spark.unsafe.Platform;
/**
 * A simple {@link MemoryAllocator} that uses {@code Unsafe} to allocate off-heap memory.
 */
public class UnsafeMemoryAllocator implements MemoryAllocator {

  @Override
  public MemoryBlock allocate(long size) throws OutOfMemoryError {
    // Off-heap blocks carry a null base object and an absolute address.
    final long address = Platform.allocateMemory(size);
    final MemoryBlock block = new MemoryBlock(null, address, size);
    if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
      // Poison fresh memory so reads of uninitialized bytes are easy to spot.
      block.fill(MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
    }
    return block;
  }

  @Override
  public void free(MemoryBlock memory) {
    assert (memory.obj == null) :
      "baseObject not null; are you trying to use the off-heap allocator to free on-heap memory?";
    assert (memory.pageNumber != MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER) :
      "page has already been freed";
    assert ((memory.pageNumber == MemoryBlock.NO_PAGE_NUMBER)
            || (memory.pageNumber == MemoryBlock.FREED_IN_TMM_PAGE_NUMBER)) :
      "TMM-allocated pages must be freed via TMM.freePage(), not directly in allocator free()";

    if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
      // Poison the memory before releasing it so stale readers see garbage.
      memory.fill(MemoryAllocator.MEMORY_DEBUG_FILL_FREED_VALUE);
    }
    Platform.freeMemory(memory.offset);

    // As an additional layer of defense against use-after-free bugs, we mutate the
    // MemoryBlock to reset its pointer.
    memory.offset = 0;
    // Mark the page as freed (so we can detect double-frees).
    memory.pageNumber = MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER;
  }
}
| 9,938 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/MemoryAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.memory;
public interface MemoryAllocator {

  /**
   * Whether to fill newly allocated and deallocated memory with 0xa5 and 0x5a bytes respectively.
   * This helps catch misuse of uninitialized or freed memory, but imposes some overhead.
   */
  boolean MEMORY_DEBUG_FILL_ENABLED = Boolean.parseBoolean(
    System.getProperty("spark.memory.debugFill", "false"));

  // Same as jemalloc's debug fill values.
  byte MEMORY_DEBUG_FILL_CLEAN_VALUE = (byte)0xa5;
  byte MEMORY_DEBUG_FILL_FREED_VALUE = (byte)0x5a;

  /**
   * Allocates a contiguous block of memory. Note that the allocated memory is not guaranteed
   * to be zeroed out (call `fill(0)` on the result if this is necessary).
   */
  MemoryBlock allocate(long size) throws OutOfMemoryError;

  /** Releases a block previously returned by {@link #allocate(long)}. */
  void free(MemoryBlock memory);

  // Shared singleton allocators: off-heap (Unsafe-based) and on-heap (long[]-backed).
  MemoryAllocator UNSAFE = new UnsafeMemoryAllocator();

  MemoryAllocator HEAP = new HeapMemoryAllocator();
}
| 9,939 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/hash/Murmur3_x86_32.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.hash;
import org.apache.spark.unsafe.Platform;
/**
 * 32-bit Murmur3 hasher. This is based on Guava's Murmur3_32HashFunction.
 */
public final class Murmur3_x86_32 {
  // Murmur3 multiplication constants used in the k1 mixing step.
  private static final int C1 = 0xcc9e2d51;
  private static final int C2 = 0x1b873593;

  private final int seed;

  public Murmur3_x86_32(int seed) {
    this.seed = seed;
  }

  @Override
  public String toString() {
    return "Murmur3_32(seed=" + seed + ")";
  }

  /** Hashes a single int using this instance's seed. */
  public int hashInt(int input) {
    return hashInt(input, seed);
  }

  public static int hashInt(int input, int seed) {
    int k1 = mixK1(input);
    int h1 = mixH1(seed, k1);

    return fmix(h1, 4);
  }

  public int hashUnsafeWords(Object base, long offset, int lengthInBytes) {
    return hashUnsafeWords(base, offset, lengthInBytes, seed);
  }

  /** Hashes a region whose length is a multiple of 8 bytes. */
  public static int hashUnsafeWords(Object base, long offset, int lengthInBytes, int seed) {
    // This is based on Guava's `Murmur32_Hasher.processRemaining(ByteBuffer)` method.
    assert (lengthInBytes % 8 == 0): "lengthInBytes must be a multiple of 8 (word-aligned)";
    int h1 = hashBytesByInt(base, offset, lengthInBytes, seed);
    return fmix(h1, lengthInBytes);
  }

  /**
   * Hashes an arbitrary-length region. Trailing (non-4-byte-aligned) bytes are
   * mixed in one at a time, which is why this variant is not compatible with
   * the reference implementation (see comment below).
   */
  public static int hashUnsafeBytes(Object base, long offset, int lengthInBytes, int seed) {
    // This is not compatible with original and another implementations.
    // But remain it for backward compatibility for the components existing before 2.3.
    assert (lengthInBytes >= 0): "lengthInBytes cannot be negative";
    int lengthAligned = lengthInBytes - lengthInBytes % 4;
    int h1 = hashBytesByInt(base, offset, lengthAligned, seed);
    for (int i = lengthAligned; i < lengthInBytes; i++) {
      int halfWord = Platform.getByte(base, offset + i);
      int k1 = mixK1(halfWord);
      h1 = mixH1(h1, k1);
    }
    return fmix(h1, lengthInBytes);
  }

  /**
   * Hashes an arbitrary-length region, packing the trailing bytes into a single
   * word before mixing (matching the reference implementation).
   */
  public static int hashUnsafeBytes2(Object base, long offset, int lengthInBytes, int seed) {
    // This is compatible with original and another implementations.
    // Use this method for new components after Spark 2.3.
    assert (lengthInBytes >= 0): "lengthInBytes cannot be negative";
    int lengthAligned = lengthInBytes - lengthInBytes % 4;
    int h1 = hashBytesByInt(base, offset, lengthAligned, seed);
    int k1 = 0;
    for (int i = lengthAligned, shift = 0; i < lengthInBytes; i++, shift += 8) {
      k1 ^= (Platform.getByte(base, offset + i) & 0xFF) << shift;
    }
    h1 ^= mixK1(k1);
    return fmix(h1, lengthInBytes);
  }

  /** Mixes the region into h1 four bytes at a time; length must be 4-byte aligned. */
  private static int hashBytesByInt(Object base, long offset, int lengthInBytes, int seed) {
    assert (lengthInBytes % 4 == 0);
    int h1 = seed;
    for (int i = 0; i < lengthInBytes; i += 4) {
      int halfWord = Platform.getInt(base, offset + i);
      int k1 = mixK1(halfWord);
      h1 = mixH1(h1, k1);
    }
    return h1;
  }

  public int hashLong(long input) {
    return hashLong(input, seed);
  }

  /** Hashes a long as two 32-bit halves, low word first. */
  public static int hashLong(long input, int seed) {
    int low = (int) input;
    int high = (int) (input >>> 32);

    int k1 = mixK1(low);
    int h1 = mixH1(seed, k1);

    k1 = mixK1(high);
    h1 = mixH1(h1, k1);

    return fmix(h1, 8);
  }

  private static int mixK1(int k1) {
    k1 *= C1;
    k1 = Integer.rotateLeft(k1, 15);
    k1 *= C2;
    return k1;
  }

  private static int mixH1(int h1, int k1) {
    h1 ^= k1;
    h1 = Integer.rotateLeft(h1, 13);
    h1 = h1 * 5 + 0xe6546b64;
    return h1;
  }

  // Finalization mix - force all bits of a hash block to avalanche
  private static int fmix(int h1, int length) {
    h1 ^= length;
    h1 ^= h1 >>> 16;
    h1 *= 0x85ebca6b;
    h1 ^= h1 >>> 13;
    h1 *= 0xc2b2ae35;
    h1 ^= h1 >>> 16;
    return h1;
  }
}
| 9,940 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/array/LongArray.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.array;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.memory.MemoryBlock;
/**
* An array of long values. Compared with native JVM arrays, this:
* <ul>
* <li>supports using both in-heap and off-heap memory</li>
* <li>has no bound checking, and thus can crash the JVM process when assert is turned off</li>
* </ul>
*/
/**
 * An array of long values. Compared with native JVM arrays, this:
 * <ul>
 *   <li>supports using both in-heap and off-heap memory</li>
 *   <li>has no bound checking, and thus can crash the JVM process when assert is turned off</li>
 * </ul>
 */
public final class LongArray {
  // Declared long so all offset computations below use long multiplications.
  private static final long WIDTH = 8;

  private final MemoryBlock block;
  private final Object base;
  private final long offset;
  private final long numElements;

  public LongArray(MemoryBlock memory) {
    assert memory.size() < (long) Integer.MAX_VALUE * 8: "Array size >= Integer.MAX_VALUE elements";
    this.block = memory;
    this.base = memory.getBaseObject();
    this.offset = memory.getBaseOffset();
    this.numElements = memory.size() / WIDTH;
  }

  public MemoryBlock memoryBlock() {
    return block;
  }

  public Object getBaseObject() {
    return base;
  }

  public long getBaseOffset() {
    return offset;
  }

  /**
   * Returns the number of elements this array can hold.
   */
  public long size() {
    return numElements;
  }

  /**
   * Fill this all with 0L.
   */
  public void zeroOut() {
    for (long i = 0; i < numElements; i++) {
      Platform.putLong(base, offset + i * WIDTH, 0);
    }
  }

  /**
   * Sets the value at position {@code index}.
   */
  public void set(int index, long value) {
    assert index >= 0 : "index (" + index + ") should >= 0";
    assert index < numElements : "index (" + index + ") should < length (" + numElements + ")";
    Platform.putLong(base, offset + index * WIDTH, value);
  }

  /**
   * Returns the value at position {@code index}.
   */
  public long get(int index) {
    assert index >= 0 : "index (" + index + ") should >= 0";
    assert index < numElements : "index (" + index + ") should < length (" + numElements + ")";
    return Platform.getLong(base, offset + index * WIDTH);
  }
}
| 9,941 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/unsafe/array/ByteArrayMethods.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.unsafe.array;
import org.apache.spark.unsafe.Platform;
/** Static helpers for byte-array sizing, word rounding, and fast equality checks. */
public class ByteArrayMethods {

  private ByteArrayMethods() {
    // Private constructor, since this class only contains static methods.
  }

  /**
   * Returns the next number greater or equal num that is power of 2.
   * NOTE(review): returns 0 for num == 0 and overflows for num > 2^62 — callers are
   * presumably expected to pass positive, in-range sizes; confirm at call sites.
   */
  public static long nextPowerOf2(long num) {
    final long highBit = Long.highestOneBit(num);
    return (highBit == num) ? num : highBit << 1;
  }

  /** Rounds {@code numBytes} up to the next multiple of 8 (the word size). */
  public static int roundNumberOfBytesToNearestWord(int numBytes) {
    return (int)roundNumberOfBytesToNearestWord((long)numBytes);
  }

  /** Rounds {@code numBytes} up to the next multiple of 8 (the word size). */
  public static long roundNumberOfBytesToNearestWord(long numBytes) {
    long remainder = numBytes & 0x07;  // This is equivalent to `numBytes % 8`
    if (remainder == 0) {
      return numBytes;
    } else {
      return numBytes + (8 - remainder);
    }
  }

  // Some JVMs can't allocate arrays of length Integer.MAX_VALUE; actual max is somewhat smaller.
  // Be conservative and lower the cap a little.
  // Refer to "http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/tip/src/share/classes/java/util/ArrayList.java#l229"
  // This value is word rounded. Use this value if the allocated byte arrays are used to store other
  // types rather than bytes.
  // Declared final: this is a constant and must never be reassigned at runtime.
  public static final int MAX_ROUNDED_ARRAY_LENGTH = Integer.MAX_VALUE - 15;

  // Whether the platform supports unaligned 8-byte loads; decided once at class load.
  private static final boolean unaligned = Platform.unaligned();

  /**
   * Optimized byte array equality check for byte arrays.
   * @return true if the arrays are equal, false otherwise
   */
  public static boolean arrayEquals(
      Object leftBase, long leftOffset, Object rightBase, long rightOffset, final long length) {
    int i = 0;

    // check if stars align and we can get both offsets to be aligned
    if ((leftOffset % 8) == (rightOffset % 8)) {
      // Compare byte-by-byte until the left cursor reaches an 8-byte boundary.
      while ((leftOffset + i) % 8 != 0 && i < length) {
        if (Platform.getByte(leftBase, leftOffset + i) !=
            Platform.getByte(rightBase, rightOffset + i)) {
          return false;
        }
        i += 1;
      }
    }
    // for architectures that support unaligned accesses, chew it up 8 bytes at a time
    if (unaligned || (((leftOffset + i) % 8 == 0) && ((rightOffset + i) % 8 == 0))) {
      while (i <= length - 8) {
        if (Platform.getLong(leftBase, leftOffset + i) !=
            Platform.getLong(rightBase, rightOffset + i)) {
          return false;
        }
        i += 8;
      }
    }
    // this will finish off the unaligned comparisons, or do the entire aligned
    // comparison whichever is needed.
    while (i < length) {
      if (Platform.getByte(leftBase, leftOffset + i) !=
          Platform.getByte(rightBase, rightOffset + i)) {
        return false;
      }
      i += 1;
    }
    return true;
  }
}
| 9,942 |
0 | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/sql/catalyst | Create_ds/spark/common/unsafe/src/main/java/org/apache/spark/sql/catalyst/expressions/HiveHasher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions;
import org.apache.spark.unsafe.Platform;
/**
* Simulates Hive's hashing function from Hive v1.2.1
* org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils#hashcode()
*/
/**
 * Simulates Hive's hashing function from Hive v1.2.1
 * org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils#hashcode()
 */
public class HiveHasher {

  @Override
  public String toString() {
    return HiveHasher.class.getSimpleName();
  }

  /** Ints hash to themselves, matching Hive's behavior. */
  public static int hashInt(int input) {
    return input;
  }

  /** Folds the two 32-bit halves of the input together with XOR. */
  public static int hashLong(long input) {
    return (int) (input ^ (input >>> 32));
  }

  /** Classic polynomial (hash*31 + byte) rolling hash over the region. */
  public static int hashUnsafeBytes(Object base, long offset, int lengthInBytes) {
    assert (lengthInBytes >= 0): "lengthInBytes cannot be negative";
    int hash = 0;
    for (int i = 0; i < lengthInBytes; i++) {
      hash = hash * 31 + (int) Platform.getByte(base, offset + i);
    }
    return hash;
  }
}
| 9,943 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/InMemoryIteratorSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
/**
 * Runs the shared {@link DBIteratorSuite} iteration tests against an in-memory store
 * implementation. All test logic lives in the parent class.
 */
public class InMemoryIteratorSuite extends DBIteratorSuite {

  @Override
  protected KVStore createStore() {
    return new InMemoryStore();
  }
}
| 9,944 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/InMemoryStoreSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.util.NoSuchElementException;
import com.google.common.collect.ImmutableSet;
import org.junit.Test;
import static org.junit.Assert.*;
/** Unit tests exercising the basic KVStore contract against {@code InMemoryStore}. */
public class InMemoryStoreSuite {

  @Test
  public void testObjectWriteReadDelete() throws Exception {
    KVStore store = new InMemoryStore();

    CustomType1 obj = new CustomType1();
    obj.key = "key";
    obj.id = "id";
    obj.name = "name";

    // Reading before any write must fail.
    try {
      store.read(CustomType1.class, obj.key);
      fail("Expected exception for non-existent object.");
    } catch (NoSuchElementException nsee) {
      // Expected.
    }

    store.write(obj);
    assertEquals(obj, store.read(obj.getClass(), obj.key));
    assertEquals(1L, store.count(obj.getClass()));

    store.delete(obj.getClass(), obj.key);
    // Reading after the delete must fail again.
    try {
      store.read(obj.getClass(), obj.key);
      fail("Expected exception for deleted object.");
    } catch (NoSuchElementException nsee) {
      // Expected.
    }
  }

  @Test
  public void testMultipleObjectWriteReadDelete() throws Exception {
    KVStore store = new InMemoryStore();

    CustomType1 first = new CustomType1();
    first.key = "key1";
    first.id = "id";
    first.name = "name1";

    CustomType1 second = new CustomType1();
    second.key = "key2";
    second.id = "id";
    second.name = "name2";

    store.write(first);
    store.write(second);

    assertEquals(first, store.read(first.getClass(), first.key));
    assertEquals(second, store.read(second.getClass(), second.key));
    assertEquals(2L, store.count(first.getClass()));

    // Deleting one entry must leave the other readable.
    store.delete(first.getClass(), first.key);
    assertEquals(second, store.read(second.getClass(), second.key));
    store.delete(second.getClass(), second.key);
    try {
      store.read(second.getClass(), second.key);
      fail("Expected exception for deleted object.");
    } catch (NoSuchElementException nsee) {
      // Expected.
    }
  }

  @Test
  public void testMetadata() throws Exception {
    KVStore store = new InMemoryStore();
    assertNull(store.getMetadata(CustomType1.class));

    CustomType1 meta = new CustomType1();
    meta.id = "id";
    meta.name = "name";

    store.setMetadata(meta);
    assertEquals(meta, store.getMetadata(CustomType1.class));

    // Clearing metadata must return the store to its initial state.
    store.setMetadata(null);
    assertNull(store.getMetadata(CustomType1.class));
  }

  @Test
  public void testUpdate() throws Exception {
    KVStore store = new InMemoryStore();

    CustomType1 obj = new CustomType1();
    obj.key = "key";
    obj.id = "id";
    obj.name = "name";

    store.write(obj);

    // Overwriting the same key must replace, not duplicate, the entry.
    obj.name = "anotherName";
    store.write(obj);

    assertEquals(1, store.count(obj.getClass()));
    assertSame(obj, store.read(obj.getClass(), obj.key));
  }

  @Test
  public void testArrayIndices() throws Exception {
    KVStore store = new InMemoryStore();

    ArrayKeyIndexType entry = new ArrayKeyIndexType();
    entry.key = new int[] { 1, 2 };
    entry.id = new String[] { "3", "4" };

    store.write(entry);
    // Array-valued keys and index values must be looked up by value equality.
    assertEquals(entry, store.read(ArrayKeyIndexType.class, entry.key));
    assertEquals(entry,
      store.view(ArrayKeyIndexType.class).index("id").first(entry.id).iterator().next());
  }

  @Test
  public void testRemoveAll() throws Exception {
    KVStore store = new InMemoryStore();

    // Two entries per (i, j) combination, plus one outlier written below: 9 total.
    for (int i = 0; i < 2; i++) {
      for (int j = 0; j < 2; j++) {
        ArrayKeyIndexType entry = new ArrayKeyIndexType();
        entry.key = new int[] { i, j, 0 };
        entry.id = new String[] { "things" };
        store.write(entry);

        entry = new ArrayKeyIndexType();
        entry.key = new int[] { i, j, 1 };
        entry.id = new String[] { "more things" };
        store.write(entry);
      }
    }

    ArrayKeyIndexType outlier = new ArrayKeyIndexType();
    outlier.key = new int[] { 2, 2, 2 };
    outlier.id = new String[] { "things" };
    store.write(outlier);

    assertEquals(9, store.count(ArrayKeyIndexType.class));

    // Remove two entries by natural key.
    store.removeAllByIndexValues(
      ArrayKeyIndexType.class,
      KVIndex.NATURAL_INDEX_NAME,
      ImmutableSet.of(new int[] {0, 0, 0}, new int[] { 2, 2, 2 }));
    assertEquals(7, store.count(ArrayKeyIndexType.class));

    // Remove everything indexed under "things".
    store.removeAllByIndexValues(
      ArrayKeyIndexType.class,
      "id",
      ImmutableSet.of(new String [] { "things" }));
    assertEquals(4, store.count(ArrayKeyIndexType.class));

    // Removing "more things" empties the store.
    store.removeAllByIndexValues(
      ArrayKeyIndexType.class,
      "id",
      ImmutableSet.of(new String [] { "more things" }));
    assertEquals(0, store.count(ArrayKeyIndexType.class));
  }

  @Test
  public void testBasicIteration() throws Exception {
    KVStore store = new InMemoryStore();

    CustomType1 first = new CustomType1();
    first.key = "1";
    first.id = "id1";
    first.name = "name1";
    store.write(first);

    CustomType1 second = new CustomType1();
    second.key = "2";
    second.id = "id2";
    second.name = "name2";
    store.write(second);

    // Plain iteration, skip(), max(), and first() must all respect natural-key order.
    assertEquals(first.id, store.view(first.getClass()).iterator().next().id);
    assertEquals(second.id, store.view(first.getClass()).skip(1).iterator().next().id);
    assertEquals(second.id, store.view(first.getClass()).skip(1).max(1).iterator().next().id);
    assertEquals(first.id,
      store.view(first.getClass()).first(first.key).max(1).iterator().next().id);
    assertEquals(second.id,
      store.view(first.getClass()).first(second.key).max(1).iterator().next().id);
    assertFalse(store.view(first.getClass()).first(second.id).skip(1).iterator().hasNext());
  }
}
| 9,945 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/DBIteratorSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.*;
/**
 * Shared iterator test suite for KVStore implementations. Subclasses supply the concrete
 * store via {@link #createStore()}. A randomized data set is generated once per class (the
 * seed is logged so failures can be reproduced) and shared by all tests, which then verify
 * iteration order, skip/max/first/last limits, and descending views against reference
 * comparators that mimic the store's index ordering.
 */
public abstract class DBIteratorSuite {

  private static final Logger LOG = LoggerFactory.getLogger(DBIteratorSuite.class);

  // Bounds for the randomly-sized generated data set.
  private static final int MIN_ENTRIES = 42;
  private static final int MAX_ENTRIES = 1024;
  private static final Random RND = new Random();

  // Shared across tests: populated lazily by setup(), cleared by cleanupData().
  private static List<CustomType1> allEntries;
  private static List<CustomType1> clashingEntries;
  private static KVStore db;

  private interface BaseComparator extends Comparator<CustomType1> {
    /**
     * Returns a comparator that falls back to natural order if this comparator's ordering
     * returns equality for two elements. Used to mimic how the index sorts things internally.
     */
    default BaseComparator fallback() {
      return (t1, t2) -> {
        int result = BaseComparator.this.compare(t1, t2);
        if (result != 0) {
          return result;
        }
        return t1.key.compareTo(t2.key);
      };
    }

    /** Reverses the order of this comparator. */
    default BaseComparator reverse() {
      return (t1, t2) -> -BaseComparator.this.compare(t1, t2);
    }
  }

  // Reference comparators, one per indexed field of CustomType1.
  private static final BaseComparator NATURAL_ORDER = (t1, t2) -> t1.key.compareTo(t2.key);
  private static final BaseComparator REF_INDEX_ORDER = (t1, t2) -> t1.id.compareTo(t2.id);
  private static final BaseComparator COPY_INDEX_ORDER = (t1, t2) -> t1.name.compareTo(t2.name);
  private static final BaseComparator NUMERIC_INDEX_ORDER = (t1, t2) -> {
    return Integer.valueOf(t1.num).compareTo(t2.num);
  };
  private static final BaseComparator CHILD_INDEX_ORDER = (t1, t2) -> t1.child.compareTo(t2.child);

  /**
   * Implementations should override this method; it is called only once, before all tests are
   * run. Any state can be safely stored in static variables and cleaned up in a @AfterClass
   * handler.
   */
  protected abstract KVStore createStore() throws Exception;

  @BeforeClass
  public static void setupClass() {
    // Log the seed so a failing run can be reproduced deterministically.
    long seed = RND.nextLong();
    LOG.info("Random seed: {}", seed);
    RND.setSeed(seed);
  }

  @AfterClass
  public static void cleanupData() throws Exception {
    allEntries = null;
    db = null;
  }

  @Before
  public void setup() throws Exception {
    // Initialize only once; all tests in the class share the same store and entries.
    if (db != null) {
      return;
    }

    db = createStore();

    int count = RND.nextInt(MAX_ENTRIES) + MIN_ENTRIES;

    allEntries = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
      CustomType1 t = new CustomType1();
      t.key = "key" + i;
      t.id = "id" + i;
      t.name = "name" + RND.nextInt(MAX_ENTRIES);
      // Force one item to have an integer value of zero to test the fix for SPARK-23103.
      t.num = (i != 0) ? (int) RND.nextLong() : 0;
      t.child = "child" + (i % MIN_ENTRIES);
      allEntries.add(t);
    }

    // Shuffle the entries to avoid the insertion order matching the natural ordering. Just in case.
    Collections.shuffle(allEntries, RND);
    for (CustomType1 e : allEntries) {
      db.write(e);
    }

    // Pick the first generated value, and forcefully create a few entries that will clash
    // with the indexed values (id and name), to make sure the index behaves correctly when
    // multiple entities are indexed by the same value.
    //
    // This also serves as a test for the test code itself, to make sure it's sorting indices
    // the same way the store is expected to.
    CustomType1 first = allEntries.get(0);
    clashingEntries = new ArrayList<>();

    int clashCount = RND.nextInt(MIN_ENTRIES) + 1;
    for (int i = 0; i < clashCount; i++) {
      CustomType1 t = new CustomType1();
      t.key = "n-key" + (count + i);
      t.id = first.id;
      t.name = first.name;
      t.num = first.num;
      t.child = first.child;
      allEntries.add(t);
      clashingEntries.add(t);
      db.write(t);
    }

    // Create another entry that could cause problems: take the first entry, and make its indexed
    // name be an extension of the existing ones, to make sure the implementation sorts these
    // correctly even considering the separator character (shorter strings first).
    CustomType1 t = new CustomType1();
    t.key = "extended-key-0";
    t.id = first.id;
    t.name = first.name + "a";
    t.num = first.num;
    t.child = first.child;
    allEntries.add(t);
    db.write(t);
  }

  // Ascending iteration over each index, unrestricted.

  @Test
  public void naturalIndex() throws Exception {
    testIteration(NATURAL_ORDER, view(), null, null);
  }

  @Test
  public void refIndex() throws Exception {
    testIteration(REF_INDEX_ORDER, view().index("id"), null, null);
  }

  @Test
  public void copyIndex() throws Exception {
    testIteration(COPY_INDEX_ORDER, view().index("name"), null, null);
  }

  @Test
  public void numericIndex() throws Exception {
    testIteration(NUMERIC_INDEX_ORDER, view().index("int"), null, null);
  }

  @Test
  public void childIndex() throws Exception {
    CustomType1 any = pickLimit();
    testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id), null, null);
  }

  // Descending iteration over each index.

  @Test
  public void naturalIndexDescending() throws Exception {
    testIteration(NATURAL_ORDER, view().reverse(), null, null);
  }

  @Test
  public void refIndexDescending() throws Exception {
    testIteration(REF_INDEX_ORDER, view().index("id").reverse(), null, null);
  }

  @Test
  public void copyIndexDescending() throws Exception {
    testIteration(COPY_INDEX_ORDER, view().index("name").reverse(), null, null);
  }

  @Test
  public void numericIndexDescending() throws Exception {
    testIteration(NUMERIC_INDEX_ORDER, view().index("int").reverse(), null, null);
  }

  @Test
  public void childIndexDescending() throws Exception {
    CustomType1 any = pickLimit();
    testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).reverse(), null, null);
  }

  // Iteration starting from a chosen element (first()).

  @Test
  public void naturalIndexWithStart() throws Exception {
    CustomType1 first = pickLimit();
    testIteration(NATURAL_ORDER, view().first(first.key), first, null);
  }

  @Test
  public void refIndexWithStart() throws Exception {
    CustomType1 first = pickLimit();
    testIteration(REF_INDEX_ORDER, view().index("id").first(first.id), first, null);
  }

  @Test
  public void copyIndexWithStart() throws Exception {
    CustomType1 first = pickLimit();
    testIteration(COPY_INDEX_ORDER, view().index("name").first(first.name), first, null);
  }

  @Test
  public void numericIndexWithStart() throws Exception {
    CustomType1 first = pickLimit();
    testIteration(NUMERIC_INDEX_ORDER, view().index("int").first(first.num), first, null);
  }

  @Test
  public void childIndexWithStart() throws Exception {
    CustomType1 any = pickLimit();
    testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).first(any.child), null,
      null);
  }

  // Descending iteration starting from a chosen element.

  @Test
  public void naturalIndexDescendingWithStart() throws Exception {
    CustomType1 first = pickLimit();
    testIteration(NATURAL_ORDER, view().reverse().first(first.key), first, null);
  }

  @Test
  public void refIndexDescendingWithStart() throws Exception {
    CustomType1 first = pickLimit();
    testIteration(REF_INDEX_ORDER, view().reverse().index("id").first(first.id), first, null);
  }

  @Test
  public void copyIndexDescendingWithStart() throws Exception {
    CustomType1 first = pickLimit();
    testIteration(COPY_INDEX_ORDER, view().reverse().index("name").first(first.name), first, null);
  }

  @Test
  public void numericIndexDescendingWithStart() throws Exception {
    CustomType1 first = pickLimit();
    testIteration(NUMERIC_INDEX_ORDER, view().reverse().index("int").first(first.num), first, null);
  }

  @Test
  public void childIndexDescendingWithStart() throws Exception {
    CustomType1 any = pickLimit();
    testIteration(CHILD_INDEX_ORDER,
      view().index("child").parent(any.id).first(any.child).reverse(), null, null);
  }

  // Iteration with a skip count.

  @Test
  public void naturalIndexWithSkip() throws Exception {
    testIteration(NATURAL_ORDER, view().skip(pickCount()), null, null);
  }

  @Test
  public void refIndexWithSkip() throws Exception {
    testIteration(REF_INDEX_ORDER, view().index("id").skip(pickCount()), null, null);
  }

  @Test
  public void copyIndexWithSkip() throws Exception {
    testIteration(COPY_INDEX_ORDER, view().index("name").skip(pickCount()), null, null);
  }

  @Test
  public void childIndexWithSkip() throws Exception {
    CustomType1 any = pickLimit();
    testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).skip(pickCount()),
      null, null);
  }

  // Iteration with a max element count.

  @Test
  public void naturalIndexWithMax() throws Exception {
    testIteration(NATURAL_ORDER, view().max(pickCount()), null, null);
  }

  @Test
  public void copyIndexWithMax() throws Exception {
    testIteration(COPY_INDEX_ORDER, view().index("name").max(pickCount()), null, null);
  }

  @Test
  public void childIndexWithMax() throws Exception {
    CustomType1 any = pickLimit();
    testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).max(pickCount()), null,
      null);
  }

  // Iteration stopping at a chosen element (last()).

  @Test
  public void naturalIndexWithLast() throws Exception {
    CustomType1 last = pickLimit();
    testIteration(NATURAL_ORDER, view().last(last.key), null, last);
  }

  @Test
  public void refIndexWithLast() throws Exception {
    CustomType1 last = pickLimit();
    testIteration(REF_INDEX_ORDER, view().index("id").last(last.id), null, last);
  }

  @Test
  public void copyIndexWithLast() throws Exception {
    CustomType1 last = pickLimit();
    testIteration(COPY_INDEX_ORDER, view().index("name").last(last.name), null, last);
  }

  @Test
  public void numericIndexWithLast() throws Exception {
    CustomType1 last = pickLimit();
    testIteration(NUMERIC_INDEX_ORDER, view().index("int").last(last.num), null, last);
  }

  @Test
  public void childIndexWithLast() throws Exception {
    CustomType1 any = pickLimit();
    testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).last(any.child), null,
      null);
  }

  // Descending iteration stopping at a chosen element.

  @Test
  public void naturalIndexDescendingWithLast() throws Exception {
    CustomType1 last = pickLimit();
    testIteration(NATURAL_ORDER, view().reverse().last(last.key), null, last);
  }

  @Test
  public void refIndexDescendingWithLast() throws Exception {
    CustomType1 last = pickLimit();
    testIteration(REF_INDEX_ORDER, view().reverse().index("id").last(last.id), null, last);
  }

  @Test
  public void copyIndexDescendingWithLast() throws Exception {
    CustomType1 last = pickLimit();
    testIteration(COPY_INDEX_ORDER, view().reverse().index("name").last(last.name),
      null, last);
  }

  @Test
  public void numericIndexDescendingWithLast() throws Exception {
    CustomType1 last = pickLimit();
    testIteration(NUMERIC_INDEX_ORDER, view().reverse().index("int").last(last.num),
      null, last);
  }

  @Test
  public void childIndexDescendingWithLast() throws Exception {
    CustomType1 any = pickLimit();
    testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).last(any.child).reverse(),
      null, null);
  }

  @Test
  public void testRefWithIntNaturalKey() throws Exception {
    LevelDBSuite.IntKeyType i = new LevelDBSuite.IntKeyType();
    i.key = 1;
    i.id = "1";
    i.values = Arrays.asList("1");

    db.write(i);

    try(KVStoreIterator<?> it = db.view(i.getClass()).closeableIterator()) {
      Object read = it.next();
      assertEquals(i, read);
    }
  }

  private CustomType1 pickLimit() {
    // Picks an element that has clashes with other elements in the given index.
    return clashingEntries.get(RND.nextInt(clashingEntries.size()));
  }

  // Picks a skip/max count in [1, allEntries.size() / 2).
  private int pickCount() {
    int count = RND.nextInt(allEntries.size() / 2);
    return Math.max(count, 1);
  }

  /**
   * Compares the two values and falls back to comparing the natural key of CustomType1
   * if they're the same, to mimic the behavior of the indexing code.
   */
  private <T extends Comparable<T>> int compareWithFallback(
      T v1,
      T v2,
      CustomType1 ct1,
      CustomType1 ct2) {
    int result = v1.compareTo(v2);
    if (result != 0) {
      return result;
    }

    return ct1.key.compareTo(ct2.key);
  }

  // Core check: builds the expected result from allEntries using the reference comparator
  // and the view's parameters, then compares it against what the store actually returns.
  private void testIteration(
      final BaseComparator order,
      final KVStoreView<CustomType1> params,
      final CustomType1 first,
      final CustomType1 last) throws Exception {
    List<CustomType1> indexOrder = sortBy(order.fallback());
    if (!params.ascending) {
      indexOrder = Lists.reverse(indexOrder);
    }

    Iterable<CustomType1> expected = indexOrder;
    BaseComparator expectedOrder = params.ascending ? order : order.reverse();

    if (params.parent != null) {
      expected = Iterables.filter(expected, v -> params.parent.equals(v.id));
    }

    if (first != null) {
      expected = Iterables.filter(expected, v -> expectedOrder.compare(first, v) <= 0);
    }

    if (last != null) {
      expected = Iterables.filter(expected, v -> expectedOrder.compare(v, last) <= 0);
    }

    if (params.skip > 0) {
      expected = Iterables.skip(expected, (int) params.skip);
    }

    if (params.max != Long.MAX_VALUE) {
      expected = Iterables.limit(expected, (int) params.max);
    }

    List<CustomType1> actual = collect(params);
    compareLists(expected, actual);
  }

  /** Could use assertEquals(), but that creates hard to read errors for large lists. */
  private void compareLists(Iterable<?> expected, List<?> actual) {
    Iterator<?> expectedIt = expected.iterator();
    Iterator<?> actualIt = actual.iterator();

    int count = 0;
    while (expectedIt.hasNext()) {
      if (!actualIt.hasNext()) {
        break;
      }
      count++;
      assertEquals(expectedIt.next(), actualIt.next());
    }

    // Report whichever side still has leftover elements.
    String message;
    Object[] remaining;
    int expectedCount = count;
    int actualCount = count;

    if (expectedIt.hasNext()) {
      remaining = Iterators.toArray(expectedIt, Object.class);
      expectedCount += remaining.length;
      message = "missing";
    } else {
      remaining = Iterators.toArray(actualIt, Object.class);
      actualCount += remaining.length;
      message = "stray";
    }

    assertEquals(String.format("Found %s elements: %s", message, Arrays.asList(remaining)),
      expectedCount, actualCount);
  }

  private KVStoreView<CustomType1> view() throws Exception {
    return db.view(CustomType1.class);
  }

  private List<CustomType1> collect(KVStoreView<CustomType1> view) throws Exception {
    return Arrays.asList(Iterables.toArray(view, CustomType1.class));
  }

  private List<CustomType1> sortBy(Comparator<CustomType1> comp) {
    List<CustomType1> copy = new ArrayList<>(allEntries);
    Collections.sort(copy, comp);
    return copy;
  }
}
| 9,946 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/LevelDBSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.File;
import java.util.Arrays;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import com.google.common.collect.ImmutableSet;
import org.apache.commons.io.FileUtils;
import org.iq80.leveldb.DBIterator;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class LevelDBSuite {
private LevelDB db;
private File dbpath;
  @After
  public void cleanup() throws Exception {
    // Close the store first so LevelDB releases its file handles, then remove
    // the on-disk database directory. Either field may be null if setup failed.
    if (db != null) {
      db.close();
    }
    if (dbpath != null) {
      FileUtils.deleteQuietly(dbpath);
    }
  }
  @Before
  public void setup() throws Exception {
    // Reserve a unique temp path, then delete the empty file so that LevelDB
    // can create its own database directory at that location.
    dbpath = File.createTempFile("test.", ".ldb");
    dbpath.delete();
    db = new LevelDB(dbpath);
  }
@Test
public void testReopenAndVersionCheckDb() throws Exception {
db.close();
db = null;
assertTrue(dbpath.exists());
db = new LevelDB(dbpath);
assertEquals(LevelDB.STORE_VERSION,
db.serializer.deserializeLong(db.db().get(LevelDB.STORE_VERSION_KEY)));
db.db().put(LevelDB.STORE_VERSION_KEY, db.serializer.serialize(LevelDB.STORE_VERSION + 1));
db.close();
db = null;
try {
db = new LevelDB(dbpath);
fail("Should have failed version check.");
} catch (UnsupportedStoreVersionException e) {
// Expected.
}
}
@Test
public void testObjectWriteReadDelete() throws Exception {
CustomType1 t = createCustomType1(1);
try {
db.read(CustomType1.class, t.key);
fail("Expected exception for non-existent object.");
} catch (NoSuchElementException nsee) {
// Expected.
}
db.write(t);
assertEquals(t, db.read(t.getClass(), t.key));
assertEquals(1L, db.count(t.getClass()));
db.delete(t.getClass(), t.key);
try {
db.read(t.getClass(), t.key);
fail("Expected exception for deleted object.");
} catch (NoSuchElementException nsee) {
// Expected.
}
// Look into the actual DB and make sure that all the keys related to the type have been
// removed.
assertEquals(0, countKeys(t.getClass()));
}
@Test
public void testMultipleObjectWriteReadDelete() throws Exception {
CustomType1 t1 = createCustomType1(1);
CustomType1 t2 = createCustomType1(2);
t2.id = t1.id;
db.write(t1);
db.write(t2);
assertEquals(t1, db.read(t1.getClass(), t1.key));
assertEquals(t2, db.read(t2.getClass(), t2.key));
assertEquals(2L, db.count(t1.getClass()));
// There should be one "id" index entry with two values.
assertEquals(2, db.count(t1.getClass(), "id", t1.id));
// Delete the first entry; now there should be 3 remaining keys, since one of the "name"
// index entries should have been removed.
db.delete(t1.getClass(), t1.key);
// Make sure there's a single entry in the "id" index now.
assertEquals(1, db.count(t2.getClass(), "id", t2.id));
// Delete the remaining entry, make sure all data is gone.
db.delete(t2.getClass(), t2.key);
assertEquals(0, countKeys(t2.getClass()));
}
@Test
public void testMultipleTypesWriteReadDelete() throws Exception {
CustomType1 t1 = createCustomType1(1);
IntKeyType t2 = new IntKeyType();
t2.key = 2;
t2.id = "2";
t2.values = Arrays.asList("value1", "value2");
ArrayKeyIndexType t3 = new ArrayKeyIndexType();
t3.key = new int[] { 42, 84 };
t3.id = new String[] { "id1", "id2" };
db.write(t1);
db.write(t2);
db.write(t3);
assertEquals(t1, db.read(t1.getClass(), t1.key));
assertEquals(t2, db.read(t2.getClass(), t2.key));
assertEquals(t3, db.read(t3.getClass(), t3.key));
// There should be one "id" index with a single entry for each type.
assertEquals(1, db.count(t1.getClass(), "id", t1.id));
assertEquals(1, db.count(t2.getClass(), "id", t2.id));
assertEquals(1, db.count(t3.getClass(), "id", t3.id));
// Delete the first entry; this should not affect the entries for the second type.
db.delete(t1.getClass(), t1.key);
assertEquals(0, countKeys(t1.getClass()));
assertEquals(1, db.count(t2.getClass(), "id", t2.id));
assertEquals(1, db.count(t3.getClass(), "id", t3.id));
// Delete the remaining entries, make sure all data is gone.
db.delete(t2.getClass(), t2.key);
assertEquals(0, countKeys(t2.getClass()));
db.delete(t3.getClass(), t3.key);
assertEquals(0, countKeys(t3.getClass()));
}
@Test
public void testMetadata() throws Exception {
assertNull(db.getMetadata(CustomType1.class));
CustomType1 t = createCustomType1(1);
db.setMetadata(t);
assertEquals(t, db.getMetadata(CustomType1.class));
db.setMetadata(null);
assertNull(db.getMetadata(CustomType1.class));
}
@Test
public void testUpdate() throws Exception {
CustomType1 t = createCustomType1(1);
db.write(t);
t.name = "anotherName";
db.write(t);
assertEquals(1, db.count(t.getClass()));
assertEquals(1, db.count(t.getClass(), "name", "anotherName"));
assertEquals(0, db.count(t.getClass(), "name", "name"));
}
@Test
public void testRemoveAll() throws Exception {
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
ArrayKeyIndexType o = new ArrayKeyIndexType();
o.key = new int[] { i, j, 0 };
o.id = new String[] { "things" };
db.write(o);
o = new ArrayKeyIndexType();
o.key = new int[] { i, j, 1 };
o.id = new String[] { "more things" };
db.write(o);
}
}
ArrayKeyIndexType o = new ArrayKeyIndexType();
o.key = new int[] { 2, 2, 2 };
o.id = new String[] { "things" };
db.write(o);
assertEquals(9, db.count(ArrayKeyIndexType.class));
db.removeAllByIndexValues(
ArrayKeyIndexType.class,
KVIndex.NATURAL_INDEX_NAME,
ImmutableSet.of(new int[] {0, 0, 0}, new int[] { 2, 2, 2 }));
assertEquals(7, db.count(ArrayKeyIndexType.class));
db.removeAllByIndexValues(
ArrayKeyIndexType.class,
"id",
ImmutableSet.of(new String[] { "things" }));
assertEquals(4, db.count(ArrayKeyIndexType.class));
db.removeAllByIndexValues(
ArrayKeyIndexType.class,
"id",
ImmutableSet.of(new String[] { "more things" }));
assertEquals(0, db.count(ArrayKeyIndexType.class));
}
@Test
public void testSkip() throws Exception {
for (int i = 0; i < 10; i++) {
db.write(createCustomType1(i));
}
KVStoreIterator<CustomType1> it = db.view(CustomType1.class).closeableIterator();
assertTrue(it.hasNext());
assertTrue(it.skip(5));
assertEquals("key5", it.next().key);
assertTrue(it.skip(3));
assertEquals("key9", it.next().key);
assertFalse(it.hasNext());
}
@Test
public void testNegativeIndexValues() throws Exception {
List<Integer> expected = Arrays.asList(-100, -50, 0, 50, 100);
expected.stream().forEach(i -> {
try {
db.write(createCustomType1(i));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
List<Integer> results = StreamSupport
.stream(db.view(CustomType1.class).index("int").spliterator(), false)
.map(e -> e.num)
.collect(Collectors.toList());
assertEquals(expected, results);
}
  /** Builds a CustomType1 instance whose fields are all derived from {@code i}. */
  private CustomType1 createCustomType1(int i) {
    CustomType1 t = new CustomType1();
    t.key = "key" + i;
    t.id = "id" + i;
    t.name = "name" + i;
    t.num = i;
    t.child = "child" + i;
    return t;
  }
private int countKeys(Class<?> type) throws Exception {
byte[] prefix = db.getTypeInfo(type).keyPrefix();
int count = 0;
DBIterator it = db.db().iterator();
it.seek(prefix);
while (it.hasNext()) {
byte[] key = it.next().getKey();
if (LevelDBIterator.startsWith(key, prefix)) {
count++;
}
}
return count;
}
  /** Test type with an int natural key, a string "id" index, and unindexed payload data. */
  public static class IntKeyType {
    @KVIndex
    public int key;
    @KVIndex("id")
    public String id;
    public List<String> values;
    @Override
    public boolean equals(Object o) {
      if (o instanceof IntKeyType) {
        IntKeyType other = (IntKeyType) o;
        return key == other.key && id.equals(other.id) && values.equals(other.values);
      }
      return false;
    }
    @Override
    public int hashCode() {
      // Hashing only the id is consistent with equals(): equal instances have
      // equal ids, and therefore equal hash codes.
      return id.hashCode();
    }
  }
}
| 9,947 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/LevelDBIteratorSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.File;
import org.apache.commons.io.FileUtils;
import org.junit.AfterClass;
/**
 * Runs the generic {@code DBIteratorSuite} tests against a LevelDB-backed store.
 */
public class LevelDBIteratorSuite extends DBIteratorSuite {
  private static File dbpath;
  private static LevelDB db;
  @AfterClass
  public static void cleanup() throws Exception {
    // Close the store before deleting its backing directory. Either field may
    // be null if createStore() was never invoked or failed.
    if (db != null) {
      db.close();
    }
    if (dbpath != null) {
      FileUtils.deleteQuietly(dbpath);
    }
  }
  @Override
  protected KVStore createStore() throws Exception {
    // Reserve a unique temp path, then delete the empty file so LevelDB can
    // create its database directory at that location.
    dbpath = File.createTempFile("test.", ".ldb");
    dbpath.delete();
    db = new LevelDB(dbpath);
    return db;
  }
}
| 9,948 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/CustomType1.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import com.google.common.base.Objects;
/**
 * Test bean exercising the different kinds of indices supported by the
 * kvstore module: a natural index (key), plain indices (id, int), a copied
 * index (name), and a child index (child, parented to id).
 */
public class CustomType1 {
  @KVIndex
  public String key;
  @KVIndex("id")
  public String id;
  @KVIndex(value = "name", copy = true)
  public String name;
  @KVIndex("int")
  public int num;
  @KVIndex(value = "child", parent = "id")
  public String child;
  @Override
  public boolean equals(Object o) {
    // Equality deliberately considers only id and name; key, num and child
    // are ignored.
    if (o instanceof CustomType1) {
      CustomType1 other = (CustomType1) o;
      return id.equals(other.id) && name.equals(other.name);
    }
    return false;
  }
  @Override
  public int hashCode() {
    // Consistent with equals(): equal instances share the same id.
    return id.hashCode();
  }
  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("key", key)
      .add("id", id)
      .add("name", name)
      .add("num", num)
      .toString();
  }
}
| 9,949 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/ArrayKeyIndexType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.util.Arrays;
/**
 * Test type whose natural key and "id" index are arrays, used to exercise
 * array-valued key and index handling.
 */
public class ArrayKeyIndexType {
  @KVIndex
  public int[] key;
  @KVIndex("id")
  public String[] id;
  @Override
  public boolean equals(Object o) {
    if (o instanceof ArrayKeyIndexType) {
      ArrayKeyIndexType other = (ArrayKeyIndexType) o;
      return Arrays.equals(key, other.key) && Arrays.equals(id, other.id);
    }
    return false;
  }
  @Override
  public int hashCode() {
    // Use content-based hashing so that instances which are equals() (which
    // compares array contents) also share a hash code. Calling key.hashCode()
    // directly would use the array's identity hash and break that contract.
    return Arrays.hashCode(key);
  }
}
| 9,950 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/LevelDBBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Slf4jReporter;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.*;
/**
* A set of small benchmarks for the LevelDB implementation.
*
* The benchmarks are run over two different types (one with just a natural index, and one
* with a ref index), over a set of 2^20 elements, and the following tests are performed:
*
* - write (then update) elements in sequential natural key order
* - write (then update) elements in random natural key order
* - iterate over natural index, ascending and descending
* - iterate over ref index, ascending and descending
*/
@Ignore
public class LevelDBBenchmark {
private static final int COUNT = 1024;
private static final AtomicInteger IDGEN = new AtomicInteger();
private static final MetricRegistry metrics = new MetricRegistry();
private static final Timer dbCreation = metrics.timer("dbCreation");
private static final Timer dbClose = metrics.timer("dbClose");
private LevelDB db;
private File dbpath;
@Before
public void setup() throws Exception {
dbpath = File.createTempFile("test.", ".ldb");
dbpath.delete();
try(Timer.Context ctx = dbCreation.time()) {
db = new LevelDB(dbpath);
}
}
@After
public void cleanup() throws Exception {
if (db != null) {
try(Timer.Context ctx = dbClose.time()) {
db.close();
}
}
if (dbpath != null) {
FileUtils.deleteQuietly(dbpath);
}
}
@AfterClass
public static void report() {
if (metrics.getTimers().isEmpty()) {
return;
}
int headingPrefix = 0;
for (Map.Entry<String, Timer> e : metrics.getTimers().entrySet()) {
headingPrefix = Math.max(e.getKey().length(), headingPrefix);
}
headingPrefix += 4;
StringBuilder heading = new StringBuilder();
for (int i = 0; i < headingPrefix; i++) {
heading.append(" ");
}
heading.append("\tcount");
heading.append("\tmean");
heading.append("\tmin");
heading.append("\tmax");
heading.append("\t95th");
System.out.println(heading);
for (Map.Entry<String, Timer> e : metrics.getTimers().entrySet()) {
StringBuilder row = new StringBuilder();
row.append(e.getKey());
for (int i = 0; i < headingPrefix - e.getKey().length(); i++) {
row.append(" ");
}
Snapshot s = e.getValue().getSnapshot();
row.append("\t").append(e.getValue().getCount());
row.append("\t").append(toMs(s.getMean()));
row.append("\t").append(toMs(s.getMin()));
row.append("\t").append(toMs(s.getMax()));
row.append("\t").append(toMs(s.get95thPercentile()));
System.out.println(row);
}
Slf4jReporter.forRegistry(metrics).outputTo(LoggerFactory.getLogger(LevelDBBenchmark.class))
.build().report();
}
private static String toMs(double nanos) {
return String.format("%.3f", nanos / 1000 / 1000);
}
@Test
public void sequentialWritesNoIndex() throws Exception {
List<SimpleType> entries = createSimpleType();
writeAll(entries, "sequentialWritesNoIndex");
writeAll(entries, "sequentialUpdatesNoIndex");
deleteNoIndex(entries, "sequentialDeleteNoIndex");
}
@Test
public void randomWritesNoIndex() throws Exception {
List<SimpleType> entries = createSimpleType();
Collections.shuffle(entries);
writeAll(entries, "randomWritesNoIndex");
Collections.shuffle(entries);
writeAll(entries, "randomUpdatesNoIndex");
Collections.shuffle(entries);
deleteNoIndex(entries, "randomDeletesNoIndex");
}
@Test
public void sequentialWritesIndexedType() throws Exception {
List<IndexedType> entries = createIndexedType();
writeAll(entries, "sequentialWritesIndexed");
writeAll(entries, "sequentialUpdatesIndexed");
deleteIndexed(entries, "sequentialDeleteIndexed");
}
@Test
public void randomWritesIndexedTypeAndIteration() throws Exception {
List<IndexedType> entries = createIndexedType();
Collections.shuffle(entries);
writeAll(entries, "randomWritesIndexed");
Collections.shuffle(entries);
writeAll(entries, "randomUpdatesIndexed");
// Run iteration benchmarks here since we've gone through the trouble of writing all
// the data already.
KVStoreView<?> view = db.view(IndexedType.class);
iterate(view, "naturalIndex");
iterate(view.reverse(), "naturalIndexDescending");
iterate(view.index("name"), "refIndex");
iterate(view.index("name").reverse(), "refIndexDescending");
Collections.shuffle(entries);
deleteIndexed(entries, "randomDeleteIndexed");
}
private void iterate(KVStoreView<?> view, String name) throws Exception {
Timer create = metrics.timer(name + "CreateIterator");
Timer iter = metrics.timer(name + "Iteration");
KVStoreIterator<?> it = null;
{
// Create the iterator several times, just to have multiple data points.
for (int i = 0; i < 1024; i++) {
if (it != null) {
it.close();
}
try(Timer.Context ctx = create.time()) {
it = view.closeableIterator();
}
}
}
for (; it.hasNext(); ) {
try(Timer.Context ctx = iter.time()) {
it.next();
}
}
}
private void writeAll(List<?> entries, String timerName) throws Exception {
Timer timer = newTimer(timerName);
for (Object o : entries) {
try(Timer.Context ctx = timer.time()) {
db.write(o);
}
}
}
private void deleteNoIndex(List<SimpleType> entries, String timerName) throws Exception {
Timer delete = newTimer(timerName);
for (SimpleType i : entries) {
try(Timer.Context ctx = delete.time()) {
db.delete(i.getClass(), i.key);
}
}
}
private void deleteIndexed(List<IndexedType> entries, String timerName) throws Exception {
Timer delete = newTimer(timerName);
for (IndexedType i : entries) {
try(Timer.Context ctx = delete.time()) {
db.delete(i.getClass(), i.key);
}
}
}
  /** Creates COUNT unindexed entries with globally unique keys and repeating names. */
  private List<SimpleType> createSimpleType() {
    List<SimpleType> entries = new ArrayList<>();
    for (int i = 0; i < COUNT; i++) {
      SimpleType t = new SimpleType();
      t.key = IDGEN.getAndIncrement();
      // Names repeat every 1024 entries so index values have duplicates.
      t.name = "name" + (t.key % 1024);
      entries.add(t);
    }
    return entries;
  }
  /** Creates COUNT indexed entries with globally unique keys and repeating names. */
  private List<IndexedType> createIndexedType() {
    List<IndexedType> entries = new ArrayList<>();
    for (int i = 0; i < COUNT; i++) {
      IndexedType t = new IndexedType();
      t.key = IDGEN.getAndIncrement();
      // Names repeat every 1024 entries so the "name" index has duplicates.
      t.name = "name" + (t.key % 1024);
      entries.add(t);
    }
    return entries;
  }
  /** Creates a uniquely named timer, failing the test if the name is already taken. */
  private Timer newTimer(String name) {
    assertNull("Timer already exists: " + name, metrics.getTimers().get(name));
    return metrics.timer(name);
  }
public static class SimpleType {
@KVIndex
public int key;
public String name;
}
public static class IndexedType {
@KVIndex
public int key;
@KVIndex("name")
public String name;
}
}
| 9,951 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/ArrayWrappersSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Tests for the comparable wrappers around array types used by the kvstore
 * module for keys and index values.
 */
public class ArrayWrappersSuite {
  @Test
  public void testGenericArrayKey() {
    byte[] b1 = new byte[] { 0x01, 0x02, 0x03 };
    byte[] b2 = new byte[] { 0x01, 0x02 };
    int[] i1 = new int[] { 1, 2, 3 };
    int[] i2 = new int[] { 1, 2 };
    String[] s1 = new String[] { "1", "2", "3" };
    String[] s2 = new String[] { "1", "2" };
    // Wrappers compare by content; arrays with different contents or of
    // different element types are never equal.
    assertEquals(ArrayWrappers.forArray(b1), ArrayWrappers.forArray(b1));
    assertNotEquals(ArrayWrappers.forArray(b1), ArrayWrappers.forArray(b2));
    assertNotEquals(ArrayWrappers.forArray(b1), ArrayWrappers.forArray(i1));
    assertNotEquals(ArrayWrappers.forArray(b1), ArrayWrappers.forArray(s1));
    assertEquals(ArrayWrappers.forArray(i1), ArrayWrappers.forArray(i1));
    assertNotEquals(ArrayWrappers.forArray(i1), ArrayWrappers.forArray(i2));
    assertNotEquals(ArrayWrappers.forArray(i1), ArrayWrappers.forArray(b1));
    assertNotEquals(ArrayWrappers.forArray(i1), ArrayWrappers.forArray(s1));
    assertEquals(ArrayWrappers.forArray(s1), ArrayWrappers.forArray(s1));
    assertNotEquals(ArrayWrappers.forArray(s1), ArrayWrappers.forArray(s2));
    assertNotEquals(ArrayWrappers.forArray(s1), ArrayWrappers.forArray(b1));
    assertNotEquals(ArrayWrappers.forArray(s1), ArrayWrappers.forArray(i1));
    // Ordering is content-based: a longer array whose prefix matches a
    // shorter one sorts after it.
    assertEquals(0, ArrayWrappers.forArray(b1).compareTo(ArrayWrappers.forArray(b1)));
    assertTrue(ArrayWrappers.forArray(b1).compareTo(ArrayWrappers.forArray(b2)) > 0);
    assertEquals(0, ArrayWrappers.forArray(i1).compareTo(ArrayWrappers.forArray(i1)));
    assertTrue(ArrayWrappers.forArray(i1).compareTo(ArrayWrappers.forArray(i2)) > 0);
    assertEquals(0, ArrayWrappers.forArray(s1).compareTo(ArrayWrappers.forArray(s1)));
    assertTrue(ArrayWrappers.forArray(s1).compareTo(ArrayWrappers.forArray(s2)) > 0);
  }
}
| 9,952 |
0 | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/test/java/org/apache/spark/util/kvstore/LevelDBTypeInfoSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.junit.Test;
import static org.junit.Assert.*;
public class LevelDBTypeInfoSuite {
@Test
public void testIndexAnnotation() throws Exception {
KVTypeInfo ti = new KVTypeInfo(CustomType1.class);
assertEquals(5, ti.indices().count());
CustomType1 t1 = new CustomType1();
t1.key = "key";
t1.id = "id";
t1.name = "name";
t1.num = 42;
t1.child = "child";
assertEquals(t1.key, ti.getIndexValue(KVIndex.NATURAL_INDEX_NAME, t1));
assertEquals(t1.id, ti.getIndexValue("id", t1));
assertEquals(t1.name, ti.getIndexValue("name", t1));
assertEquals(t1.num, ti.getIndexValue("int", t1));
assertEquals(t1.child, ti.getIndexValue("child", t1));
}
@Test(expected = IllegalArgumentException.class)
public void testNoNaturalIndex() throws Exception {
newTypeInfo(NoNaturalIndex.class);
}
@Test(expected = IllegalArgumentException.class)
public void testNoNaturalIndex2() throws Exception {
newTypeInfo(NoNaturalIndex2.class);
}
@Test(expected = IllegalArgumentException.class)
public void testDuplicateIndex() throws Exception {
newTypeInfo(DuplicateIndex.class);
}
@Test(expected = IllegalArgumentException.class)
public void testEmptyIndexName() throws Exception {
newTypeInfo(EmptyIndexName.class);
}
@Test(expected = IllegalArgumentException.class)
public void testIllegalIndexName() throws Exception {
newTypeInfo(IllegalIndexName.class);
}
@Test(expected = IllegalArgumentException.class)
public void testIllegalIndexMethod() throws Exception {
newTypeInfo(IllegalIndexMethod.class);
}
@Test
public void testKeyClashes() throws Exception {
LevelDBTypeInfo ti = newTypeInfo(CustomType1.class);
CustomType1 t1 = new CustomType1();
t1.key = "key1";
t1.name = "a";
CustomType1 t2 = new CustomType1();
t2.key = "key2";
t2.name = "aa";
CustomType1 t3 = new CustomType1();
t3.key = "key3";
t3.name = "aaa";
// Make sure entries with conflicting names are sorted correctly.
assertBefore(ti.index("name").entityKey(null, t1), ti.index("name").entityKey(null, t2));
assertBefore(ti.index("name").entityKey(null, t1), ti.index("name").entityKey(null, t3));
assertBefore(ti.index("name").entityKey(null, t2), ti.index("name").entityKey(null, t3));
}
@Test
public void testNumEncoding() throws Exception {
LevelDBTypeInfo.Index idx = newTypeInfo(CustomType1.class).indices().iterator().next();
assertEquals("+=00000001", new String(idx.toKey(1), UTF_8));
assertEquals("+=00000010", new String(idx.toKey(16), UTF_8));
assertEquals("+=7fffffff", new String(idx.toKey(Integer.MAX_VALUE), UTF_8));
assertBefore(idx.toKey(1), idx.toKey(2));
assertBefore(idx.toKey(-1), idx.toKey(2));
assertBefore(idx.toKey(-11), idx.toKey(2));
assertBefore(idx.toKey(-11), idx.toKey(-1));
assertBefore(idx.toKey(1), idx.toKey(11));
assertBefore(idx.toKey(Integer.MIN_VALUE), idx.toKey(Integer.MAX_VALUE));
assertBefore(idx.toKey(1L), idx.toKey(2L));
assertBefore(idx.toKey(-1L), idx.toKey(2L));
assertBefore(idx.toKey(Long.MIN_VALUE), idx.toKey(Long.MAX_VALUE));
assertBefore(idx.toKey((short) 1), idx.toKey((short) 2));
assertBefore(idx.toKey((short) -1), idx.toKey((short) 2));
assertBefore(idx.toKey(Short.MIN_VALUE), idx.toKey(Short.MAX_VALUE));
assertBefore(idx.toKey((byte) 1), idx.toKey((byte) 2));
assertBefore(idx.toKey((byte) -1), idx.toKey((byte) 2));
assertBefore(idx.toKey(Byte.MIN_VALUE), idx.toKey(Byte.MAX_VALUE));
byte prefix = LevelDBTypeInfo.ENTRY_PREFIX;
assertSame(new byte[] { prefix, LevelDBTypeInfo.FALSE }, idx.toKey(false));
assertSame(new byte[] { prefix, LevelDBTypeInfo.TRUE }, idx.toKey(true));
}
@Test
public void testArrayIndices() throws Exception {
LevelDBTypeInfo.Index idx = newTypeInfo(CustomType1.class).indices().iterator().next();
assertBefore(idx.toKey(new String[] { "str1" }), idx.toKey(new String[] { "str2" }));
assertBefore(idx.toKey(new String[] { "str1", "str2" }),
idx.toKey(new String[] { "str1", "str3" }));
assertBefore(idx.toKey(new int[] { 1 }), idx.toKey(new int[] { 2 }));
assertBefore(idx.toKey(new int[] { 1, 2 }), idx.toKey(new int[] { 1, 3 }));
}
private LevelDBTypeInfo newTypeInfo(Class<?> type) throws Exception {
return new LevelDBTypeInfo(null, type, type.getName().getBytes(UTF_8));
}
private void assertBefore(byte[] key1, byte[] key2) {
assertBefore(new String(key1, UTF_8), new String(key2, UTF_8));
}
private void assertBefore(String str1, String str2) {
assertTrue(String.format("%s < %s failed", str1, str2), str1.compareTo(str2) < 0);
}
private void assertSame(byte[] key1, byte[] key2) {
assertEquals(new String(key1, UTF_8), new String(key2, UTF_8));
}
public static class NoNaturalIndex {
public String id;
}
public static class NoNaturalIndex2 {
@KVIndex("id")
public String id;
}
public static class DuplicateIndex {
@KVIndex
public String key;
@KVIndex("id")
public String id;
@KVIndex("id")
public String id2;
}
public static class EmptyIndexName {
@KVIndex("")
public String id;
}
public static class IllegalIndexName {
@KVIndex("__invalid")
public String id;
}
public static class IllegalIndexMethod {
@KVIndex("id")
public String id(boolean illegalParam) {
return null;
}
}
}
| 9,953 |
0 | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util/kvstore/LevelDBTypeInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.lang.reflect.Array;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.base.Preconditions;
import org.iq80.leveldb.WriteBatch;
/**
* Holds metadata about app-specific types stored in LevelDB. Serves as a cache for data collected
* via reflection, to make it cheaper to access it multiple times.
*
* <p>
* The hierarchy of keys stored in LevelDB looks roughly like the following. This hierarchy ensures
* that iteration over indices is easy, and that updating values in the store is not overly
* expensive. Of note, indices choose using more disk space (one value per key) instead of keeping
* lists of pointers, which would be more expensive to update at runtime.
* </p>
*
* <p>
* Indentation defines when a sub-key lives under a parent key. In LevelDB, this means the full
* key would be the concatenation of everything up to that point in the hierarchy, with each
* component separated by a NULL byte.
* </p>
*
* <pre>
* +TYPE_NAME
* NATURAL_INDEX
* +NATURAL_KEY
* -
* -NATURAL_INDEX
* INDEX_NAME
* +INDEX_VALUE
* +NATURAL_KEY
* -INDEX_VALUE
* .INDEX_VALUE
* CHILD_INDEX_NAME
* +CHILD_INDEX_VALUE
* NATURAL_KEY_OR_DATA
* -
* -INDEX_NAME
* </pre>
*
* <p>
* Entity data (either the entity's natural key or a copy of the data) is stored in all keys
* that end with "+<something>". A count of all objects that match a particular top-level index
* value is kept at the end marker ("-<something>"). A count is also kept at the natural index's end
* marker, to make it easy to retrieve the number of all elements of a particular type.
* </p>
*
* <p>
* To illustrate, given a type "Foo", with a natural index and a second index called "bar", you'd
* have these keys and values in the store for two instances, one with natural key "key1" and the
* other "key2", both with value "yes" for "bar":
* </p>
*
* <pre>
* Foo __main__ +key1 [data for instance 1]
* Foo __main__ +key2 [data for instance 2]
* Foo __main__ - [count of all Foo]
* Foo bar +yes +key1 [instance 1 key or data, depending on index type]
* Foo bar +yes +key2 [instance 2 key or data, depending on index type]
* Foo bar +yes - [count of all Foo with "bar=yes" ]
* </pre>
*
* <p>
* Note that all indexed values are prepended with "+", even if the index itself does not have an
* explicit end marker. This allows for easily skipping to the end of an index by telling LevelDB
* to seek to the "phantom" end marker of the index. Throughout the code and comments, this part
* of the full LevelDB key is generally referred to as the "index value" of the entity.
* </p>
*
* <p>
* Child indices are stored after their parent index. In the example above, let's assume there is
* a child index "child", whose parent is "bar". If both instances have value "no" for this field,
* the data in the store would look something like the following:
* </p>
*
* <pre>
* ...
* Foo bar +yes -
* Foo bar .yes .child +no +key1 [instance 1 key or data, depending on index type]
* Foo bar .yes .child +no +key2 [instance 2 key or data, depending on index type]
* ...
* </pre>
*/
// Implements, for one registered type, the key layout described in the javadoc above: it
// builds the per-type key prefix and owns one Index instance per annotated index of the type.
class LevelDBTypeInfo {

  /** End marker ("-") under which element counts are stored (see class javadoc). */
  static final byte[] END_MARKER = new byte[] { '-' };
  /** Marker ("+") prepended to every index value and natural key inside a full key. */
  static final byte ENTRY_PREFIX = (byte) '+';
  /** NUL byte used to join the components of a full key. */
  static final byte KEY_SEPARATOR = 0x0;
  // Encoded forms of boolean index values.
  static byte TRUE = (byte) '1';
  static byte FALSE = (byte) '0';

  /** Marker (".") used for child index names and their parent index values. */
  private static final byte SECONDARY_IDX_PREFIX = (byte) '.';
  // In ASCII, "=" sorts after "*", so positive numbers sort after negative ones; "+" and "-"
  // would sort the wrong way around (see toKey(Object, byte)).
  private static final byte POSITIVE_MARKER = (byte) '=';
  private static final byte NEGATIVE_MARKER = (byte) '*';
  private static final byte[] HEX_BYTES = new byte[] {
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
  };

  private final LevelDB db;
  private final Class<?> type;
  private final Map<String, Index> indices;
  // Alias under which all keys of this type are stored; first component of every full key.
  private final byte[] typePrefix;

  LevelDBTypeInfo(LevelDB db, Class<?> type, byte[] alias) throws Exception {
    this.db = db;
    this.type = type;
    this.indices = new HashMap<>();
    KVTypeInfo ti = new KVTypeInfo(type);
    // First create the parent indices, then the child indices (children look up their parent
    // in the map, so parents must already be registered).
    ti.indices().forEach(idx -> {
      if (idx.parent().isEmpty()) {
        indices.put(idx.value(), new Index(idx, ti.getAccessor(idx.value()), null));
      }
    });
    ti.indices().forEach(idx -> {
      if (!idx.parent().isEmpty()) {
        indices.put(idx.value(), new Index(idx, ti.getAccessor(idx.value()),
          indices.get(idx.parent())));
      }
    });
    this.typePrefix = alias;
  }

  Class<?> type() {
    return type;
  }

  byte[] keyPrefix() {
    return typePrefix;
  }

  /** The index that uniquely identifies instances of the type. */
  Index naturalIndex() {
    return index(KVIndex.NATURAL_INDEX_NAME);
  }

  /** Looks up an index by name, failing if the type does not declare it. */
  Index index(String name) {
    Index i = indices.get(name);
    Preconditions.checkArgument(i != null, "Index %s does not exist for type %s.", name,
      type.getName());
    return i;
  }

  Collection<Index> indices() {
    return indices.values();
  }

  byte[] buildKey(byte[]... components) {
    return buildKey(true, components);
  }

  /**
   * Joins the given components with KEY_SEPARATOR, optionally prepending the type prefix, to
   * form a full LevelDB key. Assumes at least one component is given (with zero components and
   * no type prefix the computed length would be negative).
   */
  byte[] buildKey(boolean addTypePrefix, byte[]... components) {
    int len = 0;
    if (addTypePrefix) {
      len += typePrefix.length + 1;
    }
    for (byte[] comp : components) {
      len += comp.length;
    }
    // One separator between each pair of components; none after the last one.
    len += components.length - 1;
    byte[] dest = new byte[len];
    int written = 0;
    if (addTypePrefix) {
      System.arraycopy(typePrefix, 0, dest, 0, typePrefix.length);
      dest[typePrefix.length] = KEY_SEPARATOR;
      written += typePrefix.length + 1;
    }
    for (byte[] comp : components) {
      System.arraycopy(comp, 0, dest, written, comp.length);
      written += comp.length;
      if (written < dest.length) {
        dest[written] = KEY_SEPARATOR;
        written++;
      }
    }
    return dest;
  }

  /**
   * Models a single index in LevelDB. See top-level class's javadoc for a description of how the
   * keys are generated.
   */
  class Index {

    // Whether the entity data itself (instead of just its natural key) is stored in the index.
    private final boolean copy;
    private final boolean isNatural;
    private final byte[] name;
    private final KVTypeInfo.Accessor accessor;
    private final Index parent;

    private Index(KVIndex self, KVTypeInfo.Accessor accessor, Index parent) {
      byte[] name = self.value().getBytes(UTF_8);
      if (parent != null) {
        // NOTE(review): this "."-prefixed copy of the name is built but never used nor
        // assigned; either it is dead code, or "name" was meant to become "child" (which
        // would match the ".child" layout shown in the class javadoc). Confirm the intent
        // before changing anything here, since a "fix" would alter the on-disk key format.
        byte[] child = new byte[name.length + 1];
        child[0] = SECONDARY_IDX_PREFIX;
        System.arraycopy(name, 0, child, 1, name.length);
      }
      this.name = name;
      this.isNatural = self.value().equals(KVIndex.NATURAL_INDEX_NAME);
      // The natural index always stores a copy of the entity data.
      this.copy = isNatural || self.copy();
      this.accessor = accessor;
      this.parent = parent;
    }

    boolean isCopy() {
      return copy;
    }

    boolean isChild() {
      return parent != null;
    }

    Index parent() {
      return parent;
    }

    /**
     * Creates a key prefix for child indices of this index. This allows the prefix to be
     * calculated only once, avoiding redundant work when multiple child indices of the
     * same parent index exist.
     */
    byte[] childPrefix(Object value) {
      Preconditions.checkState(parent == null, "Not a parent index.");
      return buildKey(name, toParentKey(value));
    }

    /**
     * Gets the index value for a particular entity (which is the value of the field or method
     * tagged with the index annotation). This is used as part of the LevelDB key where the
     * entity (or its id) is stored.
     */
    Object getValue(Object entity) throws Exception {
      return accessor.get(entity);
    }

    // Sanity check: a parent prefix must be given if and only if this is a child index.
    private void checkParent(byte[] prefix) {
      if (prefix != null) {
        Preconditions.checkState(parent != null, "Parent prefix provided for parent index.");
      } else {
        Preconditions.checkState(parent == null, "Parent prefix missing for child index.");
      }
    }

    /** The prefix for all keys that belong to this index. */
    byte[] keyPrefix(byte[] prefix) {
      checkParent(prefix);
      return (parent != null) ? buildKey(false, prefix, name) : buildKey(name);
    }

    /**
     * The key where to start ascending iteration for entities whose value for the indexed field
     * match the given value.
     */
    byte[] start(byte[] prefix, Object value) {
      checkParent(prefix);
      return (parent != null) ? buildKey(false, prefix, name, toKey(value))
        : buildKey(name, toKey(value));
    }

    /** The key for the index's end marker. */
    byte[] end(byte[] prefix) {
      checkParent(prefix);
      return (parent != null) ? buildKey(false, prefix, name, END_MARKER)
        : buildKey(name, END_MARKER);
    }

    /** The key for the end marker for entries with the given value. */
    byte[] end(byte[] prefix, Object value) {
      checkParent(prefix);
      return (parent != null) ? buildKey(false, prefix, name, toKey(value), END_MARKER)
        : buildKey(name, toKey(value), END_MARKER);
    }

    /** The full key in the index that identifies the given entity. */
    byte[] entityKey(byte[] prefix, Object entity) throws Exception {
      Object indexValue = getValue(entity);
      Preconditions.checkNotNull(indexValue, "Null index value for %s in type %s.",
        name, type.getName());
      byte[] entityKey = start(prefix, indexValue);
      if (!isNatural) {
        // Non-natural indices append the natural key, so entities that share the same index
        // value still get distinct keys.
        entityKey = buildKey(false, entityKey, toKey(naturalIndex().getValue(entity)));
      }
      return entityKey;
    }

    // Adjusts the count stored at the given end-marker key by delta, deleting the key once
    // the count drops to zero.
    private void updateCount(WriteBatch batch, byte[] key, long delta) {
      long updated = getCount(key) + delta;
      if (updated > 0) {
        batch.put(key, db.serializer.serialize(updated));
      } else {
        batch.delete(key);
      }
    }

    // Shared implementation of add() and remove(); a null "data" means removal.
    private void addOrRemove(
        WriteBatch batch,
        Object entity,
        Object existing,
        byte[] data,
        byte[] naturalKey,
        byte[] prefix) throws Exception {
      Object indexValue = getValue(entity);
      Preconditions.checkNotNull(indexValue, "Null index value for %s in type %s.",
        name, type.getName());
      byte[] entityKey = start(prefix, indexValue);
      if (!isNatural) {
        entityKey = buildKey(false, entityKey, naturalKey);
      }
      boolean needCountUpdate = (existing == null);
      // Check whether there's a need to update the index. The index needs to be updated in two
      // cases:
      //
      // - There is no existing value for the entity, so a new index value will be added.
      // - If there is a previously stored value for the entity, and the index value for the
      //   current index does not match the new value, the old entry needs to be deleted and
      //   the new one added.
      //
      // Natural indices don't need to be checked, because by definition both old and new entities
      // will have the same key. The put() call is all that's needed in that case.
      //
      // Also check whether we need to update the counts. If the indexed value is changing, we
      // need to decrement the count at the old index value, and the new indexed value count needs
      // to be incremented.
      if (existing != null && !isNatural) {
        byte[] oldPrefix = null;
        Object oldIndexedValue = getValue(existing);
        boolean removeExisting = !indexValue.equals(oldIndexedValue);
        if (!removeExisting && isChild()) {
          // Same index value, but the parent's value may have changed, which also moves the
          // entry to a different key.
          oldPrefix = parent().childPrefix(parent().getValue(existing));
          removeExisting = LevelDBIterator.compare(prefix, oldPrefix) != 0;
        }
        if (removeExisting) {
          if (oldPrefix == null && isChild()) {
            oldPrefix = parent().childPrefix(parent().getValue(existing));
          }
          byte[] oldKey = entityKey(oldPrefix, existing);
          batch.delete(oldKey);
          // If the indexed value has changed, we need to update the counts at the old and new
          // end markers for the indexed value.
          if (!isChild()) {
            byte[] oldCountKey = end(null, oldIndexedValue);
            updateCount(batch, oldCountKey, -1L);
            needCountUpdate = true;
          }
        }
      }
      if (data != null) {
        byte[] stored = copy ? data : naturalKey;
        batch.put(entityKey, stored);
      } else {
        batch.delete(entityKey);
      }
      if (needCountUpdate && !isChild()) {
        long delta = data != null ? 1L : -1L;
        byte[] countKey = isNatural ? end(prefix) : end(prefix, indexValue);
        updateCount(batch, countKey, delta);
      }
    }

    /**
     * Add an entry to the index.
     *
     * @param batch Write batch with other related changes.
     * @param entity The entity being added to the index.
     * @param existing The entity being replaced in the index, or null.
     * @param data Serialized entity to store (when storing the entity, not a reference).
     * @param naturalKey The value's natural key (to avoid re-computing it for every index).
     * @param prefix The parent index prefix, if this is a child index.
     */
    void add(
        WriteBatch batch,
        Object entity,
        Object existing,
        byte[] data,
        byte[] naturalKey,
        byte[] prefix) throws Exception {
      addOrRemove(batch, entity, existing, data, naturalKey, prefix);
    }

    /**
     * Remove a value from the index.
     *
     * @param batch Write batch with other related changes.
     * @param entity The entity being removed, to identify the index entry to modify.
     * @param naturalKey The value's natural key (to avoid re-computing it for every index).
     * @param prefix The parent index prefix, if this is a child index.
     */
    void remove(
        WriteBatch batch,
        Object entity,
        byte[] naturalKey,
        byte[] prefix) throws Exception {
      addOrRemove(batch, entity, null, null, naturalKey, prefix);
    }

    /** Reads the count stored at the given end-marker key, or 0 if the key is absent. */
    long getCount(byte[] key) {
      byte[] data = db.db().get(key);
      return data != null ? db.serializer.deserializeLong(data) : 0;
    }

    /** Encodes a parent index value with the "." marker, used as a child index's namespace. */
    byte[] toParentKey(Object value) {
      return toKey(value, SECONDARY_IDX_PREFIX);
    }

    /** Encodes an index value with the regular "+" entry marker. */
    byte[] toKey(Object value) {
      return toKey(value, ENTRY_PREFIX);
    }

    /**
     * Translates a value to be used as part of the store key.
     *
     * Integral numbers are encoded as a string in a way that preserves lexicographical
     * ordering. The string is prepended with a marker telling whether the number is negative
     * or positive ("*" for negative and "=" for positive are used since "-" and "+" have the
     * opposite of the desired order), and then the number is encoded into a hex string (so
     * it occupies twice the number of bytes as the original type).
     *
     * Arrays are encoded by encoding each element separately, separated by KEY_SEPARATOR.
     */
    byte[] toKey(Object value, byte prefix) {
      final byte[] result;
      if (value instanceof String) {
        byte[] str = ((String) value).getBytes(UTF_8);
        result = new byte[str.length + 1];
        result[0] = prefix;
        System.arraycopy(str, 0, result, 1, str.length);
      } else if (value instanceof Boolean) {
        result = new byte[] { prefix, (Boolean) value ? TRUE : FALSE };
      } else if (value.getClass().isArray()) {
        int length = Array.getLength(value);
        byte[][] components = new byte[length][];
        for (int i = 0; i < length; i++) {
          components[i] = toKey(Array.get(value, i));
        }
        result = buildKey(false, components);
      } else {
        int bytes;
        if (value instanceof Integer) {
          bytes = Integer.SIZE;
        } else if (value instanceof Long) {
          bytes = Long.SIZE;
        } else if (value instanceof Short) {
          bytes = Short.SIZE;
        } else if (value instanceof Byte) {
          bytes = Byte.SIZE;
        } else {
          throw new IllegalArgumentException(String.format("Type %s not allowed as key.",
            value.getClass().getName()));
        }
        bytes = bytes / Byte.SIZE;
        // Layout: prefix byte + sign marker + two hex chars per byte of the numeric type.
        byte[] key = new byte[bytes * 2 + 2];
        long longValue = ((Number) value).longValue();
        key[0] = prefix;
        key[1] = longValue >= 0 ? POSITIVE_MARKER : NEGATIVE_MARKER;
        // Write hex nibbles starting from the least significant (at the end of the array),
        // producing a fixed-width, big-endian hex string.
        for (int i = 0; i < key.length - 2; i++) {
          int masked = (int) ((longValue >>> (4 * i)) & 0xF);
          key[key.length - i - 1] = HEX_BYTES[masked];
        }
        result = key;
      }
      return result;
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.util.Arrays;
import com.google.common.base.Preconditions;
/**
* A factory for array wrappers so that arrays can be used as keys in a map, sorted or not.
*
* The comparator implementation makes two assumptions:
* - All elements are instances of Comparable
* - When comparing two arrays, they both contain elements of the same type in corresponding
* indices.
*
* Otherwise, ClassCastExceptions may occur. The equality method can compare any two arrays.
*
* This class is not efficient and is mostly meant to compare really small arrays, like those
* generally used as indices and keys in a KVStore.
*/
class ArrayWrappers {
@SuppressWarnings("unchecked")
public static Comparable<Object> forArray(Object a) {
Preconditions.checkArgument(a.getClass().isArray());
Comparable<?> ret;
if (a instanceof int[]) {
ret = new ComparableIntArray((int[]) a);
} else if (a instanceof long[]) {
ret = new ComparableLongArray((long[]) a);
} else if (a instanceof byte[]) {
ret = new ComparableByteArray((byte[]) a);
} else {
Preconditions.checkArgument(!a.getClass().getComponentType().isPrimitive());
ret = new ComparableObjectArray((Object[]) a);
}
return (Comparable<Object>) ret;
}
private static class ComparableIntArray implements Comparable<ComparableIntArray> {
private final int[] array;
ComparableIntArray(int[] array) {
this.array = array;
}
@Override
public boolean equals(Object other) {
if (!(other instanceof ComparableIntArray)) {
return false;
}
return Arrays.equals(array, ((ComparableIntArray) other).array);
}
@Override
public int hashCode() {
int code = 0;
for (int i = 0; i < array.length; i++) {
code = (code * 31) + array[i];
}
return code;
}
@Override
public int compareTo(ComparableIntArray other) {
int len = Math.min(array.length, other.array.length);
for (int i = 0; i < len; i++) {
int diff = array[i] - other.array[i];
if (diff != 0) {
return diff;
}
}
return array.length - other.array.length;
}
}
private static class ComparableLongArray implements Comparable<ComparableLongArray> {
private final long[] array;
ComparableLongArray(long[] array) {
this.array = array;
}
@Override
public boolean equals(Object other) {
if (!(other instanceof ComparableLongArray)) {
return false;
}
return Arrays.equals(array, ((ComparableLongArray) other).array);
}
@Override
public int hashCode() {
int code = 0;
for (int i = 0; i < array.length; i++) {
code = (code * 31) + (int) array[i];
}
return code;
}
@Override
public int compareTo(ComparableLongArray other) {
int len = Math.min(array.length, other.array.length);
for (int i = 0; i < len; i++) {
long diff = array[i] - other.array[i];
if (diff != 0) {
return diff > 0 ? 1 : -1;
}
}
return array.length - other.array.length;
}
}
private static class ComparableByteArray implements Comparable<ComparableByteArray> {
private final byte[] array;
ComparableByteArray(byte[] array) {
this.array = array;
}
@Override
public boolean equals(Object other) {
if (!(other instanceof ComparableByteArray)) {
return false;
}
return Arrays.equals(array, ((ComparableByteArray) other).array);
}
@Override
public int hashCode() {
int code = 0;
for (int i = 0; i < array.length; i++) {
code = (code * 31) + array[i];
}
return code;
}
@Override
public int compareTo(ComparableByteArray other) {
int len = Math.min(array.length, other.array.length);
for (int i = 0; i < len; i++) {
int diff = array[i] - other.array[i];
if (diff != 0) {
return diff;
}
}
return array.length - other.array.length;
}
}
private static class ComparableObjectArray implements Comparable<ComparableObjectArray> {
private final Object[] array;
ComparableObjectArray(Object[] array) {
this.array = array;
}
@Override
public boolean equals(Object other) {
if (!(other instanceof ComparableObjectArray)) {
return false;
}
return Arrays.equals(array, ((ComparableObjectArray) other).array);
}
@Override
public int hashCode() {
int code = 0;
for (int i = 0; i < array.length; i++) {
code = (code * 31) + array[i].hashCode();
}
return code;
}
@Override
@SuppressWarnings("unchecked")
public int compareTo(ComparableObjectArray other) {
int len = Math.min(array.length, other.array.length);
for (int i = 0; i < len; i++) {
int diff = ((Comparable<Object>) array[i]).compareTo((Comparable<Object>) other.array[i]);
if (diff != 0) {
return diff;
}
}
return array.length - other.array.length;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.apache.spark.annotation.Private;
/**
 * Tags a field to be indexed when storing an object.
 *
 * <p>
 * Types are required to have a natural index that uniquely identifies instances in the store.
 * The default value of the annotation identifies the natural index for the type.
 * </p>
 *
 * <p>
 * Indexes allow for more efficient sorting of data read from the store. By annotating a field or
 * "getter" method with this annotation, an index will be created that will provide sorting based on
 * the string value of that field.
 * </p>
 *
 * <p>
 * Note that creating indices means more space will be needed, and maintenance operations like
 * updating or deleting a value will become more expensive.
 * </p>
 *
 * <p>
 * Indices are restricted to String, integral types (byte, short, int, long, boolean), and arrays
 * of those values.
 * </p>
 */
@Private
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD, ElementType.METHOD})
public @interface KVIndex {

  /** Reserved name of the natural index; every stored type must declare one index with it. */
  String NATURAL_INDEX_NAME = "__main__";

  /**
   * The name of the index to be created for the annotated entity. Must be unique within
   * the class. Index names are not allowed to start with an underscore (that's reserved for
   * internal use). The default value is the natural index name (which is always a copy index
   * regardless of the annotation's values).
   */
  String value() default NATURAL_INDEX_NAME;

  /**
   * The name of the parent index of this index. By default there is no parent index, so the
   * generated data can be retrieved without having to provide a parent value.
   *
   * <p>
   * If a parent index is defined, iterating over the data using the index will require providing
   * a single value for the parent index. This serves as a rudimentary way to provide relationships
   * between entities in the store.
   * </p>
   */
  String parent() default "";

  /**
   * Whether to copy the instance's data to the index, instead of just storing a pointer to the
   * data. The default behavior is to just store a reference; that saves disk space but is slower
   * to read, since there's a level of indirection.
   */
  boolean copy() default false;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.HashSet;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import org.apache.spark.annotation.Private;
/**
 * Implementation of KVStore that keeps data deserialized in memory. This store does not index
 * data; instead, whenever iterating over an indexed field, the stored data is copied and sorted
 * according to the index. This saves memory but makes iteration more expensive.
 */
@Private
public class InMemoryStore implements KVStore {

  // Arbitrary, untyped metadata blob; typed only when read back via getMetadata().
  private Object metadata;
  private InMemoryLists inMemoryLists = new InMemoryLists();

  @Override
  public <T> T getMetadata(Class<T> klass) {
    return klass.cast(metadata);
  }

  @Override
  public void setMetadata(Object value) {
    this.metadata = value;
  }

  /** Returns the number of instances of the given type, or 0 if none have been written. */
  @Override
  public long count(Class<?> type) {
    InstanceList<?> list = inMemoryLists.get(type);
    return list != null ? list.size() : 0;
  }

  /**
   * Returns the number of instances of the given type whose value for the given index matches
   * the given value. Linear scan: this store keeps no per-index structures.
   */
  @Override
  public long count(Class<?> type, String index, Object indexedValue) throws Exception {
    InstanceList<?> list = inMemoryLists.get(type);
    if (list == null) {
      // Consistent with count(Class): a type that was never written has no instances.
      // (Previously this path dereferenced the null list and threw NullPointerException.)
      return 0;
    }
    int count = 0;
    Object comparable = asKey(indexedValue);
    KVTypeInfo.Accessor accessor = list.getIndexAccessor(index);
    for (Object o : view(type)) {
      if (Objects.equal(comparable, asKey(accessor.get(o)))) {
        count++;
      }
    }
    return count;
  }

  /**
   * Reads the instance of the given type with the given natural key.
   *
   * @throws NoSuchElementException if no such instance exists.
   */
  @Override
  public <T> T read(Class<T> klass, Object naturalKey) {
    InstanceList<T> list = inMemoryLists.get(klass);
    T value = list != null ? list.get(naturalKey) : null;
    if (value == null) {
      throw new NoSuchElementException();
    }
    return value;
  }

  /** Writes (or replaces) an instance, keyed by its natural key. */
  @Override
  public void write(Object value) throws Exception {
    inMemoryLists.write(value);
  }

  /** Deletes the instance with the given natural key, if present. */
  @Override
  public void delete(Class<?> type, Object naturalKey) {
    InstanceList<?> list = inMemoryLists.get(type);
    if (list != null) {
      list.delete(naturalKey);
    }
  }

  @Override
  public <T> KVStoreView<T> view(Class<T> type) {
    InstanceList<T> list = inMemoryLists.get(type);
    return list != null ? list.view() : emptyView();
  }

  @Override
  public void close() {
    metadata = null;
    inMemoryLists.clear();
  }

  /**
   * Removes all instances of the given type whose value for the given index is contained in
   * indexValues.
   *
   * @return whether any instance was removed.
   */
  @Override
  public <T> boolean removeAllByIndexValues(
      Class<T> klass,
      String index,
      Collection<?> indexValues) {
    InstanceList<T> list = inMemoryLists.get(klass);
    if (list != null) {
      return list.countingRemoveAllByIndexValues(index, indexValues) > 0;
    } else {
      return false;
    }
  }

  // Normalizes a value for use as a hash/comparison key. Arrays are wrapped since they don't
  // implement value-based equals() or Comparable.
  @SuppressWarnings("unchecked")
  private static Comparable<Object> asKey(Object in) {
    if (in.getClass().isArray()) {
      in = ArrayWrappers.forArray(in);
    }
    return (Comparable<Object>) in;
  }

  @SuppressWarnings("unchecked")
  private static <T> KVStoreView<T> emptyView() {
    return (InMemoryView<T>) InMemoryView.EMPTY_VIEW;
  }

  /**
   * Encapsulates ConcurrentHashMap so that the typing in and out of the map strictly maps a
   * class of type T to an InstanceList of type T.
   */
  private static class InMemoryLists {

    private final ConcurrentMap<Class<?>, InstanceList<?>> data = new ConcurrentHashMap<>();

    @SuppressWarnings("unchecked")
    public <T> InstanceList<T> get(Class<T> type) {
      return (InstanceList<T>) data.get(type);
    }

    @SuppressWarnings("unchecked")
    public <T> void write(T value) throws Exception {
      InstanceList<T> list =
        (InstanceList<T>) data.computeIfAbsent(value.getClass(), InstanceList::new);
      list.put(value);
    }

    public void clear() {
      data.clear();
    }
  }

  /** Holds all instances of a single type, keyed by their natural key. */
  private static class InstanceList<T> {

    /**
     * A BiConsumer to control multi-entity removal. We use this in a forEach rather than an
     * iterator because there is a bug in jdk8 which affects remove() on all concurrent map
     * iterators. https://bugs.openjdk.java.net/browse/JDK-8078645
     */
    private static class CountingRemoveIfForEach<T> implements BiConsumer<Comparable<Object>, T> {

      private final ConcurrentMap<Comparable<Object>, T> data;
      private final Predicate<? super T> filter;

      /**
       * Keeps a count of the number of elements removed. This count is not currently surfaced
       * to clients of KVStore as Java's generic removeAll() construct returns only a boolean,
       * but I found it handy to have the count of elements removed while debugging; a count being
       * no more complicated than a boolean, I've retained that behavior here, even though there
       * is no current requirement.
       */
      private int count = 0;

      CountingRemoveIfForEach(
          ConcurrentMap<Comparable<Object>, T> data,
          Predicate<? super T> filter) {
        this.data = data;
        this.filter = filter;
      }

      @Override
      public void accept(Comparable<Object> key, T value) {
        if (filter.test(value)) {
          // remove(key, value) only removes if the mapping is still intact, which keeps this
          // safe against concurrent overwrites of the same key.
          if (data.remove(key, value)) {
            count++;
          }
        }
      }

      public int count() { return count; }
    }

    private final KVTypeInfo ti;
    private final KVTypeInfo.Accessor naturalKey;
    private final ConcurrentMap<Comparable<Object>, T> data;

    private InstanceList(Class<?> klass) {
      this.ti = new KVTypeInfo(klass);
      this.naturalKey = ti.getAccessor(KVIndex.NATURAL_INDEX_NAME);
      this.data = new ConcurrentHashMap<>();
    }

    KVTypeInfo.Accessor getIndexAccessor(String indexName) {
      return ti.getAccessor(indexName);
    }

    /** Removes all elements matching any of the index values; returns how many were removed. */
    int countingRemoveAllByIndexValues(String index, Collection<?> indexValues) {
      Predicate<? super T> filter = getPredicate(ti.getAccessor(index), indexValues);
      CountingRemoveIfForEach<T> callback = new CountingRemoveIfForEach<>(data, filter);

      data.forEach(callback);
      return callback.count();
    }

    public T get(Object key) {
      return data.get(asKey(key));
    }

    public void put(T value) throws Exception {
      data.put(asKey(naturalKey.get(value)), value);
    }

    public void delete(Object key) {
      data.remove(asKey(key));
    }

    public int size() {
      return data.size();
    }

    public InMemoryView<T> view() {
      return new InMemoryView<>(data.values(), ti);
    }

    // Builds a predicate that matches elements whose index value is in "values". When the
    // index type is Comparable the raw values can be used directly; otherwise (arrays) they
    // must be normalized through asKey() first.
    private static <T> Predicate<? super T> getPredicate(
        KVTypeInfo.Accessor getter,
        Collection<?> values) {
      if (Comparable.class.isAssignableFrom(getter.getType())) {
        HashSet<?> set = new HashSet<>(values);

        return (value) -> set.contains(indexValueForEntity(getter, value));
      } else {
        HashSet<Comparable<Object>> set = new HashSet<>(values.size());
        for (Object key : values) {
          set.add(asKey(key));
        }
        return (value) -> set.contains(asKey(indexValueForEntity(getter, value)));
      }
    }

    private static Object indexValueForEntity(KVTypeInfo.Accessor getter, Object entity) {
      try {
        return getter.get(entity);
      } catch (ReflectiveOperationException e) {
        throw new RuntimeException(e);
      }
    }
  }

  /**
   * A view over the elements of a single type. Sorting and filtering happen lazily, when an
   * iterator is requested: the backing collection is copied, optionally filtered by parent
   * value, sorted by the requested index, and then bounded by first/last/skip/max.
   */
  private static class InMemoryView<T> extends KVStoreView<T> {

    private static final InMemoryView<?> EMPTY_VIEW =
      new InMemoryView<>(Collections.emptyList(), null);

    private final Collection<T> elements;
    private final KVTypeInfo ti;
    private final KVTypeInfo.Accessor natural;

    InMemoryView(Collection<T> elements, KVTypeInfo ti) {
      this.elements = elements;
      this.ti = ti;
      this.natural = ti != null ? ti.getAccessor(KVIndex.NATURAL_INDEX_NAME) : null;
    }

    @Override
    public Iterator<T> iterator() {
      if (elements.isEmpty()) {
        return new InMemoryIterator<>(elements.iterator());
      }

      KVTypeInfo.Accessor getter = index != null ? ti.getAccessor(index) : null;
      int modifier = ascending ? 1 : -1;

      final List<T> sorted = copyElements();
      sorted.sort((e1, e2) -> modifier * compare(e1, e2, getter));
      Stream<T> stream = sorted.stream();

      // "first" and "last" bound the iteration by index value, on top of the sort direction.
      if (first != null) {
        Comparable<?> firstKey = asKey(first);
        stream = stream.filter(e -> modifier * compare(e, getter, firstKey) >= 0);
      }

      if (last != null) {
        Comparable<?> lastKey = asKey(last);
        stream = stream.filter(e -> modifier * compare(e, getter, lastKey) <= 0);
      }

      if (skip > 0) {
        stream = stream.skip(skip);
      }

      // Cast is safe: max < sorted.size() implies max fits in an int.
      if (max < sorted.size()) {
        stream = stream.limit((int) max);
      }

      return new InMemoryIterator<>(stream.iterator());
    }

    /**
     * Create a copy of the input elements, filtering the values for child indices if needed.
     */
    private List<T> copyElements() {
      if (parent != null) {
        // Child indices iterate only over elements whose parent index matches "parent".
        KVTypeInfo.Accessor parentGetter = ti.getParentAccessor(index);
        Preconditions.checkArgument(parentGetter != null, "Parent filter for non-child index.");
        Comparable<?> parentKey = asKey(parent);

        return elements.stream()
          .filter(e -> compare(e, parentGetter, parentKey) == 0)
          .collect(Collectors.toList());
      } else {
        return new ArrayList<>(elements);
      }
    }

    // Compares two elements by the given index accessor, breaking ties with the natural key so
    // the ordering is total and deterministic.
    private int compare(T e1, T e2, KVTypeInfo.Accessor getter) {
      try {
        int diff = compare(e1, getter, asKey(getter.get(e2)));
        if (diff == 0 && getter != natural) {
          diff = compare(e1, natural, asKey(natural.get(e2)));
        }
        return diff;
      } catch (ReflectiveOperationException e) {
        throw new RuntimeException(e);
      }
    }

    // Compares an element's index value against a pre-computed key.
    private int compare(T e1, KVTypeInfo.Accessor getter, Comparable<?> v2) {
      try {
        return asKey(getter.get(e1)).compareTo(v2);
      } catch (ReflectiveOperationException e) {
        throw new RuntimeException(e);
      }
    }
  }

  /** Thin KVStoreIterator adapter over a plain java.util.Iterator. */
  private static class InMemoryIterator<T> implements KVStoreIterator<T> {

    private final Iterator<T> iter;

    InMemoryIterator(Iterator<T> iter) {
      this.iter = iter;
    }

    @Override
    public boolean hasNext() {
      return iter.hasNext();
    }

    @Override
    public T next() {
      return iter.next();
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }

    /** Returns up to "max" further elements as a list. */
    @Override
    public List<T> next(int max) {
      List<T> list = new ArrayList<>(max);
      while (hasNext() && list.size() < max) {
        list.add(next());
      }
      return list;
    }

    /** Skips up to n elements; returns whether any elements remain afterwards. */
    @Override
    public boolean skip(long n) {
      long skipped = 0;
      while (skipped < n) {
        if (hasNext()) {
          next();
          skipped++;
        } else {
          return false;
        }
      }

      return hasNext();
    }

    @Override
    public void close() {
      // no op.
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import org.iq80.leveldb.DBIterator;
/**
 * Iterator over the entries of one index of a LevelDB-backed store, configured by the
 * {@link KVStoreView} that created it (ordering, first/last element, skip and max).
 *
 * <p>
 * The next raw entry is pre-loaded by {@link #hasNext()}; {@link #next()} then deserializes
 * it (or, for indices that do not store a copy, follows the stored reference to the natural
 * index entry). The iterator closes itself once exhausted, releasing the underlying JNI
 * resources.
 * </p>
 */
class LevelDBIterator<T> implements KVStoreIterator<T> {

  private final LevelDB db;
  private final boolean ascending;
  // Raw iterator over the underlying LevelDB; a JNI resource that must be closed.
  private final DBIterator it;
  private final Class<T> type;
  private final LevelDBTypeInfo ti;
  private final LevelDBTypeInfo.Index index;
  // Prefix shared by all keys belonging to the iterated index.
  private final byte[] indexKeyPrefix;
  // Key bounding the iteration (inclusive), or null when only the prefix bounds it.
  private final byte[] end;
  // Maximum number of elements to return, from the view configuration.
  private final long max;

  // Whether `next` currently reflects the result of loadNext().
  private boolean checkedNext;
  // Value of the next matching entry, loaded eagerly by hasNext().
  private byte[] next;
  private boolean closed;
  // Number of elements returned so far; compared against `max`.
  private long count;

  LevelDBIterator(Class<T> type, LevelDB db, KVStoreView<T> params) throws Exception {
    this.db = db;
    this.ascending = params.ascending;
    this.it = db.db().iterator();
    this.type = type;
    this.ti = db.getTypeInfo(type);
    this.index = ti.index(params.index);
    this.max = params.max;

    Preconditions.checkArgument(!index.isChild() || params.parent != null,
      "Cannot iterate over child index %s without parent value.", params.index);
    byte[] parent = index.isChild() ? index.parent().childPrefix(params.parent) : null;

    this.indexKeyPrefix = index.keyPrefix(parent);

    // Seek the raw iterator to the first key of the requested range, taking iteration
    // direction and the optional "first" element into account.
    byte[] firstKey;
    if (params.first != null) {
      if (ascending) {
        firstKey = index.start(parent, params.first);
      } else {
        firstKey = index.end(parent, params.first);
      }
    } else if (ascending) {
      firstKey = index.keyPrefix(parent);
    } else {
      firstKey = index.end(parent);
    }
    it.seek(firstKey);

    // Compute the key at which iteration should stop, if one applies.
    byte[] end = null;
    if (ascending) {
      if (params.last != null) {
        end = index.end(parent, params.last);
      } else {
        end = index.end(parent);
      }
    } else {
      if (params.last != null) {
        end = index.start(parent, params.last);
      }
      if (it.hasNext()) {
        // When descending, the caller may have set up the start of iteration at a non-existent
        // entry that is guaranteed to be after the desired entry. For example, if you have a
        // compound key (a, b) where b is a, integer, you may seek to the end of the elements that
        // have the same "a" value by specifying Integer.MAX_VALUE for "b", and that value may not
        // exist in the database. So need to check here whether the next value actually belongs to
        // the set being returned by the iterator before advancing.
        byte[] nextKey = it.peekNext().getKey();
        if (compare(nextKey, indexKeyPrefix) <= 0) {
          it.next();
        }
      }
    }
    this.end = end;

    if (params.skip > 0) {
      skip(params.skip);
    }
  }

  @Override
  public boolean hasNext() {
    if (!checkedNext && !closed) {
      next = loadNext();
      checkedNext = true;
    }
    if (!closed && next == null) {
      // Iteration is exhausted; eagerly release the underlying JNI iterator.
      try {
        close();
      } catch (IOException ioe) {
        throw Throwables.propagate(ioe);
      }
    }
    return next != null;
  }

  @Override
  public T next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    checkedNext = false;

    try {
      T ret;
      if (index == null || index.isCopy()) {
        // The entry stores a full serialized copy of the object.
        ret = db.serializer.deserialize(next, type);
      } else {
        // The entry stores a reference: look the object up via its natural index key.
        byte[] key = ti.buildKey(false, ti.naturalIndex().keyPrefix(null), next);
        ret = db.get(key, type);
      }
      next = null;
      return ret;
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  /**
   * Retrieves up to {@code max} elements into a list; stops early if iteration is exhausted.
   */
  @Override
  public List<T> next(int max) {
    List<T> list = new ArrayList<>(max);
    while (hasNext() && list.size() < max) {
      list.add(next());
    }
    return list;
  }

  /**
   * Skips {@code n} elements without deserializing them, advancing the raw iterator
   * directly. End-marker entries do not count as skipped elements.
   */
  @Override
  public boolean skip(long n) {
    long skipped = 0;
    while (skipped < n) {
      // Consume any entry already pre-loaded by hasNext() before touching the raw iterator.
      if (next != null) {
        checkedNext = false;
        next = null;
        skipped++;
        continue;
      }

      boolean hasNext = ascending ? it.hasNext() : it.hasPrev();
      if (!hasNext) {
        // Raw iterator exhausted: mark the cached state as up-to-date so hasNext()
        // reports false without attempting another load.
        checkedNext = true;
        return false;
      }

      Map.Entry<byte[], byte[]> e = ascending ? it.next() : it.prev();
      if (!isEndMarker(e.getKey())) {
        skipped++;
      }
    }

    return hasNext();
  }

  @Override
  public synchronized void close() throws IOException {
    if (!closed) {
      it.close();
      closed = true;
    }
  }

  /**
   * Because it's tricky to expose closeable iterators through many internal APIs, especially
   * when Scala wrappers are used, this makes sure that, hopefully, the JNI resources held by
   * the iterator will eventually be released.
   */
  @Override
  protected void finalize() throws Throwable {
    db.closeIterator(this);
  }

  /**
   * Advances the raw iterator to the next entry belonging to this iteration, returning its
   * value, or null when the iteration is done (max reached, prefix left, or end key passed).
   */
  private byte[] loadNext() {
    if (count >= max) {
      return null;
    }

    while (true) {
      boolean hasNext = ascending ? it.hasNext() : it.hasPrev();
      if (!hasNext) {
        return null;
      }

      Map.Entry<byte[], byte[]> nextEntry;
      try {
        // Avoid races if another thread is updating the DB.
        nextEntry = ascending ? it.next() : it.prev();
      } catch (NoSuchElementException e) {
        return null;
      }

      byte[] nextKey = nextEntry.getKey();
      // Next key is not part of the index, stop.
      if (!startsWith(nextKey, indexKeyPrefix)) {
        return null;
      }

      // If the next key is an end marker, then skip it.
      if (isEndMarker(nextKey)) {
        continue;
      }

      // If there's a known end key and iteration has gone past it, stop.
      if (end != null) {
        int comp = compare(nextKey, end) * (ascending ? 1 : -1);
        if (comp > 0) {
          return null;
        }
      }

      count++;

      // Next element is part of the iteration, return it.
      return nextEntry.getValue();
    }
  }

  /** Returns whether {@code key} begins with all the bytes of {@code prefix}. */
  @VisibleForTesting
  static boolean startsWith(byte[] key, byte[] prefix) {
    if (key.length < prefix.length) {
      return false;
    }

    for (int i = 0; i < prefix.length; i++) {
      if (key[i] != prefix[i]) {
        return false;
      }
    }

    return true;
  }

  // True when the key ends with the KEY_SEPARATOR byte followed by the end-marker byte.
  private boolean isEndMarker(byte[] key) {
    return (key.length > 2 &&
      key[key.length - 2] == LevelDBTypeInfo.KEY_SEPARATOR &&
      key[key.length - 1] == LevelDBTypeInfo.END_MARKER[0]);
  }

  /**
   * Lexicographic comparison of two byte arrays using signed byte arithmetic: returns on
   * the first differing byte, otherwise the shorter array sorts first.
   */
  static int compare(byte[] a, byte[] b) {
    int diff = 0;
    int minLen = Math.min(a.length, b.length);
    for (int i = 0; i < minLen; i++) {
      diff += (a[i] - b[i]);
      if (diff != 0) {
        return diff;
      }
    }

    return a.length - b.length;
  }

}
| 9,958 |
0 | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util/kvstore/UnsupportedStoreVersionException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.IOException;
import org.apache.spark.annotation.Private;
/**
* Exception thrown when the store implementation is not compatible with the underlying data.
*/
@Private
public class UnsupportedStoreVersionException extends IOException {

  // IOException is Serializable; declaring an explicit serialVersionUID avoids a
  // compiler-generated value that would change with unrelated edits to this class.
  private static final long serialVersionUID = 1L;

}
| 9,959 |
0 | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util/kvstore/LevelDB.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicReference;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
import org.apache.spark.annotation.Private;
/**
* Implementation of KVStore that uses LevelDB as the underlying data store.
*/
@Private
public class LevelDB implements KVStore {

  @VisibleForTesting
  static final long STORE_VERSION = 1L;

  @VisibleForTesting
  static final byte[] STORE_VERSION_KEY = "__version__".getBytes(UTF_8);

  /** DB key where app metadata is stored. */
  private static final byte[] METADATA_KEY = "__meta__".getBytes(UTF_8);

  /** DB key where type aliases are stored. */
  private static final byte[] TYPE_ALIASES_KEY = "__types__".getBytes(UTF_8);

  // Holder for the open DB handle; set to null on close() so that later uses fail fast
  // (see db()) instead of invoking a closed JNI handle.
  final AtomicReference<DB> _db;

  final KVStoreSerializer serializer;

  /**
   * Keep a mapping of class names to a shorter, unique ID managed by the store. This serves two
   * purposes: make the keys stored on disk shorter, and spread out the keys, since class names
   * will often have a long, redundant prefix (think "org.apache.spark.").
   */
  private final ConcurrentMap<String, byte[]> typeAliases;
  // Cache of per-type index metadata, built lazily by getTypeInfo().
  private final ConcurrentMap<Class<?>, LevelDBTypeInfo> types;

  public LevelDB(File path) throws Exception {
    this(path, new KVStoreSerializer());
  }

  public LevelDB(File path, KVStoreSerializer serializer) throws Exception {
    this.serializer = serializer;
    this.types = new ConcurrentHashMap<>();
    Options options = new Options();
    options.createIfMissing(true);
    this._db = new AtomicReference<>(JniDBFactory.factory.open(path, options));

    // Verify the on-disk data was written by a compatible store version; stamp new stores.
    byte[] versionData = db().get(STORE_VERSION_KEY);
    if (versionData != null) {
      long version = serializer.deserializeLong(versionData);
      if (version != STORE_VERSION) {
        close();
        throw new UnsupportedStoreVersionException();
      }
    } else {
      db().put(STORE_VERSION_KEY, serializer.serialize(STORE_VERSION));
    }

    // Load previously persisted type aliases, if any.
    Map<String, byte[]> aliases;
    try {
      aliases = get(TYPE_ALIASES_KEY, TypeAliases.class).aliases;
    } catch (NoSuchElementException e) {
      aliases = new HashMap<>();
    }
    typeAliases = new ConcurrentHashMap<>(aliases);
  }

  @Override
  public <T> T getMetadata(Class<T> klass) throws Exception {
    try {
      return get(METADATA_KEY, klass);
    } catch (NoSuchElementException nsee) {
      return null;
    }
  }

  @Override
  public void setMetadata(Object value) throws Exception {
    if (value != null) {
      put(METADATA_KEY, value);
    } else {
      // A null value clears the stored metadata.
      db().delete(METADATA_KEY);
    }
  }

  /**
   * Reads and deserializes the value stored under the given raw key.
   *
   * @throws NoSuchElementException If no value exists for the key.
   */
  <T> T get(byte[] key, Class<T> klass) throws Exception {
    byte[] data = db().get(key);
    if (data == null) {
      throw new NoSuchElementException(new String(key, UTF_8));
    }
    return serializer.deserialize(data, klass);
  }

  // Serializes and stores a value under the given raw key.
  private void put(byte[] key, Object value) throws Exception {
    Preconditions.checkArgument(value != null, "Null values are not allowed.");
    db().put(key, serializer.serialize(value));
  }

  @Override
  public <T> T read(Class<T> klass, Object naturalKey) throws Exception {
    Preconditions.checkArgument(naturalKey != null, "Null keys are not allowed.");
    byte[] key = getTypeInfo(klass).naturalIndex().start(null, naturalKey);
    return get(key, klass);
  }

  @Override
  public void write(Object value) throws Exception {
    Preconditions.checkArgument(value != null, "Null values are not allowed.");
    LevelDBTypeInfo ti = getTypeInfo(value.getClass());

    try (WriteBatch batch = db().createWriteBatch()) {
      byte[] data = serializer.serialize(value);
      // Synchronize per type so the read of the existing entry and the batched index
      // updates below happen atomically with respect to other writers of this type.
      synchronized (ti) {
        Object existing;
        try {
          existing = get(ti.naturalIndex().entityKey(null, value), value.getClass());
        } catch (NoSuchElementException e) {
          existing = null;
        }

        PrefixCache cache = new PrefixCache(value);
        byte[] naturalKey = ti.naturalIndex().toKey(ti.naturalIndex().getValue(value));
        // Update every index of the type, replacing any entries for the existing value.
        for (LevelDBTypeInfo.Index idx : ti.indices()) {
          byte[] prefix = cache.getPrefix(idx);
          idx.add(batch, value, existing, data, naturalKey, prefix);
        }
        db().write(batch);
      }
    }
  }

  @Override
  public void delete(Class<?> type, Object naturalKey) throws Exception {
    Preconditions.checkArgument(naturalKey != null, "Null keys are not allowed.");
    try (WriteBatch batch = db().createWriteBatch()) {
      LevelDBTypeInfo ti = getTypeInfo(type);
      byte[] key = ti.naturalIndex().start(null, naturalKey);
      synchronized (ti) {
        byte[] data = db().get(key);
        if (data != null) {
          // Remove the entity's entries from every index in a single batch.
          Object existing = serializer.deserialize(data, type);
          PrefixCache cache = new PrefixCache(existing);
          byte[] keyBytes = ti.naturalIndex().toKey(ti.naturalIndex().getValue(existing));
          for (LevelDBTypeInfo.Index idx : ti.indices()) {
            idx.remove(batch, existing, keyBytes, cache.getPrefix(idx));
          }
          db().write(batch);
        }
      }
    } catch (NoSuchElementException nse) {
      // Ignore: deleting a non-existent entity is a no-op.
    }
  }

  @Override
  public <T> KVStoreView<T> view(Class<T> type) throws Exception {
    return new KVStoreView<T>() {
      @Override
      public Iterator<T> iterator() {
        try {
          return new LevelDBIterator<>(type, LevelDB.this, this);
        } catch (Exception e) {
          throw Throwables.propagate(e);
        }
      }
    };
  }

  @Override
  public <T> boolean removeAllByIndexValues(
      Class<T> klass,
      String index,
      Collection<?> indexValues) throws Exception {
    LevelDBTypeInfo.Index naturalIndex = getTypeInfo(klass).naturalIndex();
    boolean removed = false;
    KVStoreView<T> view = view(klass).index(index);

    // For each index value, iterate over matching entities and delete them by natural key.
    for (Object indexValue : indexValues) {
      for (T value: view.first(indexValue).last(indexValue)) {
        Object itemKey = naturalIndex.getValue(value);
        delete(klass, itemKey);
        removed = true;
      }
    }

    return removed;
  }

  @Override
  public long count(Class<?> type) throws Exception {
    LevelDBTypeInfo.Index idx = getTypeInfo(type).naturalIndex();
    return idx.getCount(idx.end(null));
  }

  @Override
  public long count(Class<?> type, String index, Object indexedValue) throws Exception {
    LevelDBTypeInfo.Index idx = getTypeInfo(type).index(index);
    return idx.getCount(idx.end(null, indexedValue));
  }

  @Override
  public void close() throws IOException {
    // Synchronize on the holder so close() and closeIterator() don't race.
    synchronized (this._db) {
      DB _db = this._db.getAndSet(null);
      if (_db == null) {
        return;
      }

      try {
        _db.close();
      } catch (IOException ioe) {
        throw ioe;
      } catch (Exception e) {
        throw new IOException(e.getMessage(), e);
      }
    }
  }

  /**
   * Closes the given iterator if the DB is still open. Trying to close a JNI LevelDB handle
   * with a closed DB can cause JVM crashes, so this ensures that situation does not happen.
   */
  void closeIterator(LevelDBIterator<?> it) throws IOException {
    synchronized (this._db) {
      DB _db = this._db.get();
      if (_db != null) {
        it.close();
      }
    }
  }

  /** Returns metadata about indices for the given type. */
  LevelDBTypeInfo getTypeInfo(Class<?> type) throws Exception {
    LevelDBTypeInfo ti = types.get(type);
    if (ti == null) {
      // Racing threads may build duplicates; putIfAbsent keeps the winner and the loser's
      // instance is discarded.
      LevelDBTypeInfo tmp = new LevelDBTypeInfo(this, type, getTypeAlias(type));
      ti = types.putIfAbsent(type, tmp);
      if (ti == null) {
        ti = tmp;
      }
    }
    return ti;
  }

  /**
   * Try to avoid use-after close since that has the tendency of crashing the JVM. This doesn't
   * prevent methods that retrieved the instance from using it after close, but hopefully will
   * catch most cases; otherwise, we'll need some kind of locking.
   */
  DB db() {
    DB _db = this._db.get();
    if (_db == null) {
      throw new IllegalStateException("DB is closed.");
    }
    return _db;
  }

  // Returns the short alias for a class name, creating and persisting a new one if needed.
  private byte[] getTypeAlias(Class<?> klass) throws Exception {
    byte[] alias = typeAliases.get(klass.getName());
    if (alias == null) {
      synchronized (typeAliases) {
        byte[] tmp = String.valueOf(typeAliases.size()).getBytes(UTF_8);
        alias = typeAliases.putIfAbsent(klass.getName(), tmp);
        if (alias == null) {
          alias = tmp;
          // Persist the updated alias table so aliases survive reopening the store.
          put(TYPE_ALIASES_KEY, new TypeAliases(typeAliases));
        }
      }
    }
    return alias;
  }

  /** Needs to be public for Jackson. */
  public static class TypeAliases {

    public Map<String, byte[]> aliases;

    TypeAliases(Map<String, byte[]> aliases) {
      this.aliases = aliases;
    }

    TypeAliases() {
      this(null);
    }

  }

  // Caches child-index prefixes for a single entity, so each parent prefix is computed once
  // while updating or removing all of the entity's index entries.
  private static class PrefixCache {

    private final Object entity;
    private final Map<LevelDBTypeInfo.Index, byte[]> prefixes;

    PrefixCache(Object entity) {
      this.entity = entity;
      this.prefixes = new HashMap<>();
    }

    byte[] getPrefix(LevelDBTypeInfo.Index idx) throws Exception {
      byte[] prefix = null;
      if (idx.isChild()) {
        prefix = prefixes.get(idx.parent());
        if (prefix == null) {
          prefix = idx.parent().childPrefix(idx.parent().getValue(entity));
          prefixes.put(idx.parent(), prefix);
        }
      }
      return prefix;
    }

  }

}
| 9,960 |
0 | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util/kvstore/KVStoreIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.Closeable;
import java.util.Iterator;
import java.util.List;
import org.apache.spark.annotation.Private;
/**
* An iterator for KVStore.
*
* <p>
* Iterators may keep references to resources that need to be closed. It's recommended that users
* explicitly close iterators after they're used.
* </p>
*/
@Private
public interface KVStoreIterator<T> extends Iterator<T>, Closeable {

  /**
   * Retrieve multiple elements from the store.
   *
   * @param max Maximum number of elements to retrieve.
   * @return Up to {@code max} elements in iteration order; fewer if the iterator is exhausted.
   */
  List<T> next(int max);

  /**
   * Skip in the iterator.
   *
   * @param n Number of elements to skip.
   * @return Whether there are items left after skipping.
   */
  boolean skip(long n);

}
| 9,961 |
0 | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util/kvstore/KVStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.Closeable;
import java.util.Collection;
import org.apache.spark.annotation.Private;
/**
* Abstraction for a local key/value store for storing app data.
*
* <p>
* There are two main features provided by the implementations of this interface:
* </p>
*
* <h3>Serialization</h3>
*
* <p>
* If the underlying data store requires serialization, data will be serialized to and deserialized
* using a {@link KVStoreSerializer}, which can be customized by the application. The serializer is
* based on Jackson, so it supports all the Jackson annotations for controlling the serialization of
* app-defined types.
* </p>
*
* <p>
* Data is also automatically compressed to save disk space.
* </p>
*
* <h3>Automatic Key Management</h3>
*
* <p>
* When using the built-in key management, the implementation will automatically create unique
* keys for each type written to the store. Keys are based on the type name, and always start
* with the "+" prefix character (so that it's easy to use both manual and automatic key
* management APIs without conflicts).
* </p>
*
* <p>
* Another feature of automatic key management is indexing; by annotating fields or methods of
* objects written to the store with {@link KVIndex}, indices are created to sort the data
* by the values of those properties. This makes it possible to provide sorting without having
* to load all instances of those types from the store.
* </p>
*
* <p>
* KVStore instances are thread-safe for both reads and writes.
* </p>
*/
@Private
public interface KVStore extends Closeable {

  /**
   * Returns app-specific metadata from the store, or null if it's not currently set.
   *
   * <p>
   * The metadata type is application-specific. This is a convenience method so that applications
   * don't need to define their own keys for this information.
   * </p>
   *
   * @param klass The type the stored metadata should be deserialized as.
   * @return The stored metadata, or null when none has been set.
   */
  <T> T getMetadata(Class<T> klass) throws Exception;

  /**
   * Writes the given value in the store metadata key.
   *
   * @param value The metadata object to store; a null value clears the stored metadata.
   */
  void setMetadata(Object value) throws Exception;

  /**
   * Read a specific instance of an object.
   *
   * @param klass The object's type.
   * @param naturalKey The object's "natural key", which uniquely identifies it. Null keys
   *                   are not allowed.
   * @return The stored instance matching the key.
   * @throws java.util.NoSuchElementException If an element with the given key does not exist.
   */
  <T> T read(Class<T> klass, Object naturalKey) throws Exception;

  /**
   * Writes the given object to the store, including indexed fields. Indices are updated based
   * on the annotated fields of the object's class.
   *
   * <p>
   * Writes may be slower when the object already exists in the store, since it will involve
   * updating existing indices.
   * </p>
   *
   * @param value The object to write.
   */
  void write(Object value) throws Exception;

  /**
   * Removes an object and all data related to it, like index entries, from the store.
   *
   * @param type The object's type.
   * @param naturalKey The object's "natural key", which uniquely identifies it. Null keys
   *                   are not allowed.
   * @throws java.util.NoSuchElementException If an element with the given key does not exist.
   */
  void delete(Class<?> type, Object naturalKey) throws Exception;

  /**
   * Returns a configurable view for iterating over entities of the given type.
   *
   * @param type The type of entities to iterate over.
   * @return A view that can be further configured before iteration.
   */
  <T> KVStoreView<T> view(Class<T> type) throws Exception;

  /**
   * Returns the number of items of the given type currently in the store.
   *
   * @param type The type whose instances should be counted.
   * @return The number of stored instances of the type.
   */
  long count(Class<?> type) throws Exception;

  /**
   * Returns the number of items of the given type which match the given indexed value.
   *
   * @param type The type whose instances should be counted.
   * @param index The name of the index to match against.
   * @param indexedValue The index value that counted items must have.
   * @return The number of matching instances.
   */
  long count(Class<?> type, String index, Object indexedValue) throws Exception;

  /**
   * A cheaper way to remove multiple items from the KVStore
   *
   * @param klass The type of entities to remove.
   * @param index The name of the index to match against.
   * @param indexValues Index values identifying the entities to remove.
   * @return Whether any entities were removed.
   */
  <T> boolean removeAllByIndexValues(Class<T> klass, String index, Collection<?> indexValues)
      throws Exception;

}
| 9,962 |
0 | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util/kvstore/KVStoreView.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import com.google.common.base.Preconditions;
import org.apache.spark.annotation.Private;
/**
* A configurable view that allows iterating over values in a {@link KVStore}.
*
* <p>
* The different methods can be used to configure the behavior of the iterator. Calling the same
* method multiple times is allowed; the most recent value will be used.
* </p>
*
* <p>
* The iterators returned by this view are of type {@link KVStoreIterator}; they auto-close
* when used in a for loop that exhausts their contents, but when used manually, they need
* to be closed explicitly unless all elements are read.
* </p>
*/
@Private
public abstract class KVStoreView<T> implements Iterable<T> {

  // Configuration read directly by store iterator implementations (package-visible by design).
  boolean ascending = true;
  String index = KVIndex.NATURAL_INDEX_NAME;
  Object first = null;
  Object last = null;
  Object parent = null;
  long skip = 0L;
  long max = Long.MAX_VALUE;

  /**
   * Reverses the order of iteration. By default, iterates in ascending order.
   *
   * @return This view, for chaining.
   */
  public KVStoreView<T> reverse() {
    ascending = !ascending;
    return this;
  }

  /**
   * Iterates according to the given index.
   *
   * @param name The name of the index to iterate over; must not be null.
   * @return This view, for chaining.
   */
  public KVStoreView<T> index(String name) {
    this.index = Preconditions.checkNotNull(name);
    return this;
  }

  /**
   * Defines the value of the parent index when iterating over a child index. Only elements that
   * match the parent index's value will be included in the iteration.
   *
   * <p>
   * Required for iterating over child indices, will generate an error if iterating over a
   * parent-less index.
   * </p>
   *
   * @param value The value of the parent index to match.
   * @return This view, for chaining.
   */
  public KVStoreView<T> parent(Object value) {
    this.parent = value;
    return this;
  }

  /**
   * Iterates starting at the given value of the chosen index (inclusive).
   *
   * @param value The index value at which iteration starts.
   * @return This view, for chaining.
   */
  public KVStoreView<T> first(Object value) {
    this.first = value;
    return this;
  }

  /**
   * Stops iteration at the given value of the chosen index (inclusive).
   *
   * @param value The index value at which iteration stops.
   * @return This view, for chaining.
   */
  public KVStoreView<T> last(Object value) {
    this.last = value;
    return this;
  }

  /**
   * Stops iteration after a number of elements has been retrieved.
   *
   * @param max Maximum number of elements to retrieve; must be positive.
   * @return This view, for chaining.
   */
  public KVStoreView<T> max(long max) {
    Preconditions.checkArgument(max > 0L, "max must be positive.");
    this.max = max;
    return this;
  }

  /**
   * Skips a number of elements at the start of iteration. Skipped elements are not accounted
   * when using {@link #max(long)}.
   *
   * @param n Number of elements to skip.
   * @return This view, for chaining.
   */
  public KVStoreView<T> skip(long n) {
    this.skip = n;
    return this;
  }

  /**
   * Returns an iterator for the current configuration.
   *
   * @return A closeable iterator over the configured view.
   */
  public KVStoreIterator<T> closeableIterator() throws Exception {
    return (KVStoreIterator<T>) iterator();
  }

}
| 9,963 |
0 | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util/kvstore/KVTypeInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;
import com.google.common.base.Preconditions;
import org.apache.spark.annotation.Private;
/**
* Wrapper around types managed in a KVStore, providing easy access to their indexed fields.
*/
@Private
public class KVTypeInfo {

  private final Class<?> type;
  // Index annotations by index name.
  private final Map<String, KVIndex> indices;
  // Reflective accessors for each index's backing field or method, by index name.
  private final Map<String, Accessor> accessors;

  /**
   * Scans the given type for {@link KVIndex} annotations on fields and zero-argument methods,
   * validating that a natural index exists and that parent/child index relationships are sane.
   *
   * @param type The annotated type to inspect.
   * @throws IllegalArgumentException If the type's index declarations are invalid.
   */
  public KVTypeInfo(Class<?> type) {
    this.type = type;
    this.accessors = new HashMap<>();
    this.indices = new HashMap<>();

    for (Field f : type.getDeclaredFields()) {
      KVIndex idx = f.getAnnotation(KVIndex.class);
      if (idx != null) {
        checkIndex(idx, indices);
        f.setAccessible(true);
        indices.put(idx.value(), idx);
        accessors.put(idx.value(), new FieldAccessor(f));
      }
    }

    for (Method m : type.getDeclaredMethods()) {
      KVIndex idx = m.getAnnotation(KVIndex.class);
      if (idx != null) {
        checkIndex(idx, indices);
        Preconditions.checkArgument(m.getParameterTypes().length == 0,
          "Annotated method %s::%s should not have any parameters.", type.getName(), m.getName());
        m.setAccessible(true);
        indices.put(idx.value(), idx);
        accessors.put(idx.value(), new MethodAccessor(m));
      }
    }

    Preconditions.checkArgument(indices.containsKey(KVIndex.NATURAL_INDEX_NAME),
      "No natural index defined for type %s.", type.getName());
    Preconditions.checkArgument(indices.get(KVIndex.NATURAL_INDEX_NAME).parent().isEmpty(),
      "Natural index of %s cannot have a parent.", type.getName());

    // Parent indices must exist and cannot themselves be children (only one level of nesting).
    for (KVIndex idx : indices.values()) {
      if (!idx.parent().isEmpty()) {
        KVIndex parent = indices.get(idx.parent());
        Preconditions.checkArgument(parent != null,
          "Cannot find parent %s of index %s.", idx.parent(), idx.value());
        Preconditions.checkArgument(parent.parent().isEmpty(),
          "Parent index %s of index %s cannot be itself a child index.", idx.parent(), idx.value());
      }
    }
  }

  // Validates a single index declaration against the indices collected so far.
  private void checkIndex(KVIndex idx, Map<String, KVIndex> indices) {
    Preconditions.checkArgument(idx.value() != null && !idx.value().isEmpty(),
      "No name provided for index in type %s.", type.getName());
    Preconditions.checkArgument(
      !idx.value().startsWith("_") || idx.value().equals(KVIndex.NATURAL_INDEX_NAME),
      "Index name %s (in type %s) is not allowed.", idx.value(), type.getName());
    Preconditions.checkArgument(idx.parent().isEmpty() || !idx.parent().equals(idx.value()),
      "Index %s cannot be parent of itself.", idx.value());
    Preconditions.checkArgument(!indices.containsKey(idx.value()),
      "Duplicate index %s for type %s.", idx.value(), type.getName());
  }

  /** Returns the type this instance describes. */
  public Class<?> type() {
    return type;
  }

  /**
   * Returns the value of the named index for the given instance, read reflectively from the
   * annotated field or method.
   */
  public Object getIndexValue(String indexName, Object instance) throws Exception {
    return getAccessor(indexName).get(instance);
  }

  /** Returns a stream over all index declarations of the type. */
  public Stream<KVIndex> indices() {
    return indices.values().stream();
  }

  // Returns the accessor for the named index, failing if no such index exists.
  Accessor getAccessor(String indexName) {
    Accessor a = accessors.get(indexName);
    Preconditions.checkArgument(a != null, "No index %s.", indexName);
    return a;
  }

  // Returns the accessor of the named index's parent, or null for parent-less indices.
  Accessor getParentAccessor(String indexName) {
    KVIndex index = indices.get(indexName);
    return index.parent().isEmpty() ? null : getAccessor(index.parent());
  }

  /**
   * Abstracts the difference between invoking a Field and a Method.
   */
  interface Accessor {

    Object get(Object instance) throws ReflectiveOperationException;

    Class<?> getType();

  }

  // Static: accessors hold no reference to the enclosing KVTypeInfo instance.
  private static class FieldAccessor implements Accessor {

    private final Field field;

    FieldAccessor(Field field) {
      this.field = field;
    }

    @Override
    public Object get(Object instance) throws ReflectiveOperationException {
      return field.get(instance);
    }

    @Override
    public Class<?> getType() {
      return field.getType();
    }

  }

  // Static: accessors hold no reference to the enclosing KVTypeInfo instance.
  private static class MethodAccessor implements Accessor {

    private final Method method;

    MethodAccessor(Method method) {
      this.method = method;
    }

    @Override
    public Object get(Object instance) throws ReflectiveOperationException {
      return method.invoke(instance);
    }

    @Override
    public Class<?> getType() {
      return method.getReturnType();
    }

  }

}
| 9,964 |
0 | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util | Create_ds/spark/common/kvstore/src/main/java/org/apache/spark/util/kvstore/KVStoreSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.kvstore;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.spark.annotation.Private;
/**
* Serializer used to translate between app-defined types and the LevelDB store.
*
* <p>
* The serializer is based on Jackson, so values are written as JSON. It also allows "naked strings"
* and integers to be written as values directly, which will be written as UTF-8 strings.
* </p>
*/
@Private
public class KVStoreSerializer {

  /**
   * Object mapper used to process app-specific types. If an application requires a specific
   * configuration of the mapper, it can subclass this serializer and add custom configuration
   * to this object.
   */
  protected final ObjectMapper mapper;

  public KVStoreSerializer() {
    this.mapper = new ObjectMapper();
  }

  /**
   * Serializes a value for storage: strings are written directly as UTF-8 bytes, any other
   * type is rendered to JSON by the mapper and gzip-compressed.
   */
  public final byte[] serialize(Object o) throws Exception {
    if (o instanceof String) {
      return ((String) o).getBytes(UTF_8);
    } else {
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      // try-with-resources (instead of the previous try/finally) guarantees the gzip
      // trailer is flushed and the stream closed even if the mapper throws.
      try (GZIPOutputStream out = new GZIPOutputStream(bytes)) {
        mapper.writeValue(out, o);
      }
      return bytes.toByteArray();
    }
  }

  /**
   * Deserializes data produced by {@link #serialize(Object)} back into an instance of
   * {@code klass}. Strings are decoded directly from UTF-8; anything else is gunzipped
   * and parsed as JSON.
   */
  @SuppressWarnings("unchecked")
  public final <T> T deserialize(byte[] data, Class<T> klass) throws Exception {
    if (klass.equals(String.class)) {
      return (T) new String(data, UTF_8);
    } else {
      try (GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(data))) {
        return mapper.readValue(in, klass);
      }
    }
  }

  /** Serializes a long as its decimal string form (UTF-8 bytes), not as JSON. */
  final byte[] serialize(long value) {
    return String.valueOf(value).getBytes(UTF_8);
  }

  /** Inverse of {@link #serialize(long)}. */
  final long deserializeLong(byte[] data) {
    return Long.parseLong(new String(data, UTF_8));
  }
}
| 9,965 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/BitArray.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
/**
 * A fixed-size bit set backed by a {@code long[]}, with a cached population count.
 * Capacity is always rounded up to a multiple of 64 bits.
 */
final class BitArray {

  private final long[] data;
  // Cached number of set bits; kept in sync by set() and putAll().
  private long bitCount;

  /**
   * Computes how many 64-bit words are needed to hold {@code numBits} bits.
   *
   * @throws IllegalArgumentException if the size is non-positive or exceeds what an
   *                                  int-indexed word array can hold.
   */
  static int numWords(long numBits) {
    if (numBits <= 0) {
      throw new IllegalArgumentException("numBits must be positive, but got " + numBits);
    }
    long numWords = (long) Math.ceil(numBits / 64.0);
    if (numWords > Integer.MAX_VALUE) {
      throw new IllegalArgumentException("Can't allocate enough space for " + numBits + " bits");
    }
    return (int) numWords;
  }

  BitArray(long numBits) {
    this(new long[numWords(numBits)]);
  }

  private BitArray(long[] words) {
    this.data = words;
    // Derive the cached population count from the backing words.
    this.bitCount = Arrays.stream(words).map(Long::bitCount).sum();
  }

  /** Sets the bit at {@code index}; returns true iff the bit was previously clear. */
  boolean set(long index) {
    if (get(index)) {
      return false;
    }
    data[(int) (index >>> 6)] |= (1L << index);
    bitCount++;
    return true;
  }

  boolean get(long index) {
    // (1L << index) uses only the low 6 bits of index, matching the word chosen by index >>> 6.
    return (data[(int) (index >>> 6)] & (1L << index)) != 0;
  }

  /** Total capacity in bits (always a multiple of 64). */
  long bitSize() {
    return data.length * (long) Long.SIZE;
  }

  /** Number of bits currently set to 1. */
  long cardinality() {
    return bitCount;
  }

  /** ORs {@code array}'s bits into this one; both arrays must have equal word length. */
  void putAll(BitArray array) {
    assert data.length == array.data.length : "BitArrays must be of equal length when merging";
    long merged = 0;
    for (int w = 0; w < data.length; w++) {
      data[w] |= array.data[w];
      merged += Long.bitCount(data[w]);
    }
    this.bitCount = merged;
  }

  /** Writes the word count followed by each word, big-endian. */
  void writeTo(DataOutputStream out) throws IOException {
    out.writeInt(data.length);
    for (int w = 0; w < data.length; w++) {
      out.writeLong(data[w]);
    }
  }

  /** Reads a BitArray previously written by {@link #writeTo(DataOutputStream)}. */
  static BitArray readFrom(DataInputStream in) throws IOException {
    long[] words = new long[in.readInt()];
    for (int w = 0; w < words.length; w++) {
      words[w] = in.readLong();
    }
    return new BitArray(words);
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof BitArray)) {
      return false;
    }
    return Arrays.equals(data, ((BitArray) other).data);
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(data);
  }
}
| 9,966 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/Murmur3_x86_32.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
/**
* 32-bit Murmur3 hasher. This is based on Guava's Murmur3_32HashFunction.
*/
// This class is duplicated from `org.apache.spark.unsafe.hash.Murmur3_x86_32` to make sure
// spark-sketch has no external dependencies.
final class Murmur3_x86_32 {
  // Murmur3 mixing constants (from the reference implementation).
  private static final int C1 = 0xcc9e2d51;
  private static final int C2 = 0x1b873593;

  // Seed mixed into every hash produced by instance methods.
  private final int seed;

  Murmur3_x86_32(int seed) {
    this.seed = seed;
  }

  @Override
  public String toString() {
    return "Murmur3_32(seed=" + seed + ")";
  }

  /** Hashes a single int using this hasher's seed. */
  public int hashInt(int input) {
    return hashInt(input, seed);
  }

  /** Hashes a single int (treated as a 4-byte block) with the given seed. */
  public static int hashInt(int input, int seed) {
    int k1 = mixK1(input);
    int h1 = mixH1(seed, k1);
    return fmix(h1, 4);
  }

  /** Hashes an 8-byte-aligned memory region using this hasher's seed. */
  public int hashUnsafeWords(Object base, long offset, int lengthInBytes) {
    return hashUnsafeWords(base, offset, lengthInBytes, seed);
  }

  /**
   * Hashes a memory region whose length is a multiple of 8 bytes.
   * Reads int-at-a-time via Platform, so results are endianness-dependent.
   */
  public static int hashUnsafeWords(Object base, long offset, int lengthInBytes, int seed) {
    // This is based on Guava's `Murmur32_Hasher.processRemaining(ByteBuffer)` method.
    assert (lengthInBytes % 8 == 0): "lengthInBytes must be a multiple of 8 (word-aligned)";
    int h1 = hashBytesByInt(base, offset, lengthInBytes, seed);
    return fmix(h1, lengthInBytes);
  }

  /**
   * Hashes an arbitrary-length memory region. The tail (< 4 bytes) is mixed one byte at a
   * time, which diverges from the reference Murmur3 algorithm.
   */
  public static int hashUnsafeBytes(Object base, long offset, int lengthInBytes, int seed) {
    // NOT compatible with the reference Murmur3 or other implementations (tail bytes are
    // mixed individually). Retained for backward compatibility with pre-2.3 components.
    assert (lengthInBytes >= 0): "lengthInBytes cannot be negative";
    int lengthAligned = lengthInBytes - lengthInBytes % 4;
    int h1 = hashBytesByInt(base, offset, lengthAligned, seed);
    for (int i = lengthAligned; i < lengthInBytes; i++) {
      int halfWord = Platform.getByte(base, offset + i);
      int k1 = mixK1(halfWord);
      h1 = mixH1(h1, k1);
    }
    return fmix(h1, lengthInBytes);
  }

  /**
   * Hashes an arbitrary-length memory region, packing the tail (< 4 bytes) into a single
   * little-endian int block as the reference algorithm does.
   */
  public static int hashUnsafeBytes2(Object base, long offset, int lengthInBytes, int seed) {
    // Compatible with the reference Murmur3 implementation; use for components added in
    // Spark 2.3 or later.
    assert (lengthInBytes >= 0): "lengthInBytes cannot be negative";
    int lengthAligned = lengthInBytes - lengthInBytes % 4;
    int h1 = hashBytesByInt(base, offset, lengthAligned, seed);
    int k1 = 0;
    for (int i = lengthAligned, shift = 0; i < lengthInBytes; i++, shift += 8) {
      k1 ^= (Platform.getByte(base, offset + i) & 0xFF) << shift;
    }
    h1 ^= mixK1(k1);
    return fmix(h1, lengthInBytes);
  }

  /** Mixes the 4-byte-aligned prefix of a region into the hash state, int-at-a-time. */
  private static int hashBytesByInt(Object base, long offset, int lengthInBytes, int seed) {
    assert (lengthInBytes % 4 == 0);
    int h1 = seed;
    for (int i = 0; i < lengthInBytes; i += 4) {
      int halfWord = Platform.getInt(base, offset + i);
      int k1 = mixK1(halfWord);
      h1 = mixH1(h1, k1);
    }
    return h1;
  }

  /** Hashes a single long using this hasher's seed. */
  public int hashLong(long input) {
    return hashLong(input, seed);
  }

  /** Hashes a single long as two 4-byte blocks (low word first). */
  public static int hashLong(long input, int seed) {
    int low = (int) input;
    int high = (int) (input >>> 32);
    int k1 = mixK1(low);
    int h1 = mixH1(seed, k1);
    k1 = mixK1(high);
    h1 = mixH1(h1, k1);
    return fmix(h1, 8);
  }

  // Pre-mixes a 4-byte block before it is combined into the hash state.
  private static int mixK1(int k1) {
    k1 *= C1;
    k1 = Integer.rotateLeft(k1, 15);
    k1 *= C2;
    return k1;
  }

  // Combines a pre-mixed block into the running hash state.
  private static int mixH1(int h1, int k1) {
    h1 ^= k1;
    h1 = Integer.rotateLeft(h1, 13);
    h1 = h1 * 5 + 0xe6546b64;
    return h1;
  }

  // Finalization mix - force all bits of a hash block to avalanche
  private static int fmix(int h1, int length) {
    h1 ^= length;
    h1 ^= h1 >>> 16;
    h1 *= 0x85ebca6b;
    h1 ^= h1 >>> 13;
    h1 *= 0xc2b2ae35;
    h1 ^= h1 >>> 16;
    return h1;
  }
}
| 9,967 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/IncompatibleMergeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
/**
 * Exception thrown when attempting to merge two sketches that are not compatible with each
 * other (e.g. created with different sizes or numbers of hash functions), as reported by
 * the sketch's {@code isCompatible} check.
 */
public class IncompatibleMergeException extends Exception {
  public IncompatibleMergeException(String message) {
    super(message);
  }
}
| 9,968 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/Utils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
import java.nio.charset.StandardCharsets;
/**
 * Internal static helpers shared by the sketch implementations.
 */
class Utils {

  // Static-only utility class; prevent instantiation.
  private Utils() {}

  /** Returns the UTF-8 encoding of {@code str}. */
  public static byte[] getBytesFromUTF8String(String str) {
    return str.getBytes(StandardCharsets.UTF_8);
  }

  /**
   * Widens a boxed integral value ({@link Byte}, {@link Short}, {@link Integer} or
   * {@link Long}) to a primitive {@code long}.
   *
   * @throws IllegalArgumentException if {@code i} is not one of the supported integral types.
   */
  public static long integralToLong(Object i) {
    long longValue;
    if (i instanceof Long) {
      longValue = (Long) i;
    } else if (i instanceof Integer) {
      longValue = ((Integer) i).longValue();
    } else if (i instanceof Short) {
      longValue = ((Short) i).longValue();
    } else if (i instanceof Byte) {
      longValue = ((Byte) i).longValue();
    } else {
      throw new IllegalArgumentException("Unsupported data type " + i.getClass().getName());
    }
    return longValue;
  }
}
| 9,969 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/Platform.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
import java.lang.reflect.Field;
import sun.misc.Unsafe;
// This class is duplicated from `org.apache.spark.unsafe.Platform` to make sure spark-sketch has no
// external dependencies.
/**
 * Thin wrapper over {@link sun.misc.Unsafe} exposing raw on-heap/off-heap memory access.
 * All accessors delegate directly to Unsafe: {@code object} is the base object for on-heap
 * access (or null for absolute addresses) and {@code offset} is the byte offset from it.
 */
final class Platform {

  // Obtained reflectively in the static initializer below; null if unavailable.
  private static final Unsafe _UNSAFE;

  // Byte offsets of element 0 of each primitive array type, used to address array
  // contents through the Unsafe accessors (0 when Unsafe could not be acquired).
  public static final int BYTE_ARRAY_OFFSET;
  public static final int INT_ARRAY_OFFSET;
  public static final int LONG_ARRAY_OFFSET;
  public static final int DOUBLE_ARRAY_OFFSET;

  public static int getInt(Object object, long offset) {
    return _UNSAFE.getInt(object, offset);
  }

  public static void putInt(Object object, long offset, int value) {
    _UNSAFE.putInt(object, offset, value);
  }

  public static boolean getBoolean(Object object, long offset) {
    return _UNSAFE.getBoolean(object, offset);
  }

  public static void putBoolean(Object object, long offset, boolean value) {
    _UNSAFE.putBoolean(object, offset, value);
  }

  public static byte getByte(Object object, long offset) {
    return _UNSAFE.getByte(object, offset);
  }

  public static void putByte(Object object, long offset, byte value) {
    _UNSAFE.putByte(object, offset, value);
  }

  public static short getShort(Object object, long offset) {
    return _UNSAFE.getShort(object, offset);
  }

  public static void putShort(Object object, long offset, short value) {
    _UNSAFE.putShort(object, offset, value);
  }

  public static long getLong(Object object, long offset) {
    return _UNSAFE.getLong(object, offset);
  }

  public static void putLong(Object object, long offset, long value) {
    _UNSAFE.putLong(object, offset, value);
  }

  public static float getFloat(Object object, long offset) {
    return _UNSAFE.getFloat(object, offset);
  }

  public static void putFloat(Object object, long offset, float value) {
    _UNSAFE.putFloat(object, offset, value);
  }

  public static double getDouble(Object object, long offset) {
    return _UNSAFE.getDouble(object, offset);
  }

  public static void putDouble(Object object, long offset, double value) {
    _UNSAFE.putDouble(object, offset, value);
  }

  public static Object getObjectVolatile(Object object, long offset) {
    return _UNSAFE.getObjectVolatile(object, offset);
  }

  public static void putObjectVolatile(Object object, long offset, Object value) {
    _UNSAFE.putObjectVolatile(object, offset, value);
  }

  /** Allocates {@code size} bytes of off-heap memory; returns the raw address. */
  public static long allocateMemory(long size) {
    return _UNSAFE.allocateMemory(size);
  }

  /** Frees off-heap memory previously returned by {@link #allocateMemory(long)}. */
  public static void freeMemory(long address) {
    _UNSAFE.freeMemory(address);
  }

  /**
   * Copies {@code length} bytes from (src, srcOffset) to (dst, dstOffset), in chunks of at
   * most {@link #UNSAFE_COPY_THRESHOLD} bytes. Handles overlapping regions by choosing the
   * copy direction, like memmove.
   */
  public static void copyMemory(
      Object src, long srcOffset, Object dst, long dstOffset, long length) {
    // Check if dstOffset is before or after srcOffset to determine if we should copy
    // forward or backwards. This is necessary in case src and dst overlap.
    if (dstOffset < srcOffset) {
      // Forward copy: safe when the destination starts before the source.
      while (length > 0) {
        long size = Math.min(length, UNSAFE_COPY_THRESHOLD);
        _UNSAFE.copyMemory(src, srcOffset, dst, dstOffset, size);
        length -= size;
        srcOffset += size;
        dstOffset += size;
      }
    } else {
      // Backward copy: start from the end so overlapping source bytes are read before
      // being overwritten.
      srcOffset += length;
      dstOffset += length;
      while (length > 0) {
        long size = Math.min(length, UNSAFE_COPY_THRESHOLD);
        srcOffset -= size;
        dstOffset -= size;
        _UNSAFE.copyMemory(src, srcOffset, dst, dstOffset, size);
        length -= size;
      }
    }
  }

  /**
   * Raises an exception bypassing compiler checks for checked exceptions.
   */
  public static void throwException(Throwable t) {
    _UNSAFE.throwException(t);
  }

  /**
   * Limits the number of bytes to copy per {@link Unsafe#copyMemory(long, long, long)} to
   * allow safepoint polling during a large copy.
   */
  private static final long UNSAFE_COPY_THRESHOLD = 1024L * 1024L;

  static {
    // Acquire Unsafe reflectively (the public Unsafe.getUnsafe() rejects non-bootstrap
    // callers). If it fails, offsets are zeroed and _UNSAFE stays null — callers on such
    // platforms will NPE; this mirrors org.apache.spark.unsafe.Platform.
    sun.misc.Unsafe unsafe;
    try {
      Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
      unsafeField.setAccessible(true);
      unsafe = (sun.misc.Unsafe) unsafeField.get(null);
    } catch (Throwable cause) {
      unsafe = null;
    }
    _UNSAFE = unsafe;

    if (_UNSAFE != null) {
      BYTE_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(byte[].class);
      INT_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(int[].class);
      LONG_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(long[].class);
      DOUBLE_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(double[].class);
    } else {
      BYTE_ARRAY_OFFSET = 0;
      INT_ARRAY_OFFSET = 0;
      LONG_ARRAY_OFFSET = 0;
      DOUBLE_ARRAY_OFFSET = 0;
    }
  }
}
| 9,970 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/BloomFilterImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
import java.io.*;
/**
 * Default {@link BloomFilter} implementation backed by a {@link BitArray}.
 *
 * <p>Each item is reduced to two 32-bit Murmur3 hashes (h1, h2); the i-th hash function is
 * derived as {@code h1 + i * h2}, following Guava's Bloom filter strategy.</p>
 */
class BloomFilterImpl extends BloomFilter implements Serializable {

  // Non-final because they are re-assigned by readFrom0() during deserialization.
  private int numHashFunctions;
  private BitArray bits;

  BloomFilterImpl(int numHashFunctions, long numBits) {
    this(new BitArray(numBits), numHashFunctions);
  }

  private BloomFilterImpl(BitArray bits, int numHashFunctions) {
    this.bits = bits;
    this.numHashFunctions = numHashFunctions;
  }

  /** Used only by {@link #readFrom(InputStream)} and Java deserialization. */
  private BloomFilterImpl() {}

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    if (!(other instanceof BloomFilterImpl)) {
      return false;
    }
    BloomFilterImpl that = (BloomFilterImpl) other;
    return this.numHashFunctions == that.numHashFunctions && this.bits.equals(that.bits);
  }

  @Override
  public int hashCode() {
    return bits.hashCode() * 31 + numHashFunctions;
  }

  @Override
  public double expectedFpp() {
    // Fraction of set bits, raised to the number of probes per query.
    return Math.pow((double) bits.cardinality() / bits.bitSize(), numHashFunctions);
  }

  @Override
  public long bitSize() {
    return bits.bitSize();
  }

  /**
   * Maps the i-th (1-based) derived hash function of the pair (h1, h2) to a bit index in
   * [0, bitSize). Shared by all put/mightContain variants so the probing scheme cannot
   * drift between insertion and lookup.
   */
  private static long bitIndex(int h1, int h2, int i, long bitSize) {
    int combinedHash = h1 + (i * h2);
    // Flip all the bits if it's negative (guaranteed positive number)
    if (combinedHash < 0) {
      combinedHash = ~combinedHash;
    }
    return combinedHash % bitSize;
  }

  /** Sets every bit position derived from (h1, h2); returns true if any bit changed. */
  private boolean setHashedBits(int h1, int h2) {
    long bitSize = bits.bitSize();
    boolean bitsChanged = false;
    for (int i = 1; i <= numHashFunctions; i++) {
      bitsChanged |= bits.set(bitIndex(h1, h2, i, bitSize));
    }
    return bitsChanged;
  }

  /** Returns true iff every bit position derived from (h1, h2) is set. */
  private boolean allHashedBitsSet(int h1, int h2) {
    long bitSize = bits.bitSize();
    for (int i = 1; i <= numHashFunctions; i++) {
      if (!bits.get(bitIndex(h1, h2, i, bitSize))) {
        return false;
      }
    }
    return true;
  }

  @Override
  public boolean put(Object item) {
    if (item instanceof String) {
      return putString((String) item);
    } else if (item instanceof byte[]) {
      return putBinary((byte[]) item);
    } else {
      return putLong(Utils.integralToLong(item));
    }
  }

  @Override
  public boolean putString(String item) {
    return putBinary(Utils.getBytesFromUTF8String(item));
  }

  @Override
  public boolean putBinary(byte[] item) {
    int h1 = Murmur3_x86_32.hashUnsafeBytes(item, Platform.BYTE_ARRAY_OFFSET, item.length, 0);
    int h2 = Murmur3_x86_32.hashUnsafeBytes(item, Platform.BYTE_ARRAY_OFFSET, item.length, h1);
    return setHashedBits(h1, h2);
  }

  @Override
  public boolean mightContainString(String item) {
    return mightContainBinary(Utils.getBytesFromUTF8String(item));
  }

  @Override
  public boolean mightContainBinary(byte[] item) {
    int h1 = Murmur3_x86_32.hashUnsafeBytes(item, Platform.BYTE_ARRAY_OFFSET, item.length, 0);
    int h2 = Murmur3_x86_32.hashUnsafeBytes(item, Platform.BYTE_ARRAY_OFFSET, item.length, h1);
    return allHashedBitsSet(h1, h2);
  }

  @Override
  public boolean putLong(long item) {
    // Here we first hash the input long element into 2 int hash values, h1 and h2, then produce
    // n hash values by `h1 + i * h2` with 1 <= i <= numHashFunctions.
    // Note that `CountMinSketch` uses a different strategy: it hashes the input long element
    // with every i to produce n hash values.
    // TODO: the strategy of `CountMinSketch` looks more advanced, should we follow it here?
    int h1 = Murmur3_x86_32.hashLong(item, 0);
    int h2 = Murmur3_x86_32.hashLong(item, h1);
    return setHashedBits(h1, h2);
  }

  @Override
  public boolean mightContainLong(long item) {
    int h1 = Murmur3_x86_32.hashLong(item, 0);
    int h2 = Murmur3_x86_32.hashLong(item, h1);
    return allHashedBitsSet(h1, h2);
  }

  @Override
  public boolean mightContain(Object item) {
    if (item instanceof String) {
      return mightContainString((String) item);
    } else if (item instanceof byte[]) {
      return mightContainBinary((byte[]) item);
    } else {
      return mightContainLong(Utils.integralToLong(item));
    }
  }

  @Override
  public boolean isCompatible(BloomFilter other) {
    if (other == null) {
      return false;
    }
    if (!(other instanceof BloomFilterImpl)) {
      return false;
    }
    BloomFilterImpl that = (BloomFilterImpl) other;
    return this.bitSize() == that.bitSize() && this.numHashFunctions == that.numHashFunctions;
  }

  @Override
  public BloomFilter mergeInPlace(BloomFilter other) throws IncompatibleMergeException {
    // Duplicates the logic of `isCompatible` here to provide better error message.
    if (other == null) {
      throw new IncompatibleMergeException("Cannot merge null bloom filter");
    }
    if (!(other instanceof BloomFilterImpl)) {
      throw new IncompatibleMergeException(
        "Cannot merge bloom filter of class " + other.getClass().getName()
      );
    }
    BloomFilterImpl that = (BloomFilterImpl) other;
    if (this.bitSize() != that.bitSize()) {
      throw new IncompatibleMergeException("Cannot merge bloom filters with different bit size");
    }
    if (this.numHashFunctions != that.numHashFunctions) {
      throw new IncompatibleMergeException(
        "Cannot merge bloom filters with different number of hash functions"
      );
    }
    this.bits.putAll(that.bits);
    return this;
  }

  @Override
  public void writeTo(OutputStream out) throws IOException {
    // Format: version (int), numHashFunctions (int), then the bit array (see Version.V1).
    DataOutputStream dos = new DataOutputStream(out);
    dos.writeInt(Version.V1.getVersionNumber());
    dos.writeInt(numHashFunctions);
    bits.writeTo(dos);
  }

  /** Populates this (empty) instance from the binary format written by {@link #writeTo}. */
  private void readFrom0(InputStream in) throws IOException {
    DataInputStream dis = new DataInputStream(in);
    int version = dis.readInt();
    if (version != Version.V1.getVersionNumber()) {
      throw new IOException("Unexpected Bloom filter version number (" + version + ")");
    }
    this.numHashFunctions = dis.readInt();
    this.bits = BitArray.readFrom(dis);
  }

  public static BloomFilterImpl readFrom(InputStream in) throws IOException {
    BloomFilterImpl filter = new BloomFilterImpl();
    filter.readFrom0(in);
    return filter;
  }

  // Java serialization delegates to the versioned binary format above.
  private void writeObject(ObjectOutputStream out) throws IOException {
    writeTo(out);
  }

  private void readObject(ObjectInputStream in) throws IOException {
    readFrom0(in);
  }
}
| 9,971 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/BloomFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* A Bloom filter is a space-efficient probabilistic data structure that offers an approximate
* containment test with one-sided error: if it claims that an item is contained in it, this
* might be in error, but if it claims that an item is <i>not</i> contained in it, then this is
* definitely true. Currently supported data types include:
* <ul>
* <li>{@link Byte}</li>
* <li>{@link Short}</li>
* <li>{@link Integer}</li>
* <li>{@link Long}</li>
* <li>{@link String}</li>
* </ul>
* The false positive probability ({@code FPP}) of a Bloom filter is defined as the probability that
* {@linkplain #mightContain(Object)} will erroneously return {@code true} for an object that has
* not actually been put in the {@code BloomFilter}.
*
* The implementation is largely based on the {@code BloomFilter} class from Guava.
*/
public abstract class BloomFilter {

  public enum Version {
    /**
     * {@code BloomFilter} binary format version 1. All values written in big-endian order:
     * <ul>
     *   <li>Version number, always 1 (32 bit)</li>
     *   <li>Number of hash functions (32 bit)</li>
     *   <li>Total number of words of the underlying bit array (32 bit)</li>
     *   <li>The words/longs (numWords * 64 bit)</li>
     * </ul>
     */
    V1(1);

    private final int versionNumber;

    Version(int versionNumber) {
      this.versionNumber = versionNumber;
    }

    int getVersionNumber() {
      return versionNumber;
    }
  }

  /**
   * Returns the probability that {@linkplain #mightContain(Object)} erroneously return
   * {@code true} for an object that has not actually been put in the {@code BloomFilter}.
   *
   * Ideally, this number should be close to the {@code fpp} parameter passed in
   * {@linkplain #create(long, double)}, or smaller. If it is significantly higher, it is usually
   * the case that too many items (more than expected) have been put in the {@code BloomFilter},
   * degenerating it.
   */
  public abstract double expectedFpp();

  /**
   * Returns the number of bits in the underlying bit array.
   */
  public abstract long bitSize();

  /**
   * Puts an item into this {@code BloomFilter}. Ensures that subsequent invocations of
   * {@linkplain #mightContain(Object)} with the same item will always return {@code true}.
   *
   * @return true if the bloom filter's bits changed as a result of this operation. If the bits
   *         changed, this is <i>definitely</i> the first time {@code object} has been added to
   *         the filter. If the bits haven't changed, this <i>might</i> be the first time
   *         {@code object} has been added to the filter. Note that {@code put(t)} always returns
   *         the <i>opposite</i> result to what {@code mightContain(t)} would have returned at
   *         the time it is called.
   */
  public abstract boolean put(Object item);

  /**
   * A specialized variant of {@link #put(Object)} that only supports {@code String} items.
   */
  public abstract boolean putString(String item);

  /**
   * A specialized variant of {@link #put(Object)} that only supports {@code long} items.
   */
  public abstract boolean putLong(long item);

  /**
   * A specialized variant of {@link #put(Object)} that only supports byte array items.
   */
  public abstract boolean putBinary(byte[] item);

  /**
   * Determines whether a given bloom filter is compatible with this bloom filter. For two
   * bloom filters to be compatible, they must have the same bit size.
   *
   * @param other The bloom filter to check for compatibility.
   */
  public abstract boolean isCompatible(BloomFilter other);

  /**
   * Combines this bloom filter with another bloom filter by performing a bitwise OR of the
   * underlying data. The mutations happen to <b>this</b> instance. Callers must ensure the
   * bloom filters are appropriately sized to avoid saturating them.
   *
   * @param other The bloom filter to combine this bloom filter with. It is not mutated.
   * @throws IncompatibleMergeException if {@code isCompatible(other) == false}
   */
  public abstract BloomFilter mergeInPlace(BloomFilter other) throws IncompatibleMergeException;

  /**
   * Returns {@code true} if the element <i>might</i> have been put in this Bloom filter,
   * {@code false} if this is <i>definitely</i> not the case.
   */
  public abstract boolean mightContain(Object item);

  /**
   * A specialized variant of {@link #mightContain(Object)} that only tests {@code String} items.
   */
  public abstract boolean mightContainString(String item);

  /**
   * A specialized variant of {@link #mightContain(Object)} that only tests {@code long} items.
   */
  public abstract boolean mightContainLong(long item);

  /**
   * A specialized variant of {@link #mightContain(Object)} that only tests byte array items.
   */
  public abstract boolean mightContainBinary(byte[] item);

  /**
   * Writes out this {@link BloomFilter} to an output stream in binary format. It is the caller's
   * responsibility to close the stream.
   */
  public abstract void writeTo(OutputStream out) throws IOException;

  /**
   * Reads in a {@link BloomFilter} from an input stream. It is the caller's responsibility to
   * close the stream.
   */
  public static BloomFilter readFrom(InputStream in) throws IOException {
    return BloomFilterImpl.readFrom(in);
  }

  /**
   * Computes the optimal k (number of hashes per item inserted in Bloom filter), given the
   * expected insertions and total number of bits in the Bloom filter.
   *
   * See http://en.wikipedia.org/wiki/File:Bloom_filter_fp_probability.svg for the formula.
   *
   * @param n expected insertions (must be positive)
   * @param m total number of bits in Bloom filter (must be positive)
   */
  private static int optimalNumOfHashFunctions(long n, long m) {
    // (m / n) * log(2), but avoid truncation due to division!
    return Math.max(1, (int) Math.round((double) m / n * Math.log(2)));
  }

  /**
   * Computes m (total bits of Bloom filter) which is expected to achieve, for the specified
   * expected insertions, the required false positive probability.
   *
   * See http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives for the formula.
   *
   * @param n expected insertions (must be positive)
   * @param p false positive rate (must be 0 < p < 1)
   */
  private static long optimalNumOfBits(long n, double p) {
    return (long) (-n * Math.log(p) / (Math.log(2) * Math.log(2)));
  }

  static final double DEFAULT_FPP = 0.03;

  /**
   * Creates a {@link BloomFilter} with the expected number of insertions and a default expected
   * false positive probability of 3%.
   *
   * Note that overflowing a {@code BloomFilter} with significantly more elements than specified,
   * will result in its saturation, and a sharp deterioration of its false positive probability.
   */
  public static BloomFilter create(long expectedNumItems) {
    return create(expectedNumItems, DEFAULT_FPP);
  }

  /**
   * Creates a {@link BloomFilter} with the expected number of insertions and expected false
   * positive probability.
   *
   * Note that overflowing a {@code BloomFilter} with significantly more elements than specified,
   * will result in its saturation, and a sharp deterioration of its false positive probability.
   */
  public static BloomFilter create(long expectedNumItems, double fpp) {
    if (fpp <= 0D || fpp >= 1D) {
      // Include the offending value to make misconfiguration easy to diagnose.
      throw new IllegalArgumentException(
        "False positive probability must be within range (0.0, 1.0), but got " + fpp
      );
    }

    return create(expectedNumItems, optimalNumOfBits(expectedNumItems, fpp));
  }

  /**
   * Creates a {@link BloomFilter} with given {@code expectedNumItems} and {@code numBits}, it will
   * pick an optimal {@code numHashFunctions} which can minimize {@code fpp} for the bloom filter.
   */
  public static BloomFilter create(long expectedNumItems, long numBits) {
    if (expectedNumItems <= 0) {
      throw new IllegalArgumentException(
        "Expected insertions must be positive, but got " + expectedNumItems);
    }

    if (numBits <= 0) {
      throw new IllegalArgumentException("Number of bits must be positive, but got " + numBits);
    }

    return new BloomFilterImpl(optimalNumOfHashFunctions(expectedNumItems, numBits), numBits);
  }
}
| 9,972 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/CountMinSketch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
 * A Count-min sketch is a probabilistic data structure used for estimating the frequency of
 * items in a data stream using sub-linear space. Currently, supported data types include:
* <ul>
* <li>{@link Byte}</li>
* <li>{@link Short}</li>
* <li>{@link Integer}</li>
* <li>{@link Long}</li>
* <li>{@link String}</li>
* </ul>
* A {@link CountMinSketch} is initialized with a random seed, and a pair of parameters:
* <ol>
* <li>relative error (or {@code eps}), and
* <li>confidence (or {@code delta})
* </ol>
* Suppose you want to estimate the number of times an element {@code x} has appeared in a data
* stream so far. With probability {@code delta}, the estimate of this frequency is within the
* range {@code true frequency <= estimate <= true frequency + eps * N}, where {@code N} is the
 * total count of items that have appeared in the data stream so far.
*
* Under the cover, a {@link CountMinSketch} is essentially a two-dimensional {@code long} array
* with depth {@code d} and width {@code w}, where
* <ul>
 *   <li>{@code w = ceil(2 / eps)}</li>
 *   <li>{@code d = ceil(-log(1 - confidence) / log(2))}</li>
* </ul>
*
* This implementation is largely based on the {@code CountMinSketch} class from stream-lib.
*/
public abstract class CountMinSketch {

  public enum Version {
    /**
     * {@code CountMinSketch} binary format version 1. All values written in big-endian order:
     * <ul>
     *   <li>Version number, always 1 (32 bit)</li>
     *   <li>Total count of added items (64 bit)</li>
     *   <li>Depth (32 bit)</li>
     *   <li>Width (32 bit)</li>
     *   <li>Hash functions (depth * 64 bit)</li>
     *   <li>
     *     Count table
     *     <ul>
     *       <li>Row 0 (width * 64 bit)</li>
     *       <li>Row 1 (width * 64 bit)</li>
     *       <li>...</li>
     *       <li>Row {@code depth - 1} (width * 64 bit)</li>
     *     </ul>
     *   </li>
     * </ul>
     */
    V1(1);

    private final int versionNumber;

    Version(int versionNumber) {
      this.versionNumber = versionNumber;
    }

    int getVersionNumber() {
      return versionNumber;
    }
  }

  /**
   * Returns the relative error (or {@code eps}) of this {@link CountMinSketch}.
   */
  public abstract double relativeError();

  /**
   * Returns the confidence (or {@code delta}) of this {@link CountMinSketch}.
   */
  public abstract double confidence();

  /**
   * Depth of this {@link CountMinSketch}.
   */
  public abstract int depth();

  /**
   * Width of this {@link CountMinSketch}.
   */
  public abstract int width();

  /**
   * Total count of items added to this {@link CountMinSketch} so far.
   */
  public abstract long totalCount();

  /**
   * Increments {@code item}'s count by one.
   */
  public abstract void add(Object item);

  /**
   * Increments {@code item}'s count by {@code count}.
   */
  public abstract void add(Object item, long count);

  /**
   * Increments {@code item}'s count by one.
   */
  public abstract void addLong(long item);

  /**
   * Increments {@code item}'s count by {@code count}.
   */
  public abstract void addLong(long item, long count);

  /**
   * Increments {@code item}'s count by one.
   */
  public abstract void addString(String item);

  /**
   * Increments {@code item}'s count by {@code count}.
   */
  public abstract void addString(String item, long count);

  /**
   * Increments {@code item}'s count by one.
   */
  public abstract void addBinary(byte[] item);

  /**
   * Increments {@code item}'s count by {@code count}.
   */
  public abstract void addBinary(byte[] item, long count);

  /**
   * Returns the estimated frequency of {@code item}.
   */
  public abstract long estimateCount(Object item);

  /**
   * Merges another {@link CountMinSketch} with this one in place.
   *
   * Note that only Count-Min sketches with the same {@code depth}, {@code width}, and random seed
   * can be merged.
   *
   * @exception IncompatibleMergeException if the {@code other} {@link CountMinSketch} has
   *            incompatible depth, width, relative-error, confidence, or random seed.
   */
  public abstract CountMinSketch mergeInPlace(CountMinSketch other)
      throws IncompatibleMergeException;

  /**
   * Writes out this {@link CountMinSketch} to an output stream in binary format. It is the caller's
   * responsibility to close the stream.
   */
  public abstract void writeTo(OutputStream out) throws IOException;

  /**
   * Serializes this {@link CountMinSketch} and returns the serialized form.
   */
  public abstract byte[] toByteArray() throws IOException;

  /**
   * Reads in a {@link CountMinSketch} from an input stream. It is the caller's responsibility to
   * close the stream.
   */
  public static CountMinSketch readFrom(InputStream in) throws IOException {
    return CountMinSketchImpl.readFrom(in);
  }

  /**
   * Reads in a {@link CountMinSketch} from a byte array.
   */
  public static CountMinSketch readFrom(byte[] bytes) throws IOException {
    // try-with-resources ensures the stream is closed even if deserialization throws;
    // the previous code only closed it on the success path.
    try (InputStream in = new ByteArrayInputStream(bytes)) {
      return readFrom(in);
    }
  }

  /**
   * Creates a {@link CountMinSketch} with given {@code depth}, {@code width}, and random
   * {@code seed}.
   *
   * @param depth depth of the Count-min Sketch, must be positive
   * @param width width of the Count-min Sketch, must be positive
   * @param seed random seed
   */
  public static CountMinSketch create(int depth, int width, int seed) {
    return new CountMinSketchImpl(depth, width, seed);
  }

  /**
   * Creates a {@link CountMinSketch} with given relative error ({@code eps}), {@code confidence},
   * and random {@code seed}.
   *
   * @param eps relative error, must be positive
   * @param confidence confidence, must be positive and less than 1.0
   * @param seed random seed
   */
  public static CountMinSketch create(double eps, double confidence, int seed) {
    return new CountMinSketchImpl(eps, confidence, seed);
  }
}
| 9,973 |
0 | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util | Create_ds/spark/common/sketch/src/main/java/org/apache/spark/util/sketch/CountMinSketchImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.sketch;
import java.io.*;
import java.util.Arrays;
import java.util.Random;
/**
 * Array-backed {@link CountMinSketch}: a {@code depth x width} table of long counters with one
 * linear hash function per row. Serializable both through the explicit V1 binary format
 * ({@link #writeTo}) and through Java serialization, which delegates to the same format.
 */
class CountMinSketchImpl extends CountMinSketch implements Serializable {
  // Mersenne prime 2^31 - 1, the modulus of the linear hash functions below.
  private static final long PRIME_MODULUS = (1L << 31) - 1;

  private int depth;
  private int width;
  private long[][] table;
  private long[] hashA;
  private long totalCount;
  private double eps;
  private double confidence;

  // For deserialization only; readFrom/readObject fill in the fields afterwards.
  private CountMinSketchImpl() {}

  CountMinSketchImpl(int depth, int width, int seed) {
    if (depth <= 0 || width <= 0) {
      throw new IllegalArgumentException("Depth and width must be both positive");
    }

    this.depth = depth;
    this.width = width;
    this.eps = 2.0 / width;
    this.confidence = 1 - 1 / Math.pow(2, depth);
    initTablesWith(depth, width, seed);
  }

  CountMinSketchImpl(double eps, double confidence, int seed) {
    if (eps <= 0D) {
      throw new IllegalArgumentException("Relative error must be positive");
    }

    if (confidence <= 0D || confidence >= 1D) {
      throw new IllegalArgumentException("Confidence must be within range (0.0, 1.0)");
    }

    // 2/w = eps ; w = 2/eps
    // 1/2^depth <= 1-confidence ; depth >= -log2 (1-confidence)
    this.eps = eps;
    this.confidence = confidence;
    this.width = (int) Math.ceil(2 / eps);
    this.depth = (int) Math.ceil(-Math.log(1 - confidence) / Math.log(2));
    initTablesWith(depth, width, seed);
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }

    // instanceof is null-safe, so no separate null check is necessary.
    if (!(other instanceof CountMinSketchImpl)) {
      return false;
    }

    CountMinSketchImpl that = (CountMinSketchImpl) other;

    return
      this.depth == that.depth &&
      this.width == that.width &&
      this.totalCount == that.totalCount &&
      Arrays.equals(this.hashA, that.hashA) &&
      Arrays.deepEquals(this.table, that.table);
  }

  @Override
  public int hashCode() {
    // Combines exactly the fields compared in equals(), preserving the equals/hashCode contract.
    int hash = depth;
    hash = hash * 31 + width;
    hash = hash * 31 + (int) (totalCount ^ (totalCount >>> 32));
    hash = hash * 31 + Arrays.hashCode(hashA);
    hash = hash * 31 + Arrays.deepHashCode(table);
    return hash;
  }

  private void initTablesWith(int depth, int width, int seed) {
    this.table = new long[depth][width];
    this.hashA = new long[depth];
    Random r = new Random(seed);
    // We're using a linear hash functions
    // of the form (a*x+b) mod p.
    // a,b are chosen independently for each hash function.
    // However we can set b = 0 as all it does is shift the results
    // without compromising their uniformity or independence with
    // the other hashes.
    for (int i = 0; i < depth; ++i) {
      hashA[i] = r.nextInt(Integer.MAX_VALUE);
    }
  }

  @Override
  public double relativeError() {
    return eps;
  }

  @Override
  public double confidence() {
    return confidence;
  }

  @Override
  public int depth() {
    return depth;
  }

  @Override
  public int width() {
    return width;
  }

  @Override
  public long totalCount() {
    return totalCount;
  }

  @Override
  public void add(Object item) {
    add(item, 1);
  }

  @Override
  public void add(Object item, long count) {
    // Dispatch on the runtime type; anything other than String/byte[] is expected to be an
    // integral type that Utils.integralToLong can widen.
    if (item instanceof String) {
      addString((String) item, count);
    } else if (item instanceof byte[]) {
      addBinary((byte[]) item, count);
    } else {
      addLong(Utils.integralToLong(item), count);
    }
  }

  @Override
  public void addString(String item) {
    addString(item, 1);
  }

  @Override
  public void addString(String item, long count) {
    addBinary(Utils.getBytesFromUTF8String(item), count);
  }

  @Override
  public void addLong(long item) {
    addLong(item, 1);
  }

  @Override
  public void addLong(long item, long count) {
    if (count < 0) {
      throw new IllegalArgumentException("Negative increments not implemented");
    }

    for (int i = 0; i < depth; ++i) {
      table[i][hash(item, i)] += count;
    }

    totalCount += count;
  }

  @Override
  public void addBinary(byte[] item) {
    addBinary(item, 1);
  }

  @Override
  public void addBinary(byte[] item, long count) {
    if (count < 0) {
      throw new IllegalArgumentException("Negative increments not implemented");
    }

    int[] buckets = getHashBuckets(item, depth, width);

    for (int i = 0; i < depth; ++i) {
      table[i][buckets[i]] += count;
    }

    totalCount += count;
  }

  // Returns the bucket index for item in hash-function row i. (The second parameter was
  // previously named "count", which misleadingly suggested an increment amount.)
  private int hash(long item, int i) {
    long hash = hashA[i] * item;
    // A super fast way of computing x mod 2^p-1
    // See http://www.cs.princeton.edu/courses/archive/fall09/cos521/Handouts/universalclasses.pdf
    // page 149, right after Proposition 7.
    hash += hash >> 32;
    hash &= PRIME_MODULUS;
    // Doing "%" after (int) conversion is ~2x faster than %'ing longs.
    return ((int) hash) % width;
  }

  private static int[] getHashBuckets(String key, int hashCount, int max) {
    return getHashBuckets(Utils.getBytesFromUTF8String(key), hashCount, max);
  }

  // Double hashing: derives hashCount bucket indices from two Murmur3 hashes of the bytes,
  // where the second hash is seeded with the first.
  private static int[] getHashBuckets(byte[] b, int hashCount, int max) {
    int[] result = new int[hashCount];
    int hash1 = Murmur3_x86_32.hashUnsafeBytes(b, Platform.BYTE_ARRAY_OFFSET, b.length, 0);
    int hash2 = Murmur3_x86_32.hashUnsafeBytes(b, Platform.BYTE_ARRAY_OFFSET, b.length, hash1);
    for (int i = 0; i < hashCount; i++) {
      result[i] = Math.abs((hash1 + i * hash2) % max);
    }
    return result;
  }

  @Override
  public long estimateCount(Object item) {
    if (item instanceof String) {
      return estimateCountForStringItem((String) item);
    } else if (item instanceof byte[]) {
      return estimateCountForBinaryItem((byte[]) item);
    } else {
      return estimateCountForLongItem(Utils.integralToLong(item));
    }
  }

  // The estimate is the minimum counter across all rows; Count-Min can only over-count.
  private long estimateCountForLongItem(long item) {
    long res = Long.MAX_VALUE;
    for (int i = 0; i < depth; ++i) {
      res = Math.min(res, table[i][hash(item, i)]);
    }
    return res;
  }

  private long estimateCountForStringItem(String item) {
    long res = Long.MAX_VALUE;
    int[] buckets = getHashBuckets(item, depth, width);
    for (int i = 0; i < depth; ++i) {
      res = Math.min(res, table[i][buckets[i]]);
    }
    return res;
  }

  private long estimateCountForBinaryItem(byte[] item) {
    long res = Long.MAX_VALUE;
    int[] buckets = getHashBuckets(item, depth, width);
    for (int i = 0; i < depth; ++i) {
      res = Math.min(res, table[i][buckets[i]]);
    }
    return res;
  }

  @Override
  public CountMinSketch mergeInPlace(CountMinSketch other) throws IncompatibleMergeException {
    if (other == null) {
      throw new IncompatibleMergeException("Cannot merge null estimator");
    }

    if (!(other instanceof CountMinSketchImpl)) {
      throw new IncompatibleMergeException(
        "Cannot merge estimator of class " + other.getClass().getName()
      );
    }

    CountMinSketchImpl that = (CountMinSketchImpl) other;

    if (this.depth != that.depth) {
      throw new IncompatibleMergeException("Cannot merge estimators of different depth");
    }

    if (this.width != that.width) {
      throw new IncompatibleMergeException("Cannot merge estimators of different width");
    }

    // Equal seeds produce equal hashA arrays, so this is the seed-compatibility check.
    if (!Arrays.equals(this.hashA, that.hashA)) {
      throw new IncompatibleMergeException("Cannot merge estimators of different seed");
    }

    for (int i = 0; i < this.table.length; ++i) {
      for (int j = 0; j < this.table[i].length; ++j) {
        this.table[i][j] = this.table[i][j] + that.table[i][j];
      }
    }

    this.totalCount += that.totalCount;
    return this;
  }

  @Override
  public void writeTo(OutputStream out) throws IOException {
    // Layout must match CountMinSketch.Version.V1 exactly.
    DataOutputStream dos = new DataOutputStream(out);

    dos.writeInt(Version.V1.getVersionNumber());

    dos.writeLong(this.totalCount);
    dos.writeInt(this.depth);
    dos.writeInt(this.width);

    for (int i = 0; i < this.depth; ++i) {
      dos.writeLong(this.hashA[i]);
    }

    for (int i = 0; i < this.depth; ++i) {
      for (int j = 0; j < this.width; ++j) {
        dos.writeLong(table[i][j]);
      }
    }
  }

  @Override
  public byte[] toByteArray() throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    writeTo(out);
    out.close();
    return out.toByteArray();
  }

  public static CountMinSketchImpl readFrom(InputStream in) throws IOException {
    CountMinSketchImpl sketch = new CountMinSketchImpl();
    sketch.readFrom0(in);
    return sketch;
  }

  private void readFrom0(InputStream in) throws IOException {
    DataInputStream dis = new DataInputStream(in);

    int version = dis.readInt();
    if (version != Version.V1.getVersionNumber()) {
      throw new IOException("Unexpected Count-Min Sketch version number (" + version + ")");
    }

    this.totalCount = dis.readLong();
    this.depth = dis.readInt();
    this.width = dis.readInt();
    // Validate the header before allocating, so a corrupt stream fails with an IOException
    // instead of a NegativeArraySizeException or an enormous allocation.
    if (depth <= 0 || width <= 0) {
      throw new IOException(
        "Unexpected Count-Min Sketch dimensions (depth: " + depth + ", width: " + width + ")");
    }
    this.eps = 2.0 / width;
    this.confidence = 1 - 1 / Math.pow(2, depth);

    this.hashA = new long[depth];
    for (int i = 0; i < depth; ++i) {
      this.hashA[i] = dis.readLong();
    }

    this.table = new long[depth][width];
    for (int i = 0; i < depth; ++i) {
      for (int j = 0; j < width; ++j) {
        this.table[i][j] = dis.readLong();
      }
    }
  }

  // Java serialization reuses the V1 binary format.
  private void writeObject(ObjectOutputStream out) throws IOException {
    this.writeTo(out);
  }

  private void readObject(ObjectInputStream in) throws IOException {
    this.readFrom0(in);
  }
}
| 9,974 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/JavaStatusTrackerDemo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples;
import org.apache.spark.SparkJobInfo;
import org.apache.spark.SparkStageInfo;
import org.apache.spark.api.java.JavaFutureAction;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.SparkSession;
import java.util.Arrays;
import java.util.List;
/**
* Example of using Spark's status APIs from Java.
*/
public final class JavaStatusTrackerDemo {
  public static final String APP_NAME = "JavaStatusAPIDemo";

  /** Identity mapper that sleeps briefly so job progress is observable. */
  public static final class IdentityWithDelay<T> implements Function<T, T> {
    @Override
    public T call(T x) throws Exception {
      Thread.sleep(2 * 1000); // 2 seconds
      return x;
    }
  }

  public static void main(String[] args) throws Exception {
    SparkSession spark = SparkSession
      .builder()
      .appName(APP_NAME)
      .getOrCreate();
    JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

    // Kick off a job asynchronously; the per-element delay keeps it running long enough
    // for the polling loop below to observe progress.
    JavaRDD<Integer> delayedRdd =
      jsc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 5).map(new IdentityWithDelay<>());
    JavaFutureAction<List<Integer>> future = delayedRdd.collectAsync();

    // Poll the status tracker once per second until the job completes.
    while (!future.isDone()) {
      Thread.sleep(1000); // 1 second
      List<Integer> ids = future.jobIds();
      if (!ids.isEmpty()) {
        // Describe the most recently submitted job backing this future.
        int latestJobId = ids.get(ids.size() - 1);
        SparkJobInfo job = jsc.statusTracker().getJobInfo(latestJobId);
        SparkStageInfo stage = jsc.statusTracker().getStageInfo(job.stageIds()[0]);
        System.out.println(stage.numTasks() + " tasks total: " + stage.numActiveTasks() +
          " active, " + stage.numCompletedTasks() + " complete");
      }
    }

    System.out.println("Job results are: " + future.get());
    spark.stop();
  }
}
| 9,975 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/JavaTC.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
import scala.Tuple2;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.SparkSession;
/**
* Transitive closure on a graph, implemented in Java.
* Usage: JavaTC [partitions]
*/
public final class JavaTC {
  private static final int numEdges = 200;
  private static final int numVertices = 100;
  private static final Random rand = new Random(42);

  /**
   * Generates a random directed graph with {@code numEdges} distinct edges over
   * {@code numVertices} vertices. Self-loops are excluded; the fixed seed makes the
   * graph deterministic across runs.
   */
  static List<Tuple2<Integer, Integer>> generateGraph() {
    Set<Tuple2<Integer, Integer>> edges = new HashSet<>(numEdges);
    while (edges.size() < numEdges) {
      int from = rand.nextInt(numVertices);
      int to = rand.nextInt(numVertices);
      Tuple2<Integer, Integer> e = new Tuple2<>(from, to);
      if (from != to) {
        edges.add(e);
      }
    }
    return new ArrayList<>(edges);
  }

  /** Projects a joined triple (y, (z, x)) to the newly discovered path (x, z). */
  static class ProjectFn implements PairFunction<Tuple2<Integer, Tuple2<Integer, Integer>>,
      Integer, Integer> {
    static final ProjectFn INSTANCE = new ProjectFn();

    @Override
    public Tuple2<Integer, Integer> call(Tuple2<Integer, Tuple2<Integer, Integer>> triple) {
      return new Tuple2<>(triple._2()._2(), triple._2()._1());
    }
  }

  public static void main(String[] args) {
    SparkSession spark = SparkSession
      .builder()
      .appName("JavaTC")
      .getOrCreate();

    JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

    // Use a primitive int; there is no need to box the partition count.
    int slices = (args.length > 0) ? Integer.parseInt(args[0]) : 2;
    JavaPairRDD<Integer, Integer> tc = jsc.parallelizePairs(generateGraph(), slices).cache();

    // Linear transitive closure: each round grows paths by one edge,
    // by joining the graph's edges with the already-discovered paths.
    // e.g. join the path (y, z) from the TC with the edge (x, y) from
    // the graph to obtain the path (x, z).

    // Because join() joins on keys, the edges are stored in reversed order.
    JavaPairRDD<Integer, Integer> edges = tc.mapToPair(e -> new Tuple2<>(e._2(), e._1()));

    long oldCount;
    long nextCount = tc.count();
    do {
      oldCount = nextCount;
      // Perform the join, obtaining an RDD of (y, (z, x)) pairs,
      // then project the result to obtain the new (x, z) paths.
      tc = tc.union(tc.join(edges).mapToPair(ProjectFn.INSTANCE)).distinct().cache();
      nextCount = tc.count();
    } while (nextCount != oldCount);

    // nextCount already holds the final size; avoid triggering another RDD count.
    System.out.println("TC has " + nextCount + " edges.");
    spark.stop();
  }
}
| 9,976 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples;
import scala.Tuple2;
import scala.Tuple3;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Executes a roll up-style query against Apache logs.
*
* Usage: JavaLogQuery [logFile]
*/
public final class JavaLogQuery {
  // Two sample Apache access-log lines, used when no log file path is supplied on the
  // command line.
  public static final List<String> exampleApacheLogs = Arrays.asList(
    "10.10.10.10 - \"FRED\" [18/Jan/2013:17:56:07 +1100] \"GET http://images.com/2013/Generic.jpg " +
    "HTTP/1.1\" 304 315 \"http://referall.com/\" \"Mozilla/4.0 (compatible; MSIE 7.0; " +
    "Windows NT 5.1; GTB7.4; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; " +
    ".NET CLR 3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR " +
    "3.5.30729; Release=ARP)\" \"UD-1\" - \"image/jpeg\" \"whatever\" 0.350 \"-\" - \"\" 265 923 934 \"\" " +
    "62.24.11.25 images.com 1358492167 - Whatup",
    "10.10.10.10 - \"FRED\" [18/Jan/2013:18:02:37 +1100] \"GET http://images.com/2013/Generic.jpg " +
    "HTTP/1.1\" 304 306 \"http:/referall.com\" \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; " +
    "GTB7.4; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR " +
    "3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR " +
    "3.5.30729; Release=ARP)\" \"UD-1\" - \"image/jpeg\" \"whatever\" 0.352 \"-\" - \"\" 256 977 988 \"\" " +
    "0 73.23.2.15 images.com 1358492557 - Whatup");
  // Combined Apache log pattern. Capture groups used below: 1 = client IP, 3 = user,
  // 5 = request line, 7 = response size ("-" or digits).
  public static final Pattern apacheLogRegex = Pattern.compile(
    "^([\\d.]+) (\\S+) (\\S+) \\[([\\w\\d:/]+\\s[+\\-]\\d{4})\\] \"(.+?)\" (\\d{3}) ([\\d\\-]+) \"([^\"]+)\" \"([^\"]+)\".*");
  /** Tracks the total query count and number of aggregate bytes for a particular group. */
  public static class Stats implements Serializable {
    private final int count;
    private final int numBytes;
    public Stats(int count, int numBytes) {
      this.count = count;
      this.numBytes = numBytes;
    }
    // Combines two partial aggregates by summing their counts and byte totals.
    public Stats merge(Stats other) {
      return new Stats(count + other.count, numBytes + other.numBytes);
    }
    public String toString() {
      return String.format("bytes=%s\tn=%s", numBytes, count);
    }
  }
  // Extracts the (ip, user, query) grouping key from a log line; returns a tuple of nulls
  // when the line does not match the pattern or the user field is "-".
  public static Tuple3<String, String, String> extractKey(String line) {
    Matcher m = apacheLogRegex.matcher(line);
    if (m.find()) {
      String ip = m.group(1);
      String user = m.group(3);
      String query = m.group(5);
      if (!user.equalsIgnoreCase("-")) {
        return new Tuple3<>(ip, user, query);
      }
    }
    return new Tuple3<>(null, null, null);
  }
  // Builds the per-line aggregate: one query, plus the response size when the line parses.
  // NOTE(review): group 7 may be "-" per the regex, in which case Integer.parseInt would
  // throw NumberFormatException — confirm inputs always carry a numeric size.
  public static Stats extractStats(String line) {
    Matcher m = apacheLogRegex.matcher(line);
    if (m.find()) {
      int bytes = Integer.parseInt(m.group(7));
      return new Stats(1, bytes);
    } else {
      return new Stats(1, 0);
    }
  }
  public static void main(String[] args) {
    SparkSession spark = SparkSession
      .builder()
      .appName("JavaLogQuery")
      .getOrCreate();
    JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
    // Read log lines either from the supplied file or from the built-in samples.
    JavaRDD<String> dataSet = (args.length == 1) ? jsc.textFile(args[0]) : jsc.parallelize(exampleApacheLogs);
    // Pair every line with its (ip, user, query) key and per-line stats, then roll up by key.
    JavaPairRDD<Tuple3<String, String, String>, Stats> extracted =
      dataSet.mapToPair(s -> new Tuple2<>(extractKey(s), extractStats(s)));
    JavaPairRDD<Tuple3<String, String, String>, Stats> counts = extracted.reduceByKey(Stats::merge);
    List<Tuple2<Tuple3<String, String, String>, Stats>> output = counts.collect();
    for (Tuple2<?,?> t : output) {
      System.out.println(t._1() + "\t" + t._2());
    }
    spark.stop();
  }
}
| 9,977 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import java.util.ArrayList;
import java.util.List;
/**
* Computes an approximation to pi
* Usage: JavaSparkPi [partitions]
*/
public final class JavaSparkPi {
  public static void main(String[] args) throws Exception {
    SparkSession spark = SparkSession
      .builder()
      .appName("JavaSparkPi")
      .getOrCreate();

    JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

    int slices = (args.length == 1) ? Integer.parseInt(args[0]) : 2;
    // 100000 samples per partition. Note this multiplication can overflow int for very
    // large slice counts (> ~21474).
    int n = 100000 * slices;
    List<Integer> l = new ArrayList<>(n);
    for (int i = 0; i < n; i++) {
      l.add(i);
    }

    JavaRDD<Integer> dataSet = jsc.parallelize(l, slices);

    // Monte Carlo estimate: sample points uniformly in [-1, 1] x [-1, 1] and count the
    // fraction landing inside the unit circle, which covers pi/4 of the square.
    int count = dataSet.map(integer -> {
      double x = Math.random() * 2 - 1;
      double y = Math.random() * 2 - 1;
      return (x * x + y * y <= 1) ? 1 : 0;
    }).reduce(Integer::sum);

    System.out.println("Pi is roughly " + 4.0 * count / n);

    spark.stop();
  }
}
| 9,978 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import scala.Tuple2;
import com.google.common.collect.Iterables;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.sql.SparkSession;
/**
* Computes the PageRank of URLs from an input file. Input file should
* be in format of:
* URL neighbor URL
* URL neighbor URL
* URL neighbor URL
* ...
* where URL and their neighbors are separated by space(s).
*
* This is an example implementation for learning how to use Spark. For more conventional use,
* please refer to org.apache.spark.graphx.lib.PageRank
*
* Example Usage:
* <pre>
* bin/run-example JavaPageRank data/mllib/pagerank_data.txt 10
* </pre>
*/
public final class JavaPageRank {
  private static final Pattern SPACES = Pattern.compile("\\s+");

  /** Warns that this implementation is an example, not a production PageRank. */
  static void showWarning() {
    String warning = "WARN: This is a naive implementation of PageRank " +
            "and is given as an example! \n" +
            "Please use the PageRank implementation found in " +
            "org.apache.spark.graphx.lib.PageRank for more conventional use.";
    System.err.println(warning);
  }

  /** Sums two rank contributions; used as the reduce function for reduceByKey. */
  private static class Sum implements Function2<Double, Double, Double> {
    @Override
    public Double call(Double a, Double b) {
      return a + b;
    }
  }

  public static void main(String[] args) throws Exception {
    if (args.length < 2) {
      System.err.println("Usage: JavaPageRank <file> <number_of_iterations>");
      System.exit(1);
    }

    showWarning();

    SparkSession spark = SparkSession
      .builder()
      .appName("JavaPageRank")
      .getOrCreate();

    // Loads in input file. It should be in format of:
    //     URL         neighbor URL
    //     URL         neighbor URL
    //     URL         neighbor URL
    //     ...
    JavaRDD<String> lines = spark.read().textFile(args[0]).javaRDD();

    // Loads all URLs from input file and initialize their neighbors.
    JavaPairRDD<String, Iterable<String>> links = lines.mapToPair(s -> {
      String[] parts = SPACES.split(s);
      return new Tuple2<>(parts[0], parts[1]);
    }).distinct().groupByKey().cache();

    // Loads all URLs with other URL(s) link to from input file and initialize ranks of them to one.
    JavaPairRDD<String, Double> ranks = links.mapValues(rs -> 1.0);

    // Calculates and updates URL ranks continuously using PageRank algorithm.
    for (int current = 0; current < Integer.parseInt(args[1]); current++) {
      // Calculates URL contributions to the rank of other URLs.
      JavaPairRDD<String, Double> contribs = links.join(ranks).values()
        .flatMapToPair(s -> {
          int urlCount = Iterables.size(s._1());
          List<Tuple2<String, Double>> results = new ArrayList<>();
          // Use the accessor s._1() consistently instead of mixing it with the raw field s._1.
          for (String n : s._1()) {
            results.add(new Tuple2<>(n, s._2() / urlCount));
          }
          return results.iterator();
        });

      // Re-calculates URL ranks based on neighbor contributions, with 0.15 damping.
      ranks = contribs.reduceByKey(new Sum()).mapValues(sum -> 0.15 + sum * 0.85);
    }

    // Collects all URL ranks and dump them to console.
    List<Tuple2<String, Double>> output = ranks.collect();
    for (Tuple2<?,?> tuple : output) {
      System.out.println(tuple._1() + " has rank: " + tuple._2() + ".");
    }

    spark.stop();
  }
}
| 9,979 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.sql.SparkSession;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Random;
import java.util.regex.Pattern;
/**
* Logistic regression based classification.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
* please refer to org.apache.spark.ml.classification.LogisticRegression.
*/
public final class JavaHdfsLR {

  // Number of feature dimensions in every input record.
  private static final int D = 10;
  // Fixed seed keeps the initial weights reproducible across runs.
  private static final Random rand = new Random(42);

  /** Warns on stderr that this hand-rolled LR is for demonstration only. */
  static void showWarning() {
    String warning = "WARN: This is a naive implementation of Logistic Regression " +
      "and is given as an example!\n" +
      "Please use org.apache.spark.ml.classification.LogisticRegression " +
      "for more conventional use.";
    System.err.println(warning);
  }

  /** A labeled feature vector: {@code x} holds D features, {@code y} the label. */
  static class DataPoint implements Serializable {
    DataPoint(double[] x, double y) {
      this.x = x;
      this.y = y;
    }

    double[] x;
    double y;
  }

  /** Parses a line of the form "y x1 x2 ... xD" (space separated). */
  static class ParsePoint implements Function<String, DataPoint> {
    private static final Pattern SPACE = Pattern.compile(" ");

    @Override
    public DataPoint call(String line) {
      String[] tok = SPACE.split(line);
      double y = Double.parseDouble(tok[0]);
      double[] x = new double[D];
      for (int i = 0; i < D; i++) {
        x[i] = Double.parseDouble(tok[i + 1]);
      }
      return new DataPoint(x, y);
    }
  }

  /** Element-wise sum of two D-dimensional vectors (used as the reducer). */
  static class VectorSum implements Function2<double[], double[], double[]> {
    @Override
    public double[] call(double[] a, double[] b) {
      double[] result = new double[D];
      for (int j = 0; j < D; j++) {
        result[j] = a[j] + b[j];
      }
      return result;
    }
  }

  /** Logistic-loss gradient at one data point for a fixed weight vector. */
  static class ComputeGradient implements Function<DataPoint, double[]> {
    private final double[] weights;

    ComputeGradient(double[] weights) {
      this.weights = weights;
    }

    @Override
    public double[] call(DataPoint p) {
      double[] gradient = new double[D];
      // The dot product and the logistic factor are identical for every
      // dimension, so compute them once outside the loop instead of once per
      // dimension (was O(D^2) dot-product work per point, now O(D)).
      double dot = dot(weights, p.x);
      double scale = (1 / (1 + Math.exp(-p.y * dot)) - 1) * p.y;
      for (int i = 0; i < D; i++) {
        gradient[i] = scale * p.x[i];
      }
      return gradient;
    }
  }

  /** Dot product of two D-dimensional vectors. */
  public static double dot(double[] a, double[] b) {
    double x = 0;
    for (int i = 0; i < D; i++) {
      x += a[i] * b[i];
    }
    return x;
  }

  /** Prints a weight vector on a single line. */
  public static void printWeights(double[] a) {
    System.out.println(Arrays.toString(a));
  }

  public static void main(String[] args) {
    if (args.length < 2) {
      System.err.println("Usage: JavaHdfsLR <file> <iters>");
      System.exit(1);
    }

    showWarning();

    SparkSession spark = SparkSession
      .builder()
      .appName("JavaHdfsLR")
      .getOrCreate();

    JavaRDD<String> lines = spark.read().textFile(args[0]).javaRDD();
    JavaRDD<DataPoint> points = lines.map(new ParsePoint()).cache();
    int ITERATIONS = Integer.parseInt(args[1]);

    // Initialize w to a random value in [-1, 1) per dimension.
    double[] w = new double[D];
    for (int i = 0; i < D; i++) {
      w[i] = 2 * rand.nextDouble() - 1;
    }

    System.out.print("Initial w: ");
    printWeights(w);

    // Full-batch gradient descent with unit step size.
    for (int i = 1; i <= ITERATIONS; i++) {
      System.out.println("On iteration " + i);
      double[] gradient = points.map(
        new ComputeGradient(w)
      ).reduce(new VectorSum());
      for (int j = 0; j < D; j++) {
        w[j] -= gradient[j];
      }
    }

    System.out.print("Final w: ");
    printWeights(w);

    spark.stop();
  }
}
| 9,980 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples;
import scala.Tuple2;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.SparkSession;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
public final class JavaWordCount {
  private static final Pattern SPACE = Pattern.compile(" ");

  public static void main(String[] args) throws Exception {
    if (args.length < 1) {
      System.err.println("Usage: JavaWordCount <file>");
      System.exit(1);
    }

    SparkSession spark = SparkSession
      .builder()
      .appName("JavaWordCount")
      .getOrCreate();

    // Split every input line on single spaces to produce the token stream.
    JavaRDD<String> tokens = spark.read().textFile(args[0]).javaRDD()
      .flatMap(line -> Arrays.asList(SPACE.split(line)).iterator());

    // Pair each token with a count of one, then sum the counts per token.
    JavaPairRDD<String, Integer> wordCounts = tokens
      .mapToPair(token -> new Tuple2<>(token, 1))
      .reduceByKey(Integer::sum);

    // Print every word with its total occurrence count.
    List<Tuple2<String, Integer>> output = wordCounts.collect();
    for (Tuple2<?, ?> entry : output) {
      System.out.println(entry._1() + ": " + entry._2());
    }

    spark.stop();
  }
}
| 9,981 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import scala.Tuple2;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.classification.NaiveBayes;
import org.apache.spark.mllib.classification.NaiveBayesModel;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
// $example off$
import org.apache.spark.SparkConf;
public class JavaNaiveBayesExample {
  public static void main(String[] args) {
    SparkConf sparkConf = new SparkConf().setAppName("JavaNaiveBayesExample");
    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
    // $example on$
    String path = "data/mllib/sample_libsvm_data.txt";
    JavaRDD<LabeledPoint> inputData = MLUtils.loadLibSVMFile(jsc.sc(), path).toJavaRDD();
    // Split the data: 60% for training, 40% held out for testing.
    JavaRDD<LabeledPoint>[] tmp = inputData.randomSplit(new double[]{0.6, 0.4});
    JavaRDD<LabeledPoint> training = tmp[0]; // training set
    JavaRDD<LabeledPoint> test = tmp[1]; // test set
    // Train with additive (Laplace) smoothing parameter lambda = 1.0.
    NaiveBayesModel model = NaiveBayes.train(training.rdd(), 1.0);
    JavaPairRDD<Double, Double> predictionAndLabel =
      test.mapToPair(p -> new Tuple2<>(model.predict(p.features()), p.label()));
    // Fraction of test points whose predicted label matches the true label.
    double accuracy =
      predictionAndLabel.filter(pl -> pl._1().equals(pl._2())).count() / (double) test.count();
    // Report the metric; previously it was computed but never used, which
    // defeated the purpose of the evaluation step in this example.
    System.out.println("Accuracy: " + accuracy);
    // Save and load model
    model.save(jsc.sc(), "target/tmp/myNaiveBayesModel");
    NaiveBayesModel sameModel = NaiveBayesModel.load(jsc.sc(), "target/tmp/myNaiveBayesModel");
    // $example off$
    jsc.stop();
  }
}
| 9,982 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import java.util.Arrays;
import java.util.List;
// $example off$
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
// $example on$
import org.apache.spark.mllib.fpm.PrefixSpan;
import org.apache.spark.mllib.fpm.PrefixSpanModel;
// $example off$
import org.apache.spark.SparkConf;
public class JavaPrefixSpanExample {
  public static void main(String[] args) {
    SparkConf sparkConf = new SparkConf().setAppName("JavaPrefixSpanExample");
    JavaSparkContext sc = new JavaSparkContext(sparkConf);

    // $example on$
    // Each element is one sequence; a sequence is an ordered list of itemsets.
    List<List<List<Integer>>> rawSequences = Arrays.asList(
      Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3)),
      Arrays.asList(Arrays.asList(1), Arrays.asList(3, 2), Arrays.asList(1, 2)),
      Arrays.asList(Arrays.asList(1, 2), Arrays.asList(5)),
      Arrays.asList(Arrays.asList(6)));
    JavaRDD<List<List<Integer>>> sequences = sc.parallelize(rawSequences, 2);

    // Mine patterns of length <= 5 that occur in at least half the sequences.
    PrefixSpan miner = new PrefixSpan()
      .setMinSupport(0.5)
      .setMaxPatternLength(5);
    PrefixSpanModel<Integer> model = miner.run(sequences);

    // Print each frequent sequential pattern with its support count.
    for (PrefixSpan.FreqSequence<Integer> freqSeq : model.freqSequences().toJavaRDD().collect()) {
      System.out.println(freqSeq.javaSequence() + ", " + freqSeq.freq());
    }
    // $example off$

    sc.stop();
  }
}
| 9,983 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
// $example on$
import java.util.Arrays;
import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.mllib.stat.Statistics;
import org.apache.spark.mllib.stat.test.KolmogorovSmirnovTestResult;
// $example off$
public class JavaHypothesisTestingKolmogorovSmirnovTestExample {
  public static void main(String[] args) {
    SparkConf conf =
      new SparkConf().setAppName("JavaHypothesisTestingKolmogorovSmirnovTestExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);

    // $example on$
    // Small sample to test against a standard normal distribution.
    JavaDoubleRDD sample = jsc.parallelizeDoubles(Arrays.asList(0.1, 0.15, 0.2, 0.3, 0.25));

    // One-sample, two-sided KS test against N(mean = 0.0, stddev = 1.0).
    KolmogorovSmirnovTestResult ksResult =
      Statistics.kolmogorovSmirnovTest(sample, "norm", 0.0, 1.0);

    // The result carries the p-value, test statistic, and null hypothesis;
    // a significant p-value lets us reject the null hypothesis.
    System.out.println(ksResult);
    // $example off$

    jsc.stop();
  }
}
| 9,984 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
// $example on$
import scala.Tuple2;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.classification.SVMModel;
import org.apache.spark.mllib.classification.SVMWithSGD;
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
// $example off$
/**
* Example for SVMWithSGD.
*/
public class JavaSVMWithSGDExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("JavaSVMWithSGDExample");
    SparkContext sc = new SparkContext(conf);

    // $example on$
    String path = "data/mllib/sample_libsvm_data.txt";
    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc, path).toJavaRDD();

    // Use 60% of the data (sampled with a fixed seed) to fit the model;
    // everything not sampled is held out for evaluation.
    JavaRDD<LabeledPoint> training = data.sample(false, 0.6, 11L);
    training.cache();
    JavaRDD<LabeledPoint> test = data.subtract(training);

    // Fit a linear SVM via stochastic gradient descent.
    int numIterations = 100;
    SVMModel model = SVMWithSGD.train(training.rdd(), numIterations);

    // Clearing the threshold makes predict() return raw margins rather than
    // hard 0/1 labels, which the ROC metric below needs.
    model.clearThreshold();

    // Score every held-out point, keeping its true label alongside.
    JavaRDD<Tuple2<Object, Object>> scoreAndLabels = test.map(point ->
      new Tuple2<>(model.predict(point.features()), point.label()));

    // Summarize ranking quality with the area under the ROC curve.
    BinaryClassificationMetrics metrics =
      new BinaryClassificationMetrics(JavaRDD.toRDD(scoreAndLabels));
    double auROC = metrics.areaUnderROC();
    System.out.println("Area under ROC = " + auROC);

    // Persist the model and read it back to demonstrate round-tripping.
    model.save(sc, "target/tmp/javaSVMWithSGDModel");
    SVMModel sameModel = SVMModel.load(sc, "target/tmp/javaSVMWithSGDModel");
    // $example off$

    sc.stop();
  }
}
| 9,985 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
// $example on$
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.clustering.KMeans;
import org.apache.spark.mllib.clustering.KMeansModel;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
// $example off$
public class JavaKMeansExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("JavaKMeansExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);

    // $example on$
    // Load and parse data: each line holds space-separated point coordinates.
    String path = "data/mllib/kmeans_data.txt";
    JavaRDD<String> data = jsc.textFile(path);
    JavaRDD<Vector> parsedData = data.map(s -> {
      String[] sarray = s.split(" ");
      double[] values = new double[sarray.length];
      for (int i = 0; i < sarray.length; i++) {
        values[i] = Double.parseDouble(sarray[i]);
      }
      return Vectors.dense(values);
    });
    parsedData.cache();

    // Cluster the data into two classes using KMeans
    int numClusters = 2;
    int numIterations = 20;
    KMeansModel clusters = KMeans.train(parsedData.rdd(), numClusters, numIterations);

    System.out.println("Cluster centers:");
    for (Vector center: clusters.clusterCenters()) {
      System.out.println(" " + center);
    }

    // computeCost makes a full pass over the data, and the "cost" and the
    // Within Set Sum of Squared Errors (WSSSE) are the same quantity, so
    // evaluate it once and reuse the value instead of running a second pass.
    double cost = clusters.computeCost(parsedData.rdd());
    System.out.println("Cost: " + cost);

    double WSSSE = cost;
    System.out.println("Within Set Sum of Squared Errors = " + WSSSE);

    // Save and load model
    clusters.save(jsc.sc(), "target/org/apache/spark/JavaKMeansExample/KMeansModel");
    KMeansModel sameModel = KMeansModel.load(jsc.sc(),
      "target/org/apache/spark/JavaKMeansExample/KMeansModel");
    // $example off$

    jsc.stop();
  }
}
| 9,986 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import scala.Tuple2;
import org.apache.spark.api.java.*;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.regression.LinearRegressionModel;
import org.apache.spark.mllib.regression.LinearRegressionWithSGD;
import org.apache.spark.mllib.evaluation.RegressionMetrics;
import org.apache.spark.SparkConf;
// $example off$
public class JavaRegressionMetricsExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("Java Regression Metrics Example");
    JavaSparkContext sc = new JavaSparkContext(conf);

    // $example on$
    // Load and parse the data, which is in LIBSVM format: "label idx:value ...".
    String path = "data/mllib/sample_linear_regression_data.txt";
    JavaRDD<String> data = sc.textFile(path);
    JavaRDD<LabeledPoint> parsedData = data.map(record -> {
      String[] fields = record.split(" ");
      double label = Double.parseDouble(fields[0]);
      double[] featureValues = new double[fields.length - 1];
      for (int j = 0; j < featureValues.length; j++) {
        featureValues[j] = Double.parseDouble(fields[j + 1].split(":")[1]);
      }
      return new LabeledPoint(label, Vectors.dense(featureValues));
    });
    parsedData.cache();

    // Fit a linear regression model with stochastic gradient descent.
    int numIterations = 100;
    LinearRegressionModel model = LinearRegressionWithSGD.train(JavaRDD.toRDD(parsedData),
      numIterations);

    // Pair each prediction with its observed label for evaluation.
    JavaPairRDD<Object, Object> valuesAndPreds = parsedData.mapToPair(point ->
      new Tuple2<>(model.predict(point.features()), point.label()));

    // Instantiate metrics object
    RegressionMetrics metrics = new RegressionMetrics(valuesAndPreds.rdd());

    // Squared error
    System.out.format("MSE = %f\n", metrics.meanSquaredError());
    System.out.format("RMSE = %f\n", metrics.rootMeanSquaredError());
    // R-squared
    System.out.format("R Squared = %f\n", metrics.r2());
    // Mean absolute error
    System.out.format("MAE = %f\n", metrics.meanAbsoluteError());
    // Explained variance
    System.out.format("Explained Variance = %f\n", metrics.explainedVariance());

    // Save and load model
    model.save(sc.sc(), "target/tmp/LogisticRegressionModel");
    LinearRegressionModel sameModel = LinearRegressionModel.load(sc.sc(),
      "target/tmp/LogisticRegressionModel");
    // $example off$

    sc.stop();
  }
}
| 9,987 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
// $example on$
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.feature.ChiSqSelector;
import org.apache.spark.mllib.feature.ChiSqSelectorModel;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
// $example off$
public class JavaChiSqSelectorExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("JavaChiSqSelectorExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);

    // $example on$
    JavaRDD<LabeledPoint> points = MLUtils.loadLibSVMFile(jsc.sc(),
      "data/mllib/sample_libsvm_data.txt").toJavaRDD().cache();

    // ChiSqSelector requires categorical features, so bucket every raw value
    // into one of 16 equal-width bins; although features are doubles, the
    // selector treats each distinct bucket value as a category.
    JavaRDD<LabeledPoint> discretizedData = points.map(labeled -> {
      int size = labeled.features().size();
      double[] buckets = new double[size];
      for (int idx = 0; idx < size; ++idx) {
        buckets[idx] = Math.floor(labeled.features().apply(idx) / 16);
      }
      return new LabeledPoint(labeled.label(), Vectors.dense(buckets));
    });

    // Fit a selector that keeps the 50 most predictive of the 692 features.
    ChiSqSelector selector = new ChiSqSelector(50);
    ChiSqSelectorModel transformer = selector.fit(discretizedData.rdd());

    // Project every feature vector down to the selected 50 features.
    JavaRDD<LabeledPoint> filteredData = discretizedData.map(labeled ->
      new LabeledPoint(labeled.label(), transformer.transform(labeled.features())));
    // $example off$

    System.out.println("filtered data: ");
    filteredData.foreach(System.out::println);

    jsc.stop();
  }
}
| 9,988 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import java.util.HashMap;
import java.util.Map;
import scala.Tuple2;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.tree.GradientBoostedTrees;
import org.apache.spark.mllib.tree.configuration.BoostingStrategy;
import org.apache.spark.mllib.tree.model.GradientBoostedTreesModel;
import org.apache.spark.mllib.util.MLUtils;
// $example off$
public class JavaGradientBoostingRegressionExample {
  public static void main(String[] args) {
    // $example on$
    SparkConf sparkConf = new SparkConf()
      .setAppName("JavaGradientBoostedTreesRegressionExample");
    JavaSparkContext jsc = new JavaSparkContext(sparkConf);

    // Load and parse the data file.
    String datapath = "data/mllib/sample_libsvm_data.txt";
    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), datapath).toJavaRDD();

    // Hold out 30% of the data for testing.
    JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.7, 0.3});
    JavaRDD<LabeledPoint> trainingData = splits[0];
    JavaRDD<LabeledPoint> testData = splits[1];

    // Configure boosting for regression (the defaultParams for "Regression"
    // use SquaredError loss).
    BoostingStrategy boostingStrategy = BoostingStrategy.defaultParams("Regression");
    boostingStrategy.setNumIterations(3); // Note: Use more iterations in practice.
    boostingStrategy.getTreeStrategy().setMaxDepth(5);
    // An empty categoricalFeaturesInfo map marks every feature as continuous.
    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
    boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);

    GradientBoostedTreesModel model = GradientBoostedTrees.train(trainingData, boostingStrategy);

    // Mean squared error of the model's predictions on the held-out set.
    JavaPairRDD<Double, Double> predictionAndLabel = testData.mapToPair(example ->
      new Tuple2<>(model.predict(example.features()), example.label()));
    double testMSE = predictionAndLabel.mapToDouble(pair -> {
      double err = pair._1() - pair._2();
      return err * err;
    }).mean();
    System.out.println("Test Mean Squared Error: " + testMSE);
    System.out.println("Learned regression GBT model:\n" + model.toDebugString());

    // Save and load model
    model.save(jsc.sc(), "target/tmp/myGradientBoostingRegressionModel");
    GradientBoostedTreesModel sameModel = GradientBoostedTreesModel.load(jsc.sc(),
      "target/tmp/myGradientBoostingRegressionModel");
    // $example off$

    jsc.stop();
  }
}
| 9,989 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
// $example on$
import scala.Tuple2;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.regression.LinearRegressionModel;
import org.apache.spark.mllib.regression.LinearRegressionWithSGD;
// $example off$
/**
* Example for LinearRegressionWithSGD.
*/
public class JavaLinearRegressionWithSGDExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("JavaLinearRegressionWithSGDExample");
    JavaSparkContext sc = new JavaSparkContext(conf);

    // $example on$
    // Load and parse the data. Each line is "label,f1 f2 f3 ...".
    String path = "data/mllib/ridge-data/lpsa.data";
    JavaRDD<String> data = sc.textFile(path);
    JavaRDD<LabeledPoint> parsedData = data.map(line -> {
      String[] parts = line.split(",");
      String[] features = parts[1].split(" ");
      double[] v = new double[features.length];
      // BUG FIX: the loop previously stopped at features.length - 1, silently
      // dropping the last feature (its slot in v stayed 0.0). Parse them all.
      for (int i = 0; i < features.length; i++) {
        v[i] = Double.parseDouble(features[i]);
      }
      return new LabeledPoint(Double.parseDouble(parts[0]), Vectors.dense(v));
    });
    parsedData.cache();

    // Building the model
    int numIterations = 100;
    double stepSize = 0.00000001;
    LinearRegressionModel model =
      LinearRegressionWithSGD.train(JavaRDD.toRDD(parsedData), numIterations, stepSize);

    // Evaluate model on training examples and compute training error
    JavaPairRDD<Double, Double> valuesAndPreds = parsedData.mapToPair(point ->
      new Tuple2<>(model.predict(point.features()), point.label()));
    double MSE = valuesAndPreds.mapToDouble(pair -> {
      double diff = pair._1() - pair._2();
      return diff * diff;
    }).mean();
    System.out.println("training Mean Squared Error = " + MSE);

    // Save and load model
    model.save(sc.sc(), "target/tmp/javaLinearRegressionWithSGDModel");
    LinearRegressionModel sameModel = LinearRegressionModel.load(sc.sc(),
      "target/tmp/javaLinearRegressionWithSGDModel");
    // $example off$

    sc.stop();
  }
}
| 9,990 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaLogisticRegressionWithLBFGSExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
// $example on$
import scala.Tuple2;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.classification.LogisticRegressionModel;
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS;
import org.apache.spark.mllib.evaluation.MulticlassMetrics;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
// $example off$
/**
 * Example for LogisticRegressionWithLBFGS.
 */
public class JavaLogisticRegressionWithLBFGSExample {
  public static void main(String[] args) {
    SparkConf sparkConf = new SparkConf().setAppName("JavaLogisticRegressionWithLBFGSExample");
    SparkContext sparkContext = new SparkContext(sparkConf);
    // $example on$
    String path = "data/mllib/sample_libsvm_data.txt";
    JavaRDD<LabeledPoint> dataset = MLUtils.loadLibSVMFile(sparkContext, path).toJavaRDD();

    // Hold out 40% of the data for evaluation; the split is seeded so the
    // example is reproducible.
    JavaRDD<LabeledPoint>[] parts = dataset.randomSplit(new double[] {0.6, 0.4}, 11L);
    JavaRDD<LabeledPoint> trainingSet = parts[0].cache();
    JavaRDD<LabeledPoint> testSet = parts[1];

    // Fit a 10-class logistic regression model with L-BFGS.
    LogisticRegressionModel model = new LogisticRegressionWithLBFGS()
      .setNumClasses(10)
      .run(trainingSet.rdd());

    // Pair each held-out point's prediction with its true label.
    JavaPairRDD<Object, Object> predictionAndLabels = testSet.mapToPair(point ->
        new Tuple2<>(model.predict(point.features()), point.label()));

    // Report overall accuracy on the held-out set.
    MulticlassMetrics metrics = new MulticlassMetrics(predictionAndLabels.rdd());
    System.out.println("Accuracy = " + metrics.accuracy());

    // Persist the fitted model and read it back.
    model.save(sparkContext, "target/tmp/javaLogisticRegressionWithLBFGSModel");
    LogisticRegressionModel sameModel = LogisticRegressionModel.load(sparkContext,
      "target/tmp/javaLogisticRegressionWithLBFGSModel");
    // $example off$

    sparkContext.stop();
  }
}
| 9,991 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import java.util.HashMap;
import java.util.Map;
import scala.Tuple2;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.tree.DecisionTree;
import org.apache.spark.mllib.tree.model.DecisionTreeModel;
import org.apache.spark.mllib.util.MLUtils;
// $example off$
/**
 * Example for training and evaluating a DecisionTree regression model.
 */
class JavaDecisionTreeRegressionExample {
  public static void main(String[] args) {
    // $example on$
    SparkConf sparkConf = new SparkConf().setAppName("JavaDecisionTreeRegressionExample");
    JavaSparkContext jsc = new JavaSparkContext(sparkConf);

    // Load and parse the data file.
    String datapath = "data/mllib/sample_libsvm_data.txt";
    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), datapath).toJavaRDD();
    // Split the data into training and test sets (30% held out for testing)
    JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.7, 0.3});
    JavaRDD<LabeledPoint> trainingData = splits[0];
    JavaRDD<LabeledPoint> testData = splits[1];

    // Set parameters.
    // Empty categoricalFeaturesInfo indicates all features are continuous.
    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
    String impurity = "variance";
    int maxDepth = 5;
    int maxBins = 32;

    // Train a DecisionTree model.
    DecisionTreeModel model = DecisionTree.trainRegressor(trainingData,
      categoricalFeaturesInfo, impurity, maxDepth, maxBins);

    // Evaluate model on test instances and compute test error (MSE).
    JavaPairRDD<Double, Double> predictionAndLabel =
      testData.mapToPair(p -> new Tuple2<>(model.predict(p.features()), p.label()));
    double testMSE = predictionAndLabel.mapToDouble(pl -> {
      double diff = pl._1() - pl._2();
      return diff * diff;
    }).mean();
    System.out.println("Test Mean Squared Error: " + testMSE);
    System.out.println("Learned regression tree model:\n" + model.toDebugString());

    // Save and load model
    model.save(jsc.sc(), "target/tmp/myDecisionTreeRegressionModel");
    DecisionTreeModel sameModel = DecisionTreeModel
      .load(jsc.sc(), "target/tmp/myDecisionTreeRegressionModel");
    // $example off$

    // FIX: stop the context so the example releases its resources, matching
    // the sibling examples in this package.
    jsc.stop();
  }
}
| 9,992 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaBisectingKMeansExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import java.util.Arrays;
import java.util.List;
// $example off$
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
// $example on$
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.clustering.BisectingKMeans;
import org.apache.spark.mllib.clustering.BisectingKMeansModel;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
// $example off$
/**
 * Java example for bisecting k-means clustering.
 */
public class JavaBisectingKMeansExample {
  public static void main(String[] args) {
    SparkConf sparkConf = new SparkConf().setAppName("JavaBisectingKMeansExample");
    JavaSparkContext sc = new JavaSparkContext(sparkConf);
    // $example on$
    // Four well-separated pairs of 2-D points, one pair per expected cluster.
    List<Vector> points = Arrays.asList(
      Vectors.dense(0.1, 0.1), Vectors.dense(0.3, 0.3),
      Vectors.dense(10.1, 10.1), Vectors.dense(10.3, 10.3),
      Vectors.dense(20.1, 20.1), Vectors.dense(20.3, 20.3),
      Vectors.dense(30.1, 30.1), Vectors.dense(30.3, 30.3)
    );
    JavaRDD<Vector> data = sc.parallelize(points, 2);

    // Cluster the points into four groups with bisecting k-means.
    BisectingKMeans bisectingKMeans = new BisectingKMeans().setK(4);
    BisectingKMeansModel model = bisectingKMeans.run(data);

    System.out.println("Compute Cost: " + model.computeCost(data));

    Vector[] centers = model.clusterCenters();
    for (int idx = 0; idx < centers.length; idx++) {
      System.out.println("Cluster Center " + idx + ": " + centers[idx]);
    }
    // $example off$

    sc.stop();
  }
}
| 9,993 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import java.util.HashMap;
import java.util.Map;
import scala.Tuple2;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.tree.RandomForest;
import org.apache.spark.mllib.tree.model.RandomForestModel;
import org.apache.spark.mllib.util.MLUtils;
import org.apache.spark.SparkConf;
// $example off$
/**
 * Example for training and evaluating a RandomForest regression model.
 */
public class JavaRandomForestRegressionExample {
  public static void main(String[] args) {
    // $example on$
    SparkConf conf = new SparkConf().setAppName("JavaRandomForestRegressionExample");
    JavaSparkContext context = new JavaSparkContext(conf);

    // Load and parse the data file.
    String inputPath = "data/mllib/sample_libsvm_data.txt";
    JavaRDD<LabeledPoint> examples = MLUtils.loadLibSVMFile(context.sc(), inputPath).toJavaRDD();
    // Hold out 30% of the examples for testing.
    JavaRDD<LabeledPoint>[] parts = examples.randomSplit(new double[]{0.7, 0.3});
    JavaRDD<LabeledPoint> train = parts[0];
    JavaRDD<LabeledPoint> test = parts[1];

    // Forest hyper-parameters; an empty categoricalFeaturesInfo map means
    // every feature is treated as continuous.
    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
    int numTrees = 3; // Use more in practice.
    String featureSubsetStrategy = "auto"; // Let the algorithm choose.
    String impurity = "variance";
    int maxDepth = 4;
    int maxBins = 32;
    int seed = 12345;

    // Train a RandomForest model.
    RandomForestModel model = RandomForest.trainRegressor(train,
      categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins,
      seed);

    // Mean squared error over the held-out examples.
    JavaPairRDD<Double, Double> predictionAndLabel =
      test.mapToPair(point -> new Tuple2<>(model.predict(point.features()), point.label()));
    double testMSE = predictionAndLabel.mapToDouble(pair -> {
      double err = pair._1() - pair._2();
      return err * err;
    }).mean();
    System.out.println("Test Mean Squared Error: " + testMSE);
    System.out.println("Learned regression forest model:\n" + model.toDebugString());

    // Save and load model
    model.save(context.sc(), "target/tmp/myRandomForestRegressionModel");
    RandomForestModel sameModel = RandomForestModel.load(context.sc(),
      "target/tmp/myRandomForestRegressionModel");
    // $example off$

    context.stop();
  }
}
| 9,994 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaLBFGSExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import java.util.Arrays;
import scala.Tuple2;
import org.apache.spark.api.java.*;
import org.apache.spark.mllib.classification.LogisticRegressionModel;
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.optimization.*;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
// $example off$
/**
 * Example of using L-BFGS directly to train a binary logistic regression
 * model, then evaluating it with area under the ROC curve.
 */
public class JavaLBFGSExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("L-BFGS Example");
    SparkContext sc = new SparkContext(conf);
    // $example on$
    String path = "data/mllib/sample_libsvm_data.txt";
    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc, path).toJavaRDD();
    int numFeatures = data.take(1).get(0).features().size();

    // Split initial RDD into two... [60% training data, 40% testing data].
    JavaRDD<LabeledPoint> trainingInit = data.sample(false, 0.6, 11L);
    JavaRDD<LabeledPoint> test = data.subtract(trainingInit);

    // Append 1 into the training data as intercept.
    // BUG FIX: build the optimizer input from the training split (trainingInit),
    // not from the full dataset, so the held-out test points are not trained on.
    JavaPairRDD<Object, Vector> training = trainingInit.mapToPair(p ->
        new Tuple2<>(p.label(), MLUtils.appendBias(p.features())));
    training.cache();

    // Run training algorithm to build the model.
    int numCorrections = 10;
    double convergenceTol = 1e-4;
    int maxNumIterations = 20;
    double regParam = 0.1;
    // Initial weights are all zero; the extra slot holds the intercept.
    Vector initialWeightsWithIntercept = Vectors.dense(new double[numFeatures + 1]);

    Tuple2<Vector, double[]> result = LBFGS.runLBFGS(
      training.rdd(),
      new LogisticGradient(),
      new SquaredL2Updater(),
      numCorrections,
      convergenceTol,
      maxNumIterations,
      regParam,
      initialWeightsWithIntercept);
    Vector weightsWithIntercept = result._1();
    double[] loss = result._2();

    // Split the solution into feature weights and the trailing intercept term.
    LogisticRegressionModel model = new LogisticRegressionModel(
      Vectors.dense(Arrays.copyOf(weightsWithIntercept.toArray(), weightsWithIntercept.size() - 1)),
      (weightsWithIntercept.toArray())[weightsWithIntercept.size() - 1]);

    // Clear the default threshold so predict() returns raw scores.
    model.clearThreshold();

    // Compute raw scores on the test set.
    JavaPairRDD<Object, Object> scoreAndLabels = test.mapToPair(p ->
        new Tuple2<>(model.predict(p.features()), p.label()));

    // Get evaluation metrics.
    BinaryClassificationMetrics metrics =
      new BinaryClassificationMetrics(scoreAndLabels.rdd());
    double auROC = metrics.areaUnderROC();

    System.out.println("Loss of each step in training process");
    for (double l : loss) {
      System.out.println(l);
    }
    System.out.println("Area under ROC = " + auROC);
    // $example off$

    sc.stop();
  }
}
| 9,995 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaHypothesisTestingExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
// $example on$
import java.util.Arrays;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Matrices;
import org.apache.spark.mllib.linalg.Matrix;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.stat.Statistics;
import org.apache.spark.mllib.stat.test.ChiSqTestResult;
// $example off$
/**
 * Example of chi-squared hypothesis testing with Spark MLlib.
 */
public class JavaHypothesisTestingExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("JavaHypothesisTestingExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);
    // $example on$
    // A vector composed of the frequencies of events.
    Vector frequencies = Vectors.dense(0.1, 0.15, 0.2, 0.3, 0.25);

    // Goodness of fit: with no second vector supplied as a parameter, the
    // observed frequencies are tested against a uniform distribution.
    ChiSqTestResult goodnessOfFitTestResult = Statistics.chiSqTest(frequencies);
    // Prints the p-value, degrees of freedom, test statistic, the method
    // used, and the null hypothesis.
    System.out.println(goodnessOfFitTestResult + "\n");

    // Create a contingency matrix ((1.0, 2.0), (3.0, 4.0), (5.0, 6.0))
    Matrix contingency = Matrices.dense(3, 2, new double[]{1.0, 3.0, 5.0, 2.0, 4.0, 6.0});

    // Conduct Pearson's independence test on the contingency matrix.
    ChiSqTestResult independenceTestResult = Statistics.chiSqTest(contingency);
    System.out.println(independenceTestResult + "\n");

    // An RDD of labeled points.
    JavaRDD<LabeledPoint> observations = jsc.parallelize(
      Arrays.asList(
        new LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)),
        new LabeledPoint(1.0, Vectors.dense(1.0, 2.0, 0.0)),
        new LabeledPoint(-1.0, Vectors.dense(-1.0, 0.0, -0.5))
      )
    );

    // A contingency table is built from the raw (label, feature) pairs and an
    // independence test is run per feature, yielding one ChiSqTestResult for
    // every feature against the label.
    ChiSqTestResult[] featureTestResults = Statistics.chiSqTest(observations.rdd());
    for (int col = 0; col < featureTestResults.length; col++) {
      System.out.println("Column " + (col + 1) + ":");
      System.out.println(featureTestResults[col] + "\n"); // summary of the test
    }
    // $example off$

    jsc.stop();
  }
}
| 9,996 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import java.util.Arrays;
import java.util.List;
import scala.Tuple2;
import org.apache.spark.api.java.*;
import org.apache.spark.mllib.evaluation.MultilabelMetrics;
import org.apache.spark.SparkConf;
// $example off$
/**
 * Example computing multilabel classification metrics (per-label, micro, and
 * aggregate statistics) from hand-built (predictions, labels) pairs.
 */
public class JavaMultiLabelClassificationMetricsExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("Multilabel Classification Metrics Example");
    JavaSparkContext sc = new JavaSparkContext(conf);
    // $example on$
    // Each tuple is (predicted label set, true label set).
    List<Tuple2<double[], double[]>> data = Arrays.asList(
      new Tuple2<>(new double[]{0.0, 1.0}, new double[]{0.0, 2.0}),
      new Tuple2<>(new double[]{0.0, 2.0}, new double[]{0.0, 1.0}),
      new Tuple2<>(new double[]{}, new double[]{0.0}),
      new Tuple2<>(new double[]{2.0}, new double[]{2.0}),
      new Tuple2<>(new double[]{2.0, 0.0}, new double[]{2.0, 0.0}),
      new Tuple2<>(new double[]{0.0, 1.0, 2.0}, new double[]{0.0, 1.0}),
      new Tuple2<>(new double[]{1.0}, new double[]{1.0, 2.0})
    );
    JavaRDD<Tuple2<double[], double[]>> scoreAndLabels = sc.parallelize(data);

    // Instantiate metrics object
    MultilabelMetrics metrics = new MultilabelMetrics(scoreAndLabels.rdd());

    // Summary stats
    System.out.format("Recall = %f\n", metrics.recall());
    System.out.format("Precision = %f\n", metrics.precision());
    System.out.format("F1 measure = %f\n", metrics.f1Measure());
    System.out.format("Accuracy = %f\n", metrics.accuracy());

    // Stats by labels
    // BUG FIX: iterate over every label; the previous bound of
    // metrics.labels().length - 1 skipped the stats for the last label.
    for (int i = 0; i < metrics.labels().length; i++) {
      System.out.format("Class %1.1f precision = %f\n", metrics.labels()[i], metrics.precision(
        metrics.labels()[i]));
      System.out.format("Class %1.1f recall = %f\n", metrics.labels()[i], metrics.recall(
        metrics.labels()[i]));
      System.out.format("Class %1.1f F1 score = %f\n", metrics.labels()[i], metrics.f1Measure(
        metrics.labels()[i]));
    }

    // Micro stats
    System.out.format("Micro recall = %f\n", metrics.microRecall());
    System.out.format("Micro precision = %f\n", metrics.microPrecision());
    System.out.format("Micro F1 measure = %f\n", metrics.microF1Measure());

    // Hamming loss
    System.out.format("Hamming loss = %f\n", metrics.hammingLoss());

    // Subset accuracy
    System.out.format("Subset accuracy = %f\n", metrics.subsetAccuracy());
    // $example off$

    sc.stop();
  }
}
| 9,997 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaMulticlassClassificationMetricsExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import scala.Tuple2;
import org.apache.spark.api.java.*;
import org.apache.spark.mllib.classification.LogisticRegressionModel;
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS;
import org.apache.spark.mllib.evaluation.MulticlassMetrics;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
import org.apache.spark.mllib.linalg.Matrix;
// $example off$
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
/**
 * Example computing multiclass classification metrics for a logistic
 * regression model trained with L-BFGS.
 */
public class JavaMulticlassClassificationMetricsExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("Multi class Classification Metrics Example");
    SparkContext sc = new SparkContext(conf);
    // $example on$
    String path = "data/mllib/sample_multiclass_classification_data.txt";
    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc, path).toJavaRDD();

    // Split initial RDD into two... [60% training data, 40% testing data].
    JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.6, 0.4}, 11L);
    JavaRDD<LabeledPoint> training = splits[0].cache();
    JavaRDD<LabeledPoint> test = splits[1];

    // Fit a three-class logistic regression model with L-BFGS.
    LogisticRegressionModel model = new LogisticRegressionWithLBFGS()
      .setNumClasses(3)
      .run(training.rdd());

    // Pair each test point's prediction with its true label.
    JavaPairRDD<Object, Object> predictionAndLabels = test.mapToPair(point ->
        new Tuple2<>(model.predict(point.features()), point.label()));

    // Get evaluation metrics.
    MulticlassMetrics metrics = new MulticlassMetrics(predictionAndLabels.rdd());

    // Confusion matrix
    Matrix confusion = metrics.confusionMatrix();
    System.out.println("Confusion matrix: \n" + confusion);

    // Overall statistics
    System.out.println("Accuracy = " + metrics.accuracy());

    // Per-class precision, recall, and F1.
    for (int i = 0; i < metrics.labels().length; i++) {
      double label = metrics.labels()[i];
      System.out.format("Class %f precision = %f\n", label, metrics.precision(label));
      System.out.format("Class %f recall = %f\n", label, metrics.recall(label));
      System.out.format("Class %f F1 score = %f\n", label, metrics.fMeasure(label));
    }

    // Weighted stats
    System.out.format("Weighted precision = %f\n", metrics.weightedPrecision());
    System.out.format("Weighted recall = %f\n", metrics.weightedRecall());
    System.out.format("Weighted F1 score = %f\n", metrics.weightedFMeasure());
    System.out.format("Weighted false positive rate = %f\n", metrics.weightedFalsePositiveRate());

    // Save and load model
    model.save(sc, "target/tmp/LogisticRegressionModel");
    LogisticRegressionModel sameModel = LogisticRegressionModel.load(sc,
      "target/tmp/LogisticRegressionModel");
    // $example off$

    sc.stop();
  }
}
| 9,998 |
0 | Create_ds/spark/examples/src/main/java/org/apache/spark/examples | Create_ds/spark/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib;
// $example on$
import java.util.HashMap;
import java.util.Map;
import scala.Tuple2;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.tree.DecisionTree;
import org.apache.spark.mllib.tree.model.DecisionTreeModel;
import org.apache.spark.mllib.util.MLUtils;
// $example off$
/**
 * Example for training and evaluating a DecisionTree classification model.
 */
class JavaDecisionTreeClassificationExample {
  public static void main(String[] args) {
    // $example on$
    SparkConf sparkConf = new SparkConf().setAppName("JavaDecisionTreeClassificationExample");
    JavaSparkContext jsc = new JavaSparkContext(sparkConf);

    // Load and parse the data file.
    String datapath = "data/mllib/sample_libsvm_data.txt";
    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), datapath).toJavaRDD();
    // Split the data into training and test sets (30% held out for testing)
    JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.7, 0.3});
    JavaRDD<LabeledPoint> trainingData = splits[0];
    JavaRDD<LabeledPoint> testData = splits[1];

    // Set parameters.
    // Empty categoricalFeaturesInfo indicates all features are continuous.
    int numClasses = 2;
    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
    String impurity = "gini";
    int maxDepth = 5;
    int maxBins = 32;

    // Train a DecisionTree model for classification.
    DecisionTreeModel model = DecisionTree.trainClassifier(trainingData, numClasses,
      categoricalFeaturesInfo, impurity, maxDepth, maxBins);

    // Evaluate model on test instances and compute test error (fraction of
    // misclassified points).
    JavaPairRDD<Double, Double> predictionAndLabel =
      testData.mapToPair(p -> new Tuple2<>(model.predict(p.features()), p.label()));
    double testErr =
      predictionAndLabel.filter(pl -> !pl._1().equals(pl._2())).count() / (double) testData.count();

    System.out.println("Test Error: " + testErr);
    System.out.println("Learned classification tree model:\n" + model.toDebugString());

    // Save and load model
    model.save(jsc.sc(), "target/tmp/myDecisionTreeClassificationModel");
    DecisionTreeModel sameModel = DecisionTreeModel
      .load(jsc.sc(), "target/tmp/myDecisionTreeClassificationModel");
    // $example off$

    // FIX: stop the context so the example releases its resources, matching
    // the sibling examples in this package.
    jsc.stop();
  }
}
| 9,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.