index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.util.AbstractFileRegion;
import org.junit.Test;
import org.mockito.Mockito;
import static org.junit.Assert.*;
import org.apache.spark.network.TestManagedBuffer;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
import org.apache.spark.network.util.ByteArrayWritableChannel;
/**
 * Unit tests for {@code MessageWithHeader}, covering both {@code FileRegion} and
 * {@code ByteBuf} bodies, composite headers, and reference-count bookkeeping on release.
 */
public class MessageWithHeaderSuite {

  // Body large enough to be written in a single transferTo call.
  @Test
  public void testSingleWrite() throws Exception {
    testFileRegionBody(8, 8);
  }

  // Body that requires multiple transferTo calls (1 long per call).
  @Test
  public void testShortWrite() throws Exception {
    testFileRegionBody(8, 1);
  }

  @Test
  public void testByteBufBody() throws Exception {
    testByteBufBody(Unpooled.copyLong(42));
  }

  // A composite buffer backed by a single NIO buffer should still round-trip correctly.
  @Test
  public void testCompositeByteBufBodySingleBuffer() throws Exception {
    ByteBuf header = Unpooled.copyLong(42);
    CompositeByteBuf compositeByteBuf = Unpooled.compositeBuffer();
    compositeByteBuf.addComponent(true, header);
    assertEquals(1, compositeByteBuf.nioBufferCount());
    testByteBufBody(compositeByteBuf);
  }

  // A composite buffer spanning two components; the retainedSlice keeps the shared
  // backing buffer alive so both components remain valid.
  @Test
  public void testCompositeByteBufBodyMultipleBuffers() throws Exception {
    ByteBuf header = Unpooled.copyLong(42);
    CompositeByteBuf compositeByteBuf = Unpooled.compositeBuffer();
    compositeByteBuf.addComponent(true, header.retainedSlice(0, 4));
    compositeByteBuf.addComponent(true, header.slice(4, 4));
    assertEquals(2, compositeByteBuf.nioBufferCount());
    testByteBufBody(compositeByteBuf);
  }

  /**
   * Test writing a {@link MessageWithHeader} using the given {@link ByteBuf} as header.
   *
   * Also verifies the reference-count lifecycle: convertToNetty() retains the body once
   * (refCnt 1 -> 2), and releasing the message drops every reference it holds
   * (header and body both end at refCnt 0).
   *
   * @param header the header to use.
   * @throws Exception thrown on error.
   */
  private void testByteBufBody(ByteBuf header) throws Exception {
    // Read the expected value before the message consumes the header.
    long expectedHeaderValue = header.getLong(header.readerIndex());
    ByteBuf bodyPassedToNettyManagedBuffer = Unpooled.copyLong(84);
    assertEquals(1, header.refCnt());
    assertEquals(1, bodyPassedToNettyManagedBuffer.refCnt());
    ManagedBuffer managedBuf = new NettyManagedBuffer(bodyPassedToNettyManagedBuffer);
    Object body = managedBuf.convertToNetty();
    // convertToNetty() is expected to retain the underlying buffer.
    assertEquals(2, bodyPassedToNettyManagedBuffer.refCnt());
    assertEquals(1, header.refCnt());
    MessageWithHeader msg = new MessageWithHeader(managedBuf, header, body, managedBuf.size());
    ByteBuf result = doWrite(msg, 1);
    assertEquals(msg.count(), result.readableBytes());
    assertEquals(expectedHeaderValue, result.readLong());
    assertEquals(84, result.readLong());
    // release() must drop both the retained body reference and the header reference.
    assertTrue(msg.release());
    assertEquals(0, bodyPassedToNettyManagedBuffer.refCnt());
    assertEquals(0, header.refCnt());
  }

  // Releasing the message must call release() on the ManagedBuffer exactly once
  // and free the converted body buffer.
  @Test
  public void testDeallocateReleasesManagedBuffer() throws Exception {
    ByteBuf header = Unpooled.copyLong(42);
    ManagedBuffer managedBuf = Mockito.spy(new TestManagedBuffer(84));
    ByteBuf body = (ByteBuf) managedBuf.convertToNetty();
    assertEquals(2, body.refCnt());
    MessageWithHeader msg = new MessageWithHeader(managedBuf, header, body, body.readableBytes());
    assertTrue(msg.release());
    Mockito.verify(managedBuf, Mockito.times(1)).release();
    assertEquals(0, body.refCnt());
  }

  // Writes a FileRegion body that emits `totalWrites` longs, `writesPerCall` per
  // transferTo call, and checks the header plus sequential payload values.
  private void testFileRegionBody(int totalWrites, int writesPerCall) throws Exception {
    ByteBuf header = Unpooled.copyLong(42);
    int headerLength = header.readableBytes();
    TestFileRegion region = new TestFileRegion(totalWrites, writesPerCall);
    MessageWithHeader msg = new MessageWithHeader(null, header, region, region.count());
    ByteBuf result = doWrite(msg, totalWrites / writesPerCall);
    assertEquals(headerLength + region.count(), result.readableBytes());
    assertEquals(42, result.readLong());
    // TestFileRegion writes the sequence 0..7 as longs.
    for (long i = 0; i < 8; i++) {
      assertEquals(i, result.readLong());
    }
    assertTrue(msg.release());
  }

  // Drains the message into an in-memory channel, asserting that the message
  // needed at least `minExpectedWrites` transferTo calls to finish.
  private ByteBuf doWrite(MessageWithHeader msg, int minExpectedWrites) throws Exception {
    int writes = 0;
    ByteArrayWritableChannel channel = new ByteArrayWritableChannel((int) msg.count());
    // NOTE: transfered() is the (deprecated-spelling) Netty accessor; kept as-is.
    while (msg.transfered() < msg.count()) {
      msg.transferTo(channel, msg.transfered());
      writes++;
    }
    assertTrue("Not enough writes!", minExpectedWrites <= writes);
    return Unpooled.wrappedBuffer(channel.getData());
  }

  /**
   * FileRegion stub that produces `writeCount` longs (values 0..writeCount-1),
   * writing `writesPerCall` of them per transferTo invocation.
   */
  private static class TestFileRegion extends AbstractFileRegion {

    private final int writeCount;
    private final int writesPerCall;
    private int written;

    TestFileRegion(int totalWrites, int writesPerCall) {
      this.writeCount = totalWrites;
      this.writesPerCall = writesPerCall;
    }

    @Override
    public long count() {
      return 8 * writeCount;
    }

    @Override
    public long position() {
      return 0;
    }

    @Override
    public long transferred() {
      return 8 * written;
    }

    @Override
    public long transferTo(WritableByteChannel target, long position) throws IOException {
      for (int i = 0; i < writesPerCall; i++) {
        // Derive the next value from the byte position (8 bytes per long).
        ByteBuf buf = Unpooled.copyLong((position / 8) + i);
        ByteBuffer nio = buf.nioBuffer();
        while (nio.remaining() > 0) {
          target.write(nio);
        }
        buf.release();
        written++;
      }
      return 8 * writesPerCall;
    }

    @Override
    protected void deallocate() {
    }
  }
}
| 9,800 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/TransportContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.util.ArrayList;
import java.util.List;
import io.netty.channel.Channel;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.timeout.IdleStateHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientBootstrap;
import org.apache.spark.network.client.TransportClientFactory;
import org.apache.spark.network.client.TransportResponseHandler;
import org.apache.spark.network.protocol.MessageDecoder;
import org.apache.spark.network.protocol.MessageEncoder;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.TransportChannelHandler;
import org.apache.spark.network.server.TransportRequestHandler;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.util.NettyUtils;
import org.apache.spark.network.util.TransportConf;
import org.apache.spark.network.util.TransportFrameDecoder;
/**
* Contains the context to create a {@link TransportServer}, {@link TransportClientFactory}, and to
* setup Netty Channel pipelines with a
* {@link org.apache.spark.network.server.TransportChannelHandler}.
*
* There are two communication protocols that the TransportClient provides, control-plane RPCs and
* data-plane "chunk fetching". The handling of the RPCs is performed outside of the scope of the
* TransportContext (i.e., by a user-provided handler), and it is responsible for setting up streams
* which can be streamed through the data plane in chunks using zero-copy IO.
*
* The TransportServer and TransportClientFactory both create a TransportChannelHandler for each
* channel. As each TransportChannelHandler contains a TransportClient, this enables server
* processes to send messages back to the client on an existing channel.
*/
public class TransportContext {
  private static final Logger logger = LoggerFactory.getLogger(TransportContext.class);

  private final TransportConf conf;
  private final RpcHandler rpcHandler;
  // When true, TransportChannelHandler closes connections that stay idle past the timeout.
  private final boolean closeIdleConnections;

  /**
   * Force to create MessageEncoder and MessageDecoder so that we can make sure they will be created
   * before switching the current context class loader to ExecutorClassLoader.
   *
   * Netty's MessageToMessageEncoder uses Javassist to generate a matcher class and the
   * implementation calls "Class.forName" to check if this calls is already generated. If the
   * following two objects are created in "ExecutorClassLoader.findClass", it will cause
   * "ClassCircularityError". This is because loading this Netty generated class will call
   * "ExecutorClassLoader.findClass" to search this class, and "ExecutorClassLoader" will try to use
   * RPC to load it and cause to load the non-exist matcher class again. JVM will report
   * `ClassCircularityError` to prevent such infinite recursion. (See SPARK-17714)
   */
  private static final MessageEncoder ENCODER = MessageEncoder.INSTANCE;
  private static final MessageDecoder DECODER = MessageDecoder.INSTANCE;

  public TransportContext(TransportConf conf, RpcHandler rpcHandler) {
    this(conf, rpcHandler, false);
  }

  public TransportContext(
      TransportConf conf,
      RpcHandler rpcHandler,
      boolean closeIdleConnections) {
    this.conf = conf;
    this.rpcHandler = rpcHandler;
    this.closeIdleConnections = closeIdleConnections;
  }

  /**
   * Initializes a ClientFactory which runs the given TransportClientBootstraps prior to returning
   * a new Client. Bootstraps will be executed synchronously, and must run successfully in order
   * to create a Client.
   */
  public TransportClientFactory createClientFactory(List<TransportClientBootstrap> bootstraps) {
    return new TransportClientFactory(this, bootstraps);
  }

  /** Initializes a ClientFactory with no bootstraps. */
  public TransportClientFactory createClientFactory() {
    return createClientFactory(new ArrayList<>());
  }

  /** Create a server which will attempt to bind to a specific port. */
  public TransportServer createServer(int port, List<TransportServerBootstrap> bootstraps) {
    return new TransportServer(this, null, port, rpcHandler, bootstraps);
  }

  /** Create a server which will attempt to bind to a specific host and port. */
  public TransportServer createServer(
      String host, int port, List<TransportServerBootstrap> bootstraps) {
    return new TransportServer(this, host, port, rpcHandler, bootstraps);
  }

  /** Creates a new server, binding to any available ephemeral port. */
  public TransportServer createServer(List<TransportServerBootstrap> bootstraps) {
    return createServer(0, bootstraps);
  }

  /** Creates a new server with no bootstraps, binding to any available ephemeral port. */
  public TransportServer createServer() {
    return createServer(0, new ArrayList<>());
  }

  /** Initializes the pipeline using the context's default RPC handler. */
  public TransportChannelHandler initializePipeline(SocketChannel channel) {
    return initializePipeline(channel, rpcHandler);
  }

  /**
   * Initializes a client or server Netty Channel Pipeline which encodes/decodes messages and
   * has a {@link org.apache.spark.network.server.TransportChannelHandler} to handle request or
   * response messages.
   *
   * @param channel The channel to initialize.
   * @param channelRpcHandler The RPC handler to use for the channel.
   *
   * @return Returns the created TransportChannelHandler, which includes a TransportClient that can
   * be used to communicate on this channel. The TransportClient is directly associated with a
   * ChannelHandler to ensure all users of the same channel get the same TransportClient object.
   */
  public TransportChannelHandler initializePipeline(
      SocketChannel channel,
      RpcHandler channelRpcHandler) {
    try {
      TransportChannelHandler channelHandler = createChannelHandler(channel, channelRpcHandler);
      // Handler order matters: outbound messages hit the encoder; inbound bytes are
      // re-framed by the frame decoder before the message decoder sees them.
      channel.pipeline()
        .addLast("encoder", ENCODER)
        .addLast(TransportFrameDecoder.HANDLER_NAME, NettyUtils.createFrameDecoder())
        .addLast("decoder", DECODER)
        .addLast("idleStateHandler", new IdleStateHandler(0, 0, conf.connectionTimeoutMs() / 1000))
        // NOTE: Chunks are currently guaranteed to be returned in the order of request, but this
        // would require more logic to guarantee if this were not part of the same event loop.
        .addLast("handler", channelHandler);
      return channelHandler;
    } catch (RuntimeException e) {
      logger.error("Error while initializing Netty pipeline", e);
      throw e;
    }
  }

  /**
   * Creates the server- and client-side handler which is used to handle both RequestMessages and
   * ResponseMessages. The channel is expected to have been successfully created, though certain
   * properties (such as the remoteAddress()) may not be available yet.
   */
  private TransportChannelHandler createChannelHandler(Channel channel, RpcHandler rpcHandler) {
    TransportResponseHandler responseHandler = new TransportResponseHandler(channel);
    TransportClient client = new TransportClient(channel, responseHandler);
    TransportRequestHandler requestHandler = new TransportRequestHandler(channel, client,
      rpcHandler, conf.maxChunksBeingTransferred());
    return new TransportChannelHandler(client, responseHandler, requestHandler,
      conf.connectionTimeoutMs(), closeIdleConnections);
  }

  public TransportConf getConf() { return conf; }
}
| 9,801 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/crypto/TransportCipher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.util.Properties;
import javax.crypto.spec.SecretKeySpec;
import javax.crypto.spec.IvParameterSpec;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.*;
import org.apache.commons.crypto.stream.CryptoInputStream;
import org.apache.commons.crypto.stream.CryptoOutputStream;
import org.apache.spark.network.util.AbstractFileRegion;
import org.apache.spark.network.util.ByteArrayReadableChannel;
import org.apache.spark.network.util.ByteArrayWritableChannel;
/**
* Cipher for encryption and decryption.
*/
/**
 * Cipher for encryption and decryption.
 *
 * Wraps Apache Commons Crypto streams and installs a pair of Netty channel handlers
 * (one outbound encrypting, one inbound decrypting) on a channel via
 * {@link #addToChannel(Channel)}.
 */
public class TransportCipher {
  @VisibleForTesting
  static final String ENCRYPTION_HANDLER_NAME = "TransportEncryption";
  private static final String DECRYPTION_HANDLER_NAME = "TransportDecryption";
  @VisibleForTesting
  static final int STREAM_BUFFER_SIZE = 1024 * 32;

  private final Properties conf;
  // Cipher transformation string (e.g. as accepted by Commons Crypto stream factories).
  private final String cipher;
  private final SecretKeySpec key;
  private final byte[] inIv;
  private final byte[] outIv;

  public TransportCipher(
      Properties conf,
      String cipher,
      SecretKeySpec key,
      byte[] inIv,
      byte[] outIv) {
    this.conf = conf;
    this.cipher = cipher;
    this.key = key;
    this.inIv = inIv;
    this.outIv = outIv;
  }

  public String getCipherTransformation() {
    return cipher;
  }

  @VisibleForTesting
  SecretKeySpec getKey() {
    return key;
  }

  /** The IV for the input channel (i.e. output channel of the remote side). */
  public byte[] getInputIv() {
    return inIv;
  }

  /** The IV for the output channel (i.e. input channel of the remote side). */
  public byte[] getOutputIv() {
    return outIv;
  }

  @VisibleForTesting
  CryptoOutputStream createOutputStream(WritableByteChannel ch) throws IOException {
    return new CryptoOutputStream(cipher, conf, ch, key, new IvParameterSpec(outIv));
  }

  private CryptoInputStream createInputStream(ReadableByteChannel ch) throws IOException {
    return new CryptoInputStream(cipher, conf, ch, key, new IvParameterSpec(inIv));
  }

  /**
   * Add handlers to channel.
   *
   * @param ch the channel for adding handlers
   * @throws IOException
   */
  public void addToChannel(Channel ch) throws IOException {
    ch.pipeline()
      .addFirst(ENCRYPTION_HANDLER_NAME, new EncryptionHandler(this))
      .addFirst(DECRYPTION_HANDLER_NAME, new DecryptionHandler(this));
  }

  /** Outbound handler that wraps each written message in an {@link EncryptedMessage}. */
  @VisibleForTesting
  static class EncryptionHandler extends ChannelOutboundHandlerAdapter {
    private final ByteArrayWritableChannel byteChannel;
    private final CryptoOutputStream cos;
    // Set to false once the underlying cipher has failed; see reportError().
    private boolean isCipherValid;

    EncryptionHandler(TransportCipher cipher) throws IOException {
      byteChannel = new ByteArrayWritableChannel(STREAM_BUFFER_SIZE);
      cos = cipher.createOutputStream(byteChannel);
      isCipherValid = true;
    }

    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
      throws Exception {
      ctx.write(createEncryptedMessage(msg), promise);
    }

    @VisibleForTesting
    EncryptedMessage createEncryptedMessage(Object msg) {
      return new EncryptedMessage(this, cos, msg, byteChannel);
    }

    @Override
    public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
      try {
        // Only close the crypto stream if the cipher is still usable (CRYPTO-141).
        if (isCipherValid) {
          cos.close();
        }
      } finally {
        super.close(ctx, promise);
      }
    }

    /**
     * SPARK-25535. Workaround for CRYPTO-141. Avoid further interaction with the underlying cipher
     * after an error occurs.
     */
    void reportError() {
      this.isCipherValid = false;
    }

    boolean isCipherValid() {
      return isCipherValid;
    }
  }

  /** Inbound handler that decrypts received bytes before passing them up the pipeline. */
  private static class DecryptionHandler extends ChannelInboundHandlerAdapter {
    private final CryptoInputStream cis;
    private final ByteArrayReadableChannel byteChannel;
    private boolean isCipherValid;

    DecryptionHandler(TransportCipher cipher) throws IOException {
      byteChannel = new ByteArrayReadableChannel();
      cis = cipher.createInputStream(byteChannel);
      isCipherValid = true;
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object data) throws Exception {
      if (!isCipherValid) {
        throw new IOException("Cipher is in invalid state.");
      }
      byteChannel.feedData((ByteBuf) data);
      byte[] decryptedData = new byte[byteChannel.readableBytes()];
      int offset = 0;
      while (offset < decryptedData.length) {
        // SPARK-25535: workaround for CRYPTO-141.
        try {
          offset += cis.read(decryptedData, offset, decryptedData.length - offset);
        } catch (InternalError ie) {
          // Mark the cipher unusable so later reads fail fast instead of touching it again.
          isCipherValid = false;
          throw ie;
        }
      }
      ctx.fireChannelRead(Unpooled.wrappedBuffer(decryptedData, 0, decryptedData.length));
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
      try {
        if (isCipherValid) {
          cis.close();
        }
      } finally {
        super.channelInactive(ctx);
      }
    }
  }

  /**
   * A {@code FileRegion} that lazily encrypts an underlying {@code ByteBuf} or
   * {@code FileRegion} body as it is transferred to the target channel.
   */
  @VisibleForTesting
  static class EncryptedMessage extends AbstractFileRegion {
    private final boolean isByteBuf;
    private final ByteBuf buf;
    private final FileRegion region;
    // Plaintext byte count; note transferred() tracks encrypted bytes written out.
    private final long count;
    private final CryptoOutputStream cos;
    private final EncryptionHandler handler;
    private long transferred;

    // Due to streaming issue CRYPTO-125: https://issues.apache.org/jira/browse/CRYPTO-125, it has
    // to utilize two helper ByteArrayWritableChannel for streaming. One is used to receive raw data
    // from upper handler, another is used to store encrypted data.
    private ByteArrayWritableChannel byteEncChannel;
    private ByteArrayWritableChannel byteRawChannel;
    private ByteBuffer currentEncrypted;

    EncryptedMessage(
        EncryptionHandler handler,
        CryptoOutputStream cos,
        Object msg,
        ByteArrayWritableChannel ch) {
      Preconditions.checkArgument(msg instanceof ByteBuf || msg instanceof FileRegion,
        "Unrecognized message type: %s", msg.getClass().getName());
      this.handler = handler;
      this.isByteBuf = msg instanceof ByteBuf;
      this.buf = isByteBuf ? (ByteBuf) msg : null;
      this.region = isByteBuf ? null : (FileRegion) msg;
      this.transferred = 0;
      this.byteRawChannel = new ByteArrayWritableChannel(STREAM_BUFFER_SIZE);
      this.cos = cos;
      this.byteEncChannel = ch;
      this.count = isByteBuf ? buf.readableBytes() : region.count();
    }

    @Override
    public long count() {
      return count;
    }

    @Override
    public long position() {
      return 0;
    }

    @Override
    public long transferred() {
      return transferred;
    }

    // touch/retain/release are forwarded to whichever underlying body exists so
    // Netty leak detection and reference counting stay accurate.
    @Override
    public EncryptedMessage touch(Object o) {
      super.touch(o);
      if (region != null) {
        region.touch(o);
      }
      if (buf != null) {
        buf.touch(o);
      }
      return this;
    }

    @Override
    public EncryptedMessage retain(int increment) {
      super.retain(increment);
      if (region != null) {
        region.retain(increment);
      }
      if (buf != null) {
        buf.retain(increment);
      }
      return this;
    }

    @Override
    public boolean release(int decrement) {
      if (region != null) {
        region.release(decrement);
      }
      if (buf != null) {
        buf.release(decrement);
      }
      return super.release(decrement);
    }

    @Override
    public long transferTo(WritableByteChannel target, long position) throws IOException {
      Preconditions.checkArgument(position == transferred(), "Invalid position.");
      if (transferred == count) {
        return 0;
      }
      long totalBytesWritten = 0L;
      do {
        if (currentEncrypted == null) {
          encryptMore();
        }
        long remaining = currentEncrypted.remaining();
        if (remaining == 0) {
          // Just for safety to avoid endless loop. It usually won't happen, but since the
          // underlying `region.transferTo` is allowed to transfer 0 bytes, we should handle it for
          // safety.
          currentEncrypted = null;
          byteEncChannel.reset();
          return totalBytesWritten;
        }
        long bytesWritten = target.write(currentEncrypted);
        totalBytesWritten += bytesWritten;
        transferred += bytesWritten;
        if (bytesWritten < remaining) {
          // break as the underlying buffer in "target" is full
          break;
        }
        currentEncrypted = null;
        byteEncChannel.reset();
      } while (transferred < count);
      return totalBytesWritten;
    }

    // Pulls the next chunk of plaintext from the body, pushes it through the crypto
    // stream, and exposes the resulting ciphertext via `currentEncrypted`.
    private void encryptMore() throws IOException {
      if (!handler.isCipherValid()) {
        throw new IOException("Cipher is in invalid state.");
      }
      byteRawChannel.reset();
      if (isByteBuf) {
        int copied = byteRawChannel.write(buf.nioBuffer());
        buf.skipBytes(copied);
      } else {
        region.transferTo(byteRawChannel, region.transferred());
      }
      try {
        cos.write(byteRawChannel.getData(), 0, byteRawChannel.length());
        cos.flush();
      } catch (InternalError ie) {
        // SPARK-25535 / CRYPTO-141: stop using the shared cipher after an internal failure.
        handler.reportError();
        throw ie;
      }
      currentEncrypted = ByteBuffer.wrap(byteEncChannel.getData(),
        0, byteEncChannel.length());
    }

    @Override
    protected void deallocate() {
      byteRawChannel.reset();
      byteEncChannel.reset();
      if (region != null) {
        region.release();
      }
      if (buf != null) {
        buf.release();
      }
    }
  }
}
| 9,802 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/crypto/AuthRpcHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.nio.ByteBuffer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.StreamCallbackWithID;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.sasl.SaslRpcHandler;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.util.TransportConf;
/**
* RPC Handler which performs authentication using Spark's auth protocol before delegating to a
* child RPC handler. If the configuration allows, this handler will delegate messages to a SASL
* RPC handler for further authentication, to support for clients that do not support Spark's
* protocol.
*
* The delegate will only receive messages if the given connection has been successfully
* authenticated. A connection may be authenticated at most once.
*/
/**
 * RPC handler that performs Spark-protocol authentication on the first message of a
 * connection, then delegates every subsequent message to the wrapped handler (or to a
 * SASL handler when falling back). See the class Javadoc in the surrounding file for
 * the protocol description.
 */
class AuthRpcHandler extends RpcHandler {
  private static final Logger LOG = LoggerFactory.getLogger(AuthRpcHandler.class);

  /** Transport configuration. */
  private final TransportConf conf;

  /** The client channel. */
  private final Channel channel;

  /**
   * RpcHandler we will delegate to for authenticated connections. When falling back to SASL
   * this will be replaced with the SASL RPC handler.
   */
  @VisibleForTesting
  RpcHandler delegate;

  /** Class which provides secret keys which are shared by server and client on a per-app basis. */
  private final SecretKeyHolder secretKeyHolder;

  /** Whether auth is done and future calls should be delegated. */
  @VisibleForTesting
  boolean doDelegate;

  AuthRpcHandler(
      TransportConf conf,
      Channel channel,
      RpcHandler delegate,
      SecretKeyHolder secretKeyHolder) {
    this.conf = conf;
    this.channel = channel;
    this.delegate = delegate;
    this.secretKeyHolder = secretKeyHolder;
  }

  /**
   * Handles the auth handshake for the first message; once authenticated (or once SASL
   * fallback is engaged), all messages are forwarded to the delegate.
   */
  @Override
  public void receive(TransportClient client, ByteBuffer message, RpcResponseCallback callback) {
    if (doDelegate) {
      delegate.receive(client, message, callback);
      return;
    }

    // Remember the buffer's bounds so it can be rewound if this turns out to be a SASL message.
    int position = message.position();
    int limit = message.limit();

    ClientChallenge challenge;
    try {
      challenge = ClientChallenge.decodeMessage(message);
      LOG.debug("Received new auth challenge for client {}.", channel.remoteAddress());
    } catch (RuntimeException e) {
      if (conf.saslFallback()) {
        LOG.warn("Failed to parse new auth challenge, reverting to SASL for client {}.",
          channel.remoteAddress());
        delegate = new SaslRpcHandler(conf, channel, delegate, secretKeyHolder);
        message.position(position);
        message.limit(limit);
        delegate.receive(client, message, callback);
        doDelegate = true;
      } else {
        LOG.debug("Unexpected challenge message from client {}, closing channel.",
          channel.remoteAddress());
        callback.onFailure(new IllegalArgumentException("Unknown challenge message."));
        channel.close();
      }
      return;
    }

    // Here we have the client challenge, so perform the new auth protocol and set up the channel.
    AuthEngine engine = null;
    try {
      String secret = secretKeyHolder.getSecretKey(challenge.appId);
      Preconditions.checkState(secret != null,
        "Trying to authenticate non-registered app %s.", challenge.appId);
      LOG.debug("Authenticating challenge for app {}.", challenge.appId);
      engine = new AuthEngine(challenge.appId, secret, conf);
      ServerResponse response = engine.respond(challenge);
      ByteBuf responseData = Unpooled.buffer(response.encodedLength());
      response.encode(responseData);
      callback.onSuccess(responseData.nioBuffer());
      engine.sessionCipher().addToChannel(channel);
    } catch (Exception e) {
      // This is a fatal error: authentication has failed. Close the channel explicitly.
      LOG.debug("Authentication failed for client {}, closing channel.", channel.remoteAddress());
      callback.onFailure(new IllegalArgumentException("Authentication failed."));
      channel.close();
      return;
    } finally {
      if (engine != null) {
        try {
          engine.close();
        } catch (Exception e) {
          // Guava's Throwables.propagate() is deprecated; this is its documented
          // replacement (rethrow unchecked as-is, wrap checked in RuntimeException).
          // NOTE(review): throwing from a finally block masks any exception or return
          // in flight above; preserved for compatibility with previous behavior.
          if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
          }
          throw new RuntimeException(e);
        }
      }
    }

    LOG.debug("Authorization successful for client {}.", channel.remoteAddress());
    doDelegate = true;
  }

  @Override
  public void receive(TransportClient client, ByteBuffer message) {
    delegate.receive(client, message);
  }

  @Override
  public StreamCallbackWithID receiveStream(
      TransportClient client,
      ByteBuffer message,
      RpcResponseCallback callback) {
    return delegate.receiveStream(client, message, callback);
  }

  @Override
  public StreamManager getStreamManager() {
    return delegate.getStreamManager();
  }

  @Override
  public void channelActive(TransportClient client) {
    delegate.channelActive(client);
  }

  @Override
  public void channelInactive(TransportClient client) {
    delegate.channelInactive(client);
  }

  @Override
  public void exceptionCaught(Throwable cause, TransportClient client) {
    delegate.exceptionCaught(cause, client);
  }
}
| 9,803 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/crypto/AuthClientBootstrap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.GeneralSecurityException;
import com.google.common.base.Throwables;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientBootstrap;
import org.apache.spark.network.sasl.SaslClientBootstrap;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.util.TransportConf;
/**
* Bootstraps a {@link TransportClient} by performing authentication using Spark's auth protocol.
*
* This bootstrap falls back to using the SASL bootstrap if the server throws an error during
* authentication, and the configuration allows it. This is used for backwards compatibility
* with external shuffle services that do not support the new protocol.
*
* It also automatically falls back to SASL if the new encryption backend is disabled, so that
* callers only need to install this bootstrap when authentication is enabled.
*/
public class AuthClientBootstrap implements TransportClientBootstrap {
private static final Logger LOG = LoggerFactory.getLogger(AuthClientBootstrap.class);
private final TransportConf conf;
private final String appId;
private final SecretKeyHolder secretKeyHolder;
/**
 * Creates the bootstrap.
 *
 * @param conf transport configuration (controls encryption enablement and SASL fallback).
 * @param appId application identifier used to look up the shared secret.
 * @param secretKeyHolder provider of per-app shared secrets.
 */
public AuthClientBootstrap(
    TransportConf conf,
    String appId,
    SecretKeyHolder secretKeyHolder) {
  this.conf = conf;
  // TODO: right now this behaves like the SASL backend, because when executors start up
  // they don't necessarily know the app ID. So they send a hardcoded "user" that is defined
  // in the SecurityManager, which will also always return the same secret (regardless of the
  // user name). All that's needed here is for this "user" to match on both sides, since that's
  // required by the protocol. At some point, though, it would be better for the actual app ID
  // to be provided here.
  this.appId = appId;
  this.secretKeyHolder = secretKeyHolder;
}
@Override
public void doBootstrap(TransportClient client, Channel channel) {
if (!conf.encryptionEnabled()) {
LOG.debug("AES encryption disabled, using old auth protocol.");
doSaslAuth(client, channel);
return;
}
try {
doSparkAuth(client, channel);
} catch (GeneralSecurityException | IOException e) {
throw Throwables.propagate(e);
} catch (RuntimeException e) {
// There isn't a good exception that can be caught here to know whether it's really
// OK to switch back to SASL (because the server doesn't speak the new protocol). So
// try it anyway, and in the worst case things will fail again.
if (conf.saslFallback()) {
LOG.warn("New auth protocol failed, trying SASL.", e);
doSaslAuth(client, channel);
} else {
throw e;
}
}
}
private void doSparkAuth(TransportClient client, Channel channel)
throws GeneralSecurityException, IOException {
String secretKey = secretKeyHolder.getSecretKey(appId);
try (AuthEngine engine = new AuthEngine(appId, secretKey, conf)) {
ClientChallenge challenge = engine.challenge();
ByteBuf challengeData = Unpooled.buffer(challenge.encodedLength());
challenge.encode(challengeData);
ByteBuffer responseData =
client.sendRpcSync(challengeData.nioBuffer(), conf.authRTTimeoutMs());
ServerResponse response = ServerResponse.decodeMessage(responseData);
engine.validate(response);
engine.sessionCipher().addToChannel(channel);
}
}
private void doSaslAuth(TransportClient client, Channel channel) {
SaslClientBootstrap sasl = new SaslClientBootstrap(conf, appId, secretKeyHolder);
sasl.doBootstrap(client, channel);
}
}
| 9,804 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/crypto/AuthEngine.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.io.Closeable;
import java.io.IOException;
import java.math.BigInteger;
import java.security.GeneralSecurityException;
import java.util.Arrays;
import java.util.Properties;
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.ShortBufferException;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Bytes;
import org.apache.commons.crypto.cipher.CryptoCipher;
import org.apache.commons.crypto.cipher.CryptoCipherFactory;
import org.apache.commons.crypto.random.CryptoRandom;
import org.apache.commons.crypto.random.CryptoRandomFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.util.TransportConf;
/**
* A helper class for abstracting authentication and key negotiation details. This is used by
* both client and server sides, since the operations are basically the same.
*/
class AuthEngine implements Closeable {

  private static final Logger LOG = LoggerFactory.getLogger(AuthEngine.class);
  // Used by rawResponse() to compute "challenge + 1".
  private static final BigInteger ONE = new BigInteger(new byte[] { 0x1 });

  // UTF-8 bytes of the application ID; embedded in every challenge to tie it to this app.
  private final byte[] appId;
  // The shared secret, kept as a char[] for use with PBEKeySpec.
  private final char[] secret;
  private final TransportConf conf;
  // commons-crypto configuration extracted from the transport conf.
  private final Properties cryptoConf;
  private final CryptoRandom random;

  // Nonce generated on the client side by challenge(); also the KDF salt for the auth key.
  private byte[] authNonce;

  // Raw (unencrypted) client challenge bytes; validate() checks the server answered with this + 1.
  @VisibleForTesting
  byte[] challenge;

  // Cipher for the negotiated session; set by respond()/validate(), exposed via sessionCipher().
  private TransportCipher sessionCipher;
  // Auth-phase ciphers; set by initializeForAuth(), nulled out on close() or InternalError.
  private CryptoCipher encryptor;
  private CryptoCipher decryptor;

  AuthEngine(String appId, String secret, TransportConf conf) throws GeneralSecurityException {
    this.appId = appId.getBytes(UTF_8);
    this.conf = conf;
    this.cryptoConf = conf.cryptoConf();
    this.secret = secret.toCharArray();
    this.random = CryptoRandomFactory.getCryptoRandom(cryptoConf);
  }

  /**
   * Create the client challenge.
   *
   * @return A challenge to be sent the remote side.
   */
  ClientChallenge challenge() throws GeneralSecurityException {
    // The nonce doubles as the KDF salt, so the server can derive the same auth key.
    this.authNonce = randomBytes(conf.encryptionKeyLength() / Byte.SIZE);
    SecretKeySpec authKey = generateKey(conf.keyFactoryAlgorithm(), conf.keyFactoryIterations(),
      authNonce, conf.encryptionKeyLength());
    initializeForAuth(conf.cipherTransformation(), authNonce, authKey);

    this.challenge = randomBytes(conf.encryptionKeyLength() / Byte.SIZE);
    return new ClientChallenge(new String(appId, UTF_8),
      conf.keyFactoryAlgorithm(),
      conf.keyFactoryIterations(),
      conf.cipherTransformation(),
      conf.encryptionKeyLength(),
      authNonce,
      challenge(appId, authNonce, challenge));
  }

  /**
   * Validates the client challenge, and create the encryption backend for the channel from the
   * parameters sent by the client.
   *
   * @param clientChallenge The challenge from the client.
   * @return A response to be sent to the client.
   */
  ServerResponse respond(ClientChallenge clientChallenge)
    throws GeneralSecurityException {

    // Derive the auth key from the client-provided KDF parameters so both sides agree.
    SecretKeySpec authKey = generateKey(clientChallenge.kdf, clientChallenge.iterations,
      clientChallenge.nonce, clientChallenge.keyLength);
    initializeForAuth(clientChallenge.cipher, clientChallenge.nonce, authKey);

    byte[] challenge = validateChallenge(clientChallenge.nonce, clientChallenge.challenge);
    byte[] response = challenge(appId, clientChallenge.nonce, rawResponse(challenge));
    byte[] sessionNonce = randomBytes(conf.encryptionKeyLength() / Byte.SIZE);
    byte[] inputIv = randomBytes(conf.ivLength());
    byte[] outputIv = randomBytes(conf.ivLength());

    // The session key is salted with a fresh nonce, which is sent (encrypted) to the client.
    SecretKeySpec sessionKey = generateKey(clientChallenge.kdf, clientChallenge.iterations,
      sessionNonce, clientChallenge.keyLength);
    this.sessionCipher = new TransportCipher(cryptoConf, clientChallenge.cipher, sessionKey,
      inputIv, outputIv);

    // Note the IVs are swapped in the response.
    return new ServerResponse(response, encrypt(sessionNonce), encrypt(outputIv), encrypt(inputIv));
  }

  /**
   * Validates the server response and initializes the cipher to use for the session.
   *
   * @param serverResponse The response from the server.
   */
  void validate(ServerResponse serverResponse) throws GeneralSecurityException {
    byte[] response = validateChallenge(authNonce, serverResponse.response);

    // The server must have answered with our original challenge plus one (see rawResponse()).
    byte[] expected = rawResponse(challenge);
    Preconditions.checkArgument(Arrays.equals(expected, response));

    byte[] nonce = decrypt(serverResponse.nonce);
    byte[] inputIv = decrypt(serverResponse.inputIv);
    byte[] outputIv = decrypt(serverResponse.outputIv);

    SecretKeySpec sessionKey = generateKey(conf.keyFactoryAlgorithm(), conf.keyFactoryIterations(),
      nonce, conf.encryptionKeyLength());
    this.sessionCipher = new TransportCipher(cryptoConf, conf.cipherTransformation(), sessionKey,
      inputIv, outputIv);
  }

  /** Returns the negotiated session cipher; only valid after challenge/respond has completed. */
  TransportCipher sessionCipher() {
    Preconditions.checkState(sessionCipher != null);
    return sessionCipher;
  }

  @Override
  public void close() throws IOException {
    // Close ciphers (by calling "doFinal()" with dummy data) and the random instance so that
    // internal state is cleaned up. Error handling here is just for paranoia, and not meant to
    // accurately report the errors when they happen.
    RuntimeException error = null;
    byte[] dummy = new byte[8];
    if (encryptor != null) {
      try {
        doCipherOp(Cipher.ENCRYPT_MODE, dummy, true);
      } catch (Exception e) {
        error = new RuntimeException(e);
      }
      encryptor = null;
    }
    if (decryptor != null) {
      try {
        doCipherOp(Cipher.DECRYPT_MODE, dummy, true);
      } catch (Exception e) {
        error = new RuntimeException(e);
      }
      decryptor = null;
    }
    random.close();

    // Only the last error is reported; earlier ones are intentionally dropped (see above).
    if (error != null) {
      throw error;
    }
  }

  /** Builds an encrypted challenge blob: encrypt(appId || nonce || challenge). */
  @VisibleForTesting
  byte[] challenge(byte[] appId, byte[] nonce, byte[] challenge) throws GeneralSecurityException {
    return encrypt(Bytes.concat(appId, nonce, challenge));
  }

  /** The expected answer to a challenge: the challenge bytes as a signed big-endian int, plus 1. */
  @VisibleForTesting
  byte[] rawResponse(byte[] challenge) {
    BigInteger orig = new BigInteger(challenge);
    BigInteger response = orig.add(ONE);
    return response.toByteArray();
  }

  private byte[] decrypt(byte[] in) throws GeneralSecurityException {
    return doCipherOp(Cipher.DECRYPT_MODE, in, false);
  }

  private byte[] encrypt(byte[] in) throws GeneralSecurityException {
    return doCipherOp(Cipher.ENCRYPT_MODE, in, false);
  }

  /** Initializes the auth-phase encryptor/decryptor from the derived key and a nonce-based IV. */
  private void initializeForAuth(String cipher, byte[] nonce, SecretKeySpec key)
    throws GeneralSecurityException {

    // commons-crypto currently only supports ciphers that require an initial vector; so
    // create a dummy vector so that we can initialize the ciphers. In the future, if
    // different ciphers are supported, this will have to be configurable somehow.
    byte[] iv = new byte[conf.ivLength()];
    System.arraycopy(nonce, 0, iv, 0, Math.min(nonce.length, iv.length));

    CryptoCipher _encryptor = CryptoCipherFactory.getCryptoCipher(cipher, cryptoConf);
    _encryptor.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(iv));
    this.encryptor = _encryptor;

    CryptoCipher _decryptor = CryptoCipherFactory.getCryptoCipher(cipher, cryptoConf);
    _decryptor.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));
    this.decryptor = _decryptor;
  }

  /**
   * Validates an encrypted challenge as defined in the protocol, and returns the byte array
   * that corresponds to the actual challenge data.
   */
  private byte[] validateChallenge(byte[] nonce, byte[] encryptedChallenge)
    throws GeneralSecurityException {

    byte[] challenge = decrypt(encryptedChallenge);
    // Decrypted layout is appId || nonce || challenge; see the challenge() overload above.
    checkSubArray(appId, challenge, 0);
    checkSubArray(nonce, challenge, appId.length);
    return Arrays.copyOfRange(challenge, appId.length + nonce.length, challenge.length);
  }

  /** Derives a key from the shared secret via the given KDF, salted with the provided bytes. */
  private SecretKeySpec generateKey(String kdf, int iterations, byte[] salt, int keyLength)
    throws GeneralSecurityException {

    SecretKeyFactory factory = SecretKeyFactory.getInstance(kdf);
    PBEKeySpec spec = new PBEKeySpec(secret, salt, iterations, keyLength);

    long start = System.nanoTime();
    SecretKey key = factory.generateSecret(spec);
    long end = System.nanoTime();

    LOG.debug("Generated key with {} iterations in {} us.", conf.keyFactoryIterations(),
      (end - start) / 1000);

    return new SecretKeySpec(key.getEncoded(), conf.keyAlgorithm());
  }

  /**
   * Runs an auth-phase cipher operation. Retries with a doubled output buffer for as long as the
   * cipher reports ShortBufferException, then trims the result down to the actual output size.
   */
  private byte[] doCipherOp(int mode, byte[] in, boolean isFinal)
    throws GeneralSecurityException {

    CryptoCipher cipher;
    switch (mode) {
      case Cipher.ENCRYPT_MODE:
        cipher = encryptor;
        break;
      case Cipher.DECRYPT_MODE:
        cipher = decryptor;
        break;
      default:
        throw new IllegalArgumentException(String.valueOf(mode));
    }

    Preconditions.checkState(cipher != null, "Cipher is invalid because of previous error.");

    try {
      int scale = 1;
      while (true) {
        int size = in.length * scale;
        byte[] buffer = new byte[size];
        try {
          int outSize = isFinal ? cipher.doFinal(in, 0, in.length, buffer, 0)
            : cipher.update(in, 0, in.length, buffer, 0);
          if (outSize != buffer.length) {
            // Copy to an exact-size array so callers never see trailing garbage.
            byte[] output = new byte[outSize];
            System.arraycopy(buffer, 0, output, 0, output.length);
            return output;
          } else {
            return buffer;
          }
        } catch (ShortBufferException e) {
          // Try again with a bigger buffer.
          scale *= 2;
        }
      }
    } catch (InternalError ie) {
      // SPARK-25535. The commons-cryto library will throw InternalError if something goes wrong,
      // and leave bad state behind in the Java wrappers, so it's not safe to use them afterwards.
      if (mode == Cipher.ENCRYPT_MODE) {
        this.encryptor = null;
      } else {
        this.decryptor = null;
      }
      throw ie;
    }
  }

  /** Returns {@code count} bytes from the configured secure random source. */
  private byte[] randomBytes(int count) {
    byte[] bytes = new byte[count];
    random.nextBytes(bytes);
    return bytes;
  }

  /** Checks that the "test" array is in the data array starting at the given offset. */
  private void checkSubArray(byte[] test, byte[] data, int offset) {
    Preconditions.checkArgument(data.length >= test.length + offset);
    for (int i = 0; i < test.length; i++) {
      Preconditions.checkArgument(test[i] == data[i + offset]);
    }
  }

}
| 9,805 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/crypto/AuthServerBootstrap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import io.netty.channel.Channel;
import org.apache.spark.network.sasl.SaslServerBootstrap;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.util.TransportConf;
/**
* A bootstrap which is executed on a TransportServer's client channel once a client connects
* to the server, enabling authentication using Spark's auth protocol (and optionally SASL for
* clients that don't support the new protocol).
*
* It also automatically falls back to SASL if the new encryption backend is disabled, so that
* callers only need to install this bootstrap when authentication is enabled.
*/
public class AuthServerBootstrap implements TransportServerBootstrap {
private final TransportConf conf;
private final SecretKeyHolder secretKeyHolder;
public AuthServerBootstrap(TransportConf conf, SecretKeyHolder secretKeyHolder) {
this.conf = conf;
this.secretKeyHolder = secretKeyHolder;
}
public RpcHandler doBootstrap(Channel channel, RpcHandler rpcHandler) {
if (!conf.encryptionEnabled()) {
TransportServerBootstrap sasl = new SaslServerBootstrap(conf, secretKeyHolder);
return sasl.doBootstrap(channel, rpcHandler);
}
return new AuthRpcHandler(conf, channel, rpcHandler, secretKeyHolder);
}
}
| 9,806 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/crypto/ClientChallenge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.nio.ByteBuffer;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.protocol.Encodable;
import org.apache.spark.network.protocol.Encoders;
/**
* The client challenge message, used to initiate authentication.
*
* Please see crypto/README.md for more details of implementation.
*/
public class ClientChallenge implements Encodable {
  /** Serialization tag used to catch incorrect payloads. */
  private static final byte TAG_BYTE = (byte) 0xFA;

  public final String appId;
  public final String kdf;
  public final int iterations;
  public final String cipher;
  public final int keyLength;
  public final byte[] nonce;
  public final byte[] challenge;

  public ClientChallenge(
      String appId,
      String kdf,
      int iterations,
      String cipher,
      int keyLength,
      byte[] nonce,
      byte[] challenge) {
    this.appId = appId;
    this.kdf = kdf;
    this.iterations = iterations;
    this.cipher = cipher;
    this.keyLength = keyLength;
    this.nonce = nonce;
    this.challenge = challenge;
  }

  @Override
  public int encodedLength() {
    // One tag byte plus two 4-byte ints (iterations, keyLength), then the variable-size fields.
    int fixedPart = 1 + 4 + 4;
    return fixedPart
      + Encoders.Strings.encodedLength(appId)
      + Encoders.Strings.encodedLength(kdf)
      + Encoders.Strings.encodedLength(cipher)
      + Encoders.ByteArrays.encodedLength(nonce)
      + Encoders.ByteArrays.encodedLength(challenge);
  }

  @Override
  public void encode(ByteBuf buf) {
    // Field order here must match decodeMessage() exactly.
    buf.writeByte(TAG_BYTE);
    Encoders.Strings.encode(buf, appId);
    Encoders.Strings.encode(buf, kdf);
    buf.writeInt(iterations);
    Encoders.Strings.encode(buf, cipher);
    buf.writeInt(keyLength);
    Encoders.ByteArrays.encode(buf, nonce);
    Encoders.ByteArrays.encode(buf, challenge);
  }

  /** Decodes a ClientChallenge from the given buffer, checking the tag byte first. */
  public static ClientChallenge decodeMessage(ByteBuffer buffer) {
    ByteBuf in = Unpooled.wrappedBuffer(buffer);

    if (in.readByte() != TAG_BYTE) {
      throw new IllegalArgumentException("Expected ClientChallenge, received something else.");
    }

    String appId = Encoders.Strings.decode(in);
    String kdf = Encoders.Strings.decode(in);
    int iterations = in.readInt();
    String cipher = Encoders.Strings.decode(in);
    int keyLength = in.readInt();
    byte[] nonce = Encoders.ByteArrays.decode(in);
    byte[] challenge = Encoders.ByteArrays.decode(in);
    return new ClientChallenge(appId, kdf, iterations, cipher, keyLength, nonce, challenge);
  }

}
| 9,807 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/crypto/ServerResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.nio.ByteBuffer;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.protocol.Encodable;
import org.apache.spark.network.protocol.Encoders;
/**
* Server's response to client's challenge.
*
* Please see crypto/README.md for more details.
*/
public class ServerResponse implements Encodable {
  /** Serialization tag used to catch incorrect payloads. */
  private static final byte TAG_BYTE = (byte) 0xFB;

  public final byte[] response;
  public final byte[] nonce;
  public final byte[] inputIv;
  public final byte[] outputIv;

  public ServerResponse(
      byte[] response,
      byte[] nonce,
      byte[] inputIv,
      byte[] outputIv) {
    this.response = response;
    this.nonce = nonce;
    this.inputIv = inputIv;
    this.outputIv = outputIv;
  }

  @Override
  public int encodedLength() {
    // One tag byte followed by four length-prefixed byte arrays.
    int total = 1;
    total += Encoders.ByteArrays.encodedLength(response);
    total += Encoders.ByteArrays.encodedLength(nonce);
    total += Encoders.ByteArrays.encodedLength(inputIv);
    total += Encoders.ByteArrays.encodedLength(outputIv);
    return total;
  }

  @Override
  public void encode(ByteBuf buf) {
    // Field order here must match decodeMessage() exactly.
    buf.writeByte(TAG_BYTE);
    Encoders.ByteArrays.encode(buf, response);
    Encoders.ByteArrays.encode(buf, nonce);
    Encoders.ByteArrays.encode(buf, inputIv);
    Encoders.ByteArrays.encode(buf, outputIv);
  }

  /** Decodes a ServerResponse from the given buffer, checking the tag byte first. */
  public static ServerResponse decodeMessage(ByteBuffer buffer) {
    ByteBuf in = Unpooled.wrappedBuffer(buffer);

    if (in.readByte() != TAG_BYTE) {
      throw new IllegalArgumentException("Expected ServerResponse, received something else.");
    }

    byte[] response = Encoders.ByteArrays.decode(in);
    byte[] nonce = Encoders.ByteArrays.decode(in);
    byte[] inputIv = Encoders.ByteArrays.decode(in);
    byte[] outputIv = Encoders.ByteArrays.decode(in);
    return new ServerResponse(response, nonce, inputIv, outputIv);
  }

}
| 9,808 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/util/IOMode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
/**
 * Selector for which form of low-level IO we should use.
 * NIO is always available, while EPOLL is only available on Linux.
 */
public enum IOMode {
  NIO, EPOLL
}
| 9,809 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/util/CryptoUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.util.Map;
import java.util.Properties;
/**
* Utility methods related to the commons-crypto library.
*/
public class CryptoUtils {

  // The prefix for the configurations passing to Apache Commons Crypto library.
  public static final String COMMONS_CRYPTO_CONFIG_PREFIX = "commons.crypto.";

  // Utility class; not meant to be instantiated.
  private CryptoUtils() {}

  /**
   * Extract the commons-crypto configuration embedded in a list of config values.
   *
   * @param prefix Prefix in the given configuration that identifies the commons-crypto configs.
   * @param conf List of configuration values.
   * @return Properties containing only the matching entries, re-keyed under
   *         {@link #COMMONS_CRYPTO_CONFIG_PREFIX}.
   */
  public static Properties toCryptoConf(String prefix, Iterable<Map.Entry<String, String>> conf) {
    Properties props = new Properties();
    for (Map.Entry<String, String> e : conf) {
      String key = e.getKey();
      if (key.startsWith(prefix)) {
        // Strip the Spark-side prefix and replace it with the commons-crypto one.
        props.setProperty(COMMONS_CRYPTO_CONFIG_PREFIX + key.substring(prefix.length()),
          e.getValue());
      }
    }
    return props;
  }
}
| 9,810 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/util/MapConfigProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.NoSuchElementException;
/** ConfigProvider based on a Map (copied in the constructor). */
public class MapConfigProvider extends ConfigProvider {

  public static final MapConfigProvider EMPTY = new MapConfigProvider(Collections.emptyMap());

  // Defensive copy of the caller's map, taken at construction time.
  private final Map<String, String> values;

  public MapConfigProvider(Map<String, String> config) {
    this.values = new HashMap<>(config);
  }

  @Override
  public String get(String name) {
    String value = values.get(name);
    if (value != null) {
      return value;
    }
    // Absent key (or a key mapped to null) is an error for this variant.
    throw new NoSuchElementException(name);
  }

  @Override
  public String get(String name, String defaultValue) {
    String value = values.get(name);
    return (value != null) ? value : defaultValue;
  }

  @Override
  public Iterable<Map.Entry<String, String>> getAll() {
    return values.entrySet();
  }

}
| 9,811 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.util.Locale;
import java.util.Properties;
import com.google.common.primitives.Ints;
/**
* A central location that tracks all the settings we expose to users.
*/
public class TransportConf {
private final String SPARK_NETWORK_IO_MODE_KEY;
private final String SPARK_NETWORK_IO_PREFERDIRECTBUFS_KEY;
private final String SPARK_NETWORK_IO_CONNECTIONTIMEOUT_KEY;
private final String SPARK_NETWORK_IO_BACKLOG_KEY;
private final String SPARK_NETWORK_IO_NUMCONNECTIONSPERPEER_KEY;
private final String SPARK_NETWORK_IO_SERVERTHREADS_KEY;
private final String SPARK_NETWORK_IO_CLIENTTHREADS_KEY;
private final String SPARK_NETWORK_IO_RECEIVEBUFFER_KEY;
private final String SPARK_NETWORK_IO_SENDBUFFER_KEY;
private final String SPARK_NETWORK_SASL_TIMEOUT_KEY;
private final String SPARK_NETWORK_IO_MAXRETRIES_KEY;
private final String SPARK_NETWORK_IO_RETRYWAIT_KEY;
private final String SPARK_NETWORK_IO_LAZYFD_KEY;
private final String SPARK_NETWORK_VERBOSE_METRICS;
private final ConfigProvider conf;
private final String module;
public TransportConf(String module, ConfigProvider conf) {
this.module = module;
this.conf = conf;
SPARK_NETWORK_IO_MODE_KEY = getConfKey("io.mode");
SPARK_NETWORK_IO_PREFERDIRECTBUFS_KEY = getConfKey("io.preferDirectBufs");
SPARK_NETWORK_IO_CONNECTIONTIMEOUT_KEY = getConfKey("io.connectionTimeout");
SPARK_NETWORK_IO_BACKLOG_KEY = getConfKey("io.backLog");
SPARK_NETWORK_IO_NUMCONNECTIONSPERPEER_KEY = getConfKey("io.numConnectionsPerPeer");
SPARK_NETWORK_IO_SERVERTHREADS_KEY = getConfKey("io.serverThreads");
SPARK_NETWORK_IO_CLIENTTHREADS_KEY = getConfKey("io.clientThreads");
SPARK_NETWORK_IO_RECEIVEBUFFER_KEY = getConfKey("io.receiveBuffer");
SPARK_NETWORK_IO_SENDBUFFER_KEY = getConfKey("io.sendBuffer");
SPARK_NETWORK_SASL_TIMEOUT_KEY = getConfKey("sasl.timeout");
SPARK_NETWORK_IO_MAXRETRIES_KEY = getConfKey("io.maxRetries");
SPARK_NETWORK_IO_RETRYWAIT_KEY = getConfKey("io.retryWait");
SPARK_NETWORK_IO_LAZYFD_KEY = getConfKey("io.lazyFD");
SPARK_NETWORK_VERBOSE_METRICS = getConfKey("io.enableVerboseMetrics");
}
public int getInt(String name, int defaultValue) {
return conf.getInt(name, defaultValue);
}
public String get(String name, String defaultValue) {
return conf.get(name, defaultValue);
}
private String getConfKey(String suffix) {
return "spark." + module + "." + suffix;
}
public String getModuleName() {
return module;
}
/** IO mode: nio or epoll */
public String ioMode() {
return conf.get(SPARK_NETWORK_IO_MODE_KEY, "NIO").toUpperCase(Locale.ROOT);
}
/** If true, we will prefer allocating off-heap byte buffers within Netty. */
public boolean preferDirectBufs() {
return conf.getBoolean(SPARK_NETWORK_IO_PREFERDIRECTBUFS_KEY, true);
}
/** Connect timeout in milliseconds. Default 120 secs. */
public int connectionTimeoutMs() {
long defaultNetworkTimeoutS = JavaUtils.timeStringAsSec(
conf.get("spark.network.timeout", "120s"));
long defaultTimeoutMs = JavaUtils.timeStringAsSec(
conf.get(SPARK_NETWORK_IO_CONNECTIONTIMEOUT_KEY, defaultNetworkTimeoutS + "s")) * 1000;
return (int) defaultTimeoutMs;
}
/** Number of concurrent connections between two nodes for fetching data. */
public int numConnectionsPerPeer() {
return conf.getInt(SPARK_NETWORK_IO_NUMCONNECTIONSPERPEER_KEY, 1);
}
/** Requested maximum length of the queue of incoming connections. Default is 64. */
public int backLog() { return conf.getInt(SPARK_NETWORK_IO_BACKLOG_KEY, 64); }
/** Number of threads used in the server thread pool. Default to 0, which is 2x#cores. */
public int serverThreads() { return conf.getInt(SPARK_NETWORK_IO_SERVERTHREADS_KEY, 0); }
/** Number of threads used in the client thread pool. Default to 0, which is 2x#cores. */
public int clientThreads() { return conf.getInt(SPARK_NETWORK_IO_CLIENTTHREADS_KEY, 0); }
/**
* Receive buffer size (SO_RCVBUF).
* Note: the optimal size for receive buffer and send buffer should be
* latency * network_bandwidth.
* Assuming latency = 1ms, network_bandwidth = 10Gbps
* buffer size should be ~ 1.25MB
*/
public int receiveBuf() { return conf.getInt(SPARK_NETWORK_IO_RECEIVEBUFFER_KEY, -1); }
/** Send buffer size (SO_SNDBUF). */
public int sendBuf() { return conf.getInt(SPARK_NETWORK_IO_SENDBUFFER_KEY, -1); }
/** Timeout for a single round trip of auth message exchange, in milliseconds. */
public int authRTTimeoutMs() {
return (int) JavaUtils.timeStringAsSec(conf.get("spark.network.auth.rpcTimeout",
conf.get(SPARK_NETWORK_SASL_TIMEOUT_KEY, "30s"))) * 1000;
}
/**
* Max number of times we will try IO exceptions (such as connection timeouts) per request.
* If set to 0, we will not do any retries.
*/
public int maxIORetries() { return conf.getInt(SPARK_NETWORK_IO_MAXRETRIES_KEY, 3); }
/**
* Time (in milliseconds) that we will wait in order to perform a retry after an IOException.
* Only relevant if maxIORetries > 0.
*/
public int ioRetryWaitTimeMs() {
return (int) JavaUtils.timeStringAsSec(conf.get(SPARK_NETWORK_IO_RETRYWAIT_KEY, "5s")) * 1000;
}
/**
 * Minimum block size (in bytes) at which memory mapping is used instead of
 * normal IO. Memory mapping has high overhead for blocks close to or below
 * the OS page size, so very small blocks are read through regular IO.
 */
public int memoryMapBytes() {
  String threshold = conf.get("spark.storage.memoryMapThreshold", "2m");
  return Ints.checkedCast(JavaUtils.byteStringAsBytes(threshold));
}
/**
 * Whether file descriptors are created lazily, i.e. only once data is about
 * to be transferred. Reduces the number of simultaneously open files.
 */
public boolean lazyFileDescriptor() {
  return conf.getBoolean(SPARK_NETWORK_IO_LAZYFD_KEY, true);
}
/**
 * Whether detailed Netty PooledByteBufAllocator metrics are collected; when
 * false, only general memory usage is tracked.
 */
public boolean verboseMetrics() {
  return conf.getBoolean(SPARK_NETWORK_VERBOSE_METRICS, false);
}
/** Maximum number of port-binding attempts before giving up. */
public int portMaxRetries() {
  final int defaultRetries = 16;
  return conf.getInt("spark.port.maxRetries", defaultRetries);
}
/**
 * Whether strong encryption is enabled. Also enables the new auth protocol,
 * which is used to negotiate keys.
 */
public boolean encryptionEnabled() {
  return conf.getBoolean("spark.network.crypto.enabled", false);
}
/** Cipher transformation used to encrypt session data. */
public String cipherTransformation() {
  return conf.get("spark.network.crypto.cipher", "AES/CTR/NoPadding");
}
/**
 * Key generation algorithm; must accept a "PBEKeySpec" as input. The default
 * (PBKDF2WithHmacSHA1) is available from Java 7 onwards.
 */
public String keyFactoryAlgorithm() {
  return conf.get("spark.network.crypto.keyFactoryAlgorithm", "PBKDF2WithHmacSHA1");
}
/**
 * Iteration count used when generating keys.
 *
 * The default was picked for speed: it assumes the secret already has good
 * entropy (128 bits by default), which is generally not the case with user
 * passwords. See http://security.stackexchange.com/q/3959 for discussion.
 */
public int keyFactoryIterations() {
  final int defaultIterations = 1024;
  return conf.getInt("spark.network.crypto.keyFactoryIterations", defaultIterations);
}
/** Length of encryption keys, in bits. */
public int encryptionKeyLength() {
  return conf.getInt("spark.network.crypto.keyLength", 128);
}
/** Length of the initial vector, in bytes. */
public int ivLength() {
  return conf.getInt("spark.network.crypto.ivLength", 16);
}
/**
 * Algorithm of the generated secret keys. Configurable just in case, though
 * nobody should really need to change it.
 */
public String keyAlgorithm() {
  return conf.get("spark.network.crypto.keyAlgorithm", "AES");
}
/**
 * Whether to fall back to SASL when the new auth protocol fails. Enabled by
 * default for backwards compatibility.
 */
public boolean saslFallback() {
  return conf.getBoolean("spark.network.crypto.saslFallback", true);
}
/** Whether SASL-based encryption is enabled when authenticating using SASL. */
public boolean saslEncryption() {
  return conf.getBoolean("spark.authenticate.enableSaslEncryption", false);
}
/** Maximum number of bytes encrypted at a time when SASL encryption is used. */
public int maxSaslEncryptedBlockSize() {
  String blockSize = conf.get("spark.network.sasl.maxEncryptedBlockSize", "64k");
  return Ints.checkedCast(JavaUtils.byteStringAsBytes(blockSize));
}
/** Whether the server enforces encryption on SASL-authenticated connections. */
public boolean saslServerAlwaysEncrypt() {
  return conf.getBoolean("spark.network.sasl.serverAlwaysEncrypt", false);
}
/** The commons-crypto configuration for this module. */
public Properties cryptoConf() {
  return CryptoUtils.toCryptoConf("spark.network.crypto.config.", conf.getAll());
}
/**
 * Maximum number of chunks allowed to be transferred at the same time on the
 * shuffle service. New incoming connections are closed once this limit is
 * hit; the client retries per the shuffle retry configs (see
 * `spark.shuffle.io.maxRetries` and `spark.shuffle.io.retryWait`), and once
 * those limits are exhausted the task fails with a fetch failure.
 */
public long maxChunksBeingTransferred() {
  return conf.getLong("spark.shuffle.maxChunksBeingTransferred", Long.MAX_VALUE);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
/**
 * Binary byte units (powers of 1024) with overflow-checked conversions.
 */
public enum ByteUnit {
  BYTE(1),
  KiB(1024L),
  MiB((long) Math.pow(1024L, 2L)),
  GiB((long) Math.pow(1024L, 3L)),
  TiB((long) Math.pow(1024L, 4L)),
  PiB((long) Math.pow(1024L, 5L));

  ByteUnit(long multiplier) {
    this.multiplier = multiplier;
  }

  // Interpret the provided number (d) with suffix (u) as this unit type.
  // E.g. KiB.interpret(1, MiB) interprets 1MiB as its KiB representation = 1024k
  public long convertFrom(long d, ByteUnit u) {
    return u.convertTo(d, this);
  }

  // Convert the provided number (d) interpreted as this unit type to unit type (u).
  public long convertTo(long d, ByteUnit u) {
    if (multiplier > u.multiplier) {
      long ratio = multiplier / u.multiplier;
      if (Long.MAX_VALUE / ratio < d) {
        throw new IllegalArgumentException("Conversion of " + d + " exceeds Long.MAX_VALUE in "
          + name() + ". Try a larger unit (e.g. MiB instead of KiB)");
      }
      return d * ratio;
    } else {
      // Perform operations in this order to avoid potential overflow
      // when computing d * multiplier
      return d / (u.multiplier / multiplier);
    }
  }

  /**
   * Returns the number of bytes represented by {@code d} of this unit.
   *
   * @throws IllegalArgumentException if {@code d} is negative
   */
  public double toBytes(long d) {
    if (d < 0) {
      throw new IllegalArgumentException("Negative size value. Size must be positive: " + d);
    }
    // Widen to double BEFORE multiplying: the previous long multiplication
    // (d * multiplier) could silently overflow for large values even though
    // the mathematically-correct result fits comfortably in a double.
    return (double) d * multiplier;
  }

  public long toKiB(long d) { return convertTo(d, KiB); }
  public long toMiB(long d) { return convertTo(d, MiB); }
  public long toGiB(long d) { return convertTo(d, GiB); }
  public long toTiB(long d) { return convertTo(d, TiB); }
  public long toPiB(long d) { return convertTo(d, PiB); }

  private final long multiplier;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.util.Map;
import java.util.NoSuchElementException;
/**
 * Provides a mechanism for constructing a {@link TransportConf} using some sort of configuration.
 */
public abstract class ConfigProvider {
  /** Obtains the value of the given config, throws NoSuchElementException if it doesn't exist. */
  public abstract String get(String name);

  /** Returns all the config values in the provider. */
  public abstract Iterable<Map.Entry<String, String>> getAll();

  /** Returns the value of {@code name}, or {@code defaultValue} when it is not set. */
  public String get(String name, String defaultValue) {
    try {
      return get(name);
    } catch (NoSuchElementException e) {
      return defaultValue;
    }
  }

  /** Returns the value of {@code name} parsed as an int, or {@code defaultValue} when unset. */
  public int getInt(String name, int defaultValue) {
    try {
      return Integer.parseInt(get(name));
    } catch (NoSuchElementException e) {
      return defaultValue;
    }
  }

  /** Returns the value of {@code name} parsed as a long, or {@code defaultValue} when unset. */
  public long getLong(String name, long defaultValue) {
    try {
      return Long.parseLong(get(name));
    } catch (NoSuchElementException e) {
      return defaultValue;
    }
  }

  /** Returns the value of {@code name} parsed as a double, or {@code defaultValue} when unset. */
  public double getDouble(String name, double defaultValue) {
    try {
      return Double.parseDouble(get(name));
    } catch (NoSuchElementException e) {
      return defaultValue;
    }
  }

  /** Returns the value of {@code name} parsed as a boolean, or {@code defaultValue} when unset. */
  public boolean getBoolean(String name, boolean defaultValue) {
    try {
      return Boolean.parseBoolean(get(name));
    } catch (NoSuchElementException e) {
      return defaultValue;
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
/**
 * A writable channel that buffers everything written to it in a fixed-capacity
 * in-memory byte array.
 */
public class ByteArrayWritableChannel implements WritableByteChannel {

  private final byte[] data;
  private int offset;

  public ByteArrayWritableChannel(int size) {
    this.data = new byte[size];
  }

  /** Returns the backing array; only the first {@link #length()} bytes are valid. */
  public byte[] getData() {
    return data;
  }

  /** Number of bytes written so far. */
  public int length() {
    return offset;
  }

  /** Resets the channel so that writing to it will overwrite the existing buffer. */
  public void reset() {
    offset = 0;
  }

  /**
   * Copies as many bytes as fit from the given buffer into the internal array.
   *
   * @return the number of bytes actually consumed from {@code src}
   */
  @Override
  public int write(ByteBuffer src) {
    int capacityLeft = data.length - offset;
    int count = src.remaining() <= capacityLeft ? src.remaining() : capacityLeft;
    src.get(data, offset, count);
    offset += count;
    return count;
  }

  @Override
  public void close() {
  }

  @Override
  public boolean isOpen() {
    return true;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Based on LimitedInputStream.java from Google Guava
*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import com.google.common.base.Preconditions;
/**
 * Wraps a {@link InputStream}, limiting the number of bytes which can be read.
 *
 * This code is from Guava's 14.0 source code, because there is no compatible way to
 * use this functionality in both a Guava 11 environment and a Guava >14 environment.
 */
public final class LimitedInputStream extends FilterInputStream {
  private final boolean closeWrappedStream;
  // Number of bytes that may still be read before the limit is reached.
  private long left;
  // Value of `left` captured by the last mark() call; -1 means no mark is set.
  private long mark = -1;

  // Convenience constructor: closes the wrapped stream on close().
  public LimitedInputStream(InputStream in, long limit) {
    this(in, limit, true);
  }

  /**
   * Create a LimitedInputStream that will read {@code limit} bytes from {@code in}.
   * <p>
   * If {@code closeWrappedStream} is true, this will close {@code in} when it is closed.
   * Otherwise, the stream is left open for reading its remaining content.
   *
   * @param in a {@link InputStream} to read from
   * @param limit the number of bytes to read
   * @param closeWrappedStream whether to close {@code in} when {@link #close} is called
   */
  public LimitedInputStream(InputStream in, long limit, boolean closeWrappedStream) {
    super(in);
    this.closeWrappedStream = closeWrappedStream;
    Preconditions.checkNotNull(in);
    Preconditions.checkArgument(limit >= 0, "limit must be non-negative");
    left = limit;
  }

  // Never report more available bytes than the remaining budget allows.
  @Override public int available() throws IOException {
    return (int) Math.min(in.available(), left);
  }

  // it's okay to mark even if mark isn't supported, as reset won't work
  @Override public synchronized void mark(int readLimit) {
    in.mark(readLimit);
    mark = left;
  }

  @Override public int read() throws IOException {
    if (left == 0) {
      // Limit reached: report EOF even if the wrapped stream has more data.
      return -1;
    }
    int result = in.read();
    if (result != -1) {
      --left;
    }
    return result;
  }

  @Override public int read(byte[] b, int off, int len) throws IOException {
    if (left == 0) {
      return -1;
    }
    // Clamp the request so we never read past the limit.
    len = (int) Math.min(len, left);
    int result = in.read(b, off, len);
    if (result != -1) {
      left -= result;
    }
    return result;
  }

  @Override public synchronized void reset() throws IOException {
    if (!in.markSupported()) {
      throw new IOException("Mark not supported");
    }
    if (mark == -1) {
      throw new IOException("Mark not set");
    }
    in.reset();
    // Restore the byte budget that was in effect when mark() was called.
    left = mark;
  }

  @Override public long skip(long n) throws IOException {
    n = Math.min(n, left);
    long skipped = in.skip(n);
    left -= skipped;
    return skipped;
  }

  @Override
  public void close() throws IOException {
    // Leave the wrapped stream open when asked to, so callers can keep
    // reading its remaining (un-limited) content.
    if (closeWrappedStream) {
      super.close();
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import io.netty.channel.FileRegion;
import io.netty.util.AbstractReferenceCounted;
/**
 * Convenience base class for Netty {@code FileRegion} implementations: bridges the
 * deprecated {@code transfered()} spelling to {@code transferred()} and narrows the
 * fluent retain/touch return types to {@code AbstractFileRegion}.
 */
public abstract class AbstractFileRegion extends AbstractReferenceCounted implements FileRegion {

  // Deprecated Netty spelling; final so subclasses only override transferred().
  @Override
  @SuppressWarnings("deprecation")
  public final long transfered() {
    return transferred();
  }

  @Override
  public AbstractFileRegion retain() {
    super.retain();
    return this;
  }

  @Override
  public AbstractFileRegion retain(int increment) {
    super.retain(increment);
    return this;
  }

  @Override
  public AbstractFileRegion touch() {
    super.touch();
    return this;
  }

  @Override
  public AbstractFileRegion touch(Object o) {
    // NOTE(review): unlike touch(), this ignores the hint object and does not
    // delegate to super — presumably intentional (touch hints only aid
    // leak-detection debugging), but worth confirming.
    return this;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.charset.StandardCharsets;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import io.netty.buffer.Unpooled;
import org.apache.commons.lang3.SystemUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * General utilities available in the network package. Many of these are sourced from Spark's
 * own Utils, just accessible within this package.
 */
public class JavaUtils {
  private static final Logger logger = LoggerFactory.getLogger(JavaUtils.class);

  /**
   * Define a default value for driver memory here since this value is referenced across the code
   * base and nearly all files already use Utils.scala
   */
  public static final long DEFAULT_DRIVER_MEM_MB = 1024;

  // Pre-compiled patterns for parsing time/byte config strings. Compiling once
  // avoids re-compiling the regex on every configuration lookup; Pattern
  // instances are immutable and thread-safe.
  private static final Pattern TIME_STRING_PATTERN = Pattern.compile("(-?[0-9]+)([a-z]+)?");
  private static final Pattern BYTE_STRING_PATTERN = Pattern.compile("([0-9]+)([a-z]+)?");
  private static final Pattern BYTE_STRING_FRACTION_PATTERN =
    Pattern.compile("([0-9]+\\.[0-9]+)([a-z]+)?");

  /** Closes the given object, ignoring IOExceptions. */
  public static void closeQuietly(Closeable closeable) {
    try {
      if (closeable != null) {
        closeable.close();
      }
    } catch (IOException e) {
      logger.error("IOException should not have been thrown.", e);
    }
  }

  /** Returns a hash consistent with Spark's Utils.nonNegativeHash(). */
  public static int nonNegativeHash(Object obj) {
    if (obj == null) { return 0; }
    int hash = obj.hashCode();
    // Math.abs(Integer.MIN_VALUE) is still negative, so map it to 0 explicitly.
    return hash != Integer.MIN_VALUE ? Math.abs(hash) : 0;
  }

  /**
   * Convert the given string to a byte buffer. The resulting buffer can be
   * converted back to the same string through {@link #bytesToString(ByteBuffer)}.
   */
  public static ByteBuffer stringToBytes(String s) {
    return Unpooled.wrappedBuffer(s.getBytes(StandardCharsets.UTF_8)).nioBuffer();
  }

  /**
   * Convert the given byte buffer to a string. The resulting string can be
   * converted back to the same byte buffer through {@link #stringToBytes(String)}.
   */
  public static String bytesToString(ByteBuffer b) {
    return Unpooled.wrappedBuffer(b).toString(StandardCharsets.UTF_8);
  }

  /**
   * Delete a file or directory and its contents recursively.
   * Don't follow directories if they are symlinks.
   *
   * @param file Input file / dir to be deleted
   * @throws IOException if deletion is unsuccessful
   */
  public static void deleteRecursively(File file) throws IOException {
    deleteRecursively(file, null);
  }

  /**
   * Delete a file or directory and its contents recursively.
   * Don't follow directories if they are symlinks.
   *
   * @param file Input file / dir to be deleted
   * @param filter A filename filter that make sure only files / dirs with the satisfied filenames
   *               are deleted.
   * @throws IOException if deletion is unsuccessful
   */
  public static void deleteRecursively(File file, FilenameFilter filter) throws IOException {
    if (file == null) { return; }

    // On Unix systems, use operating system command to run faster
    // If that does not work out, fallback to the Java IO way
    if (SystemUtils.IS_OS_UNIX && filter == null) {
      try {
        deleteRecursivelyUsingUnixNative(file);
        return;
      } catch (IOException e) {
        logger.warn("Attempt to delete using native Unix OS command failed for path = {}. " +
            "Falling back to Java IO way", file.getAbsolutePath(), e);
      }
    }

    deleteRecursivelyUsingJavaIO(file, filter);
  }

  // Pure-Java recursive delete; used as a fallback and whenever a filter is given.
  private static void deleteRecursivelyUsingJavaIO(
      File file,
      FilenameFilter filter) throws IOException {
    if (file.isDirectory() && !isSymlink(file)) {
      IOException savedIOException = null;
      for (File child : listFilesSafely(file, filter)) {
        try {
          deleteRecursively(child, filter);
        } catch (IOException e) {
          // In case of multiple exceptions, only last one will be thrown
          savedIOException = e;
        }
      }
      if (savedIOException != null) {
        throw savedIOException;
      }
    }

    // Delete file only when it's a normal file or an empty directory.
    if (file.isFile() || (file.isDirectory() && listFilesSafely(file, null).length == 0)) {
      boolean deleted = file.delete();
      // Delete can also fail if the file simply did not exist.
      if (!deleted && file.exists()) {
        throw new IOException("Failed to delete: " + file.getAbsolutePath());
      }
    }
  }

  // Fast path on Unix: shells out to `rm -rf`.
  private static void deleteRecursivelyUsingUnixNative(File file) throws IOException {
    ProcessBuilder builder = new ProcessBuilder("rm", "-rf", file.getAbsolutePath());
    Process process = null;
    int exitCode = -1;

    try {
      // In order to avoid deadlocks, consume the stdout (and stderr) of the process
      builder.redirectErrorStream(true);
      builder.redirectOutput(new File("/dev/null"));

      process = builder.start();

      exitCode = process.waitFor();
    } catch (Exception e) {
      throw new IOException("Failed to delete: " + file.getAbsolutePath(), e);
    } finally {
      if (process != null) {
        process.destroy();
      }
    }

    if (exitCode != 0 || file.exists()) {
      throw new IOException("Failed to delete: " + file.getAbsolutePath());
    }
  }

  // Lists a directory, turning a null result (an IO error) into an exception.
  private static File[] listFilesSafely(File file, FilenameFilter filter) throws IOException {
    if (file.exists()) {
      File[] files = file.listFiles(filter);
      if (files == null) {
        throw new IOException("Failed to list files for dir: " + file);
      }
      return files;
    } else {
      return new File[0];
    }
  }

  // A file is a symlink when its canonical path differs from its absolute path
  // (resolved within its parent's canonical directory).
  private static boolean isSymlink(File file) throws IOException {
    Preconditions.checkNotNull(file);
    File fileInCanonicalDir = null;
    if (file.getParent() == null) {
      fileInCanonicalDir = file;
    } else {
      fileInCanonicalDir = new File(file.getParentFile().getCanonicalFile(), file.getName());
    }
    return !fileInCanonicalDir.getCanonicalFile().equals(fileInCanonicalDir.getAbsoluteFile());
  }

  private static final ImmutableMap<String, TimeUnit> timeSuffixes =
    ImmutableMap.<String, TimeUnit>builder()
      .put("us", TimeUnit.MICROSECONDS)
      .put("ms", TimeUnit.MILLISECONDS)
      .put("s", TimeUnit.SECONDS)
      .put("m", TimeUnit.MINUTES)
      .put("min", TimeUnit.MINUTES)
      .put("h", TimeUnit.HOURS)
      .put("d", TimeUnit.DAYS)
      .build();

  private static final ImmutableMap<String, ByteUnit> byteSuffixes =
    ImmutableMap.<String, ByteUnit>builder()
      .put("b", ByteUnit.BYTE)
      .put("k", ByteUnit.KiB)
      .put("kb", ByteUnit.KiB)
      .put("m", ByteUnit.MiB)
      .put("mb", ByteUnit.MiB)
      .put("g", ByteUnit.GiB)
      .put("gb", ByteUnit.GiB)
      .put("t", ByteUnit.TiB)
      .put("tb", ByteUnit.TiB)
      .put("p", ByteUnit.PiB)
      .put("pb", ByteUnit.PiB)
      .build();

  /**
   * Convert a passed time string (e.g. 50s, 100ms, or 250us) to a time count in the given unit.
   * The unit is also considered the default if the given string does not specify a unit.
   */
  public static long timeStringAs(String str, TimeUnit unit) {
    String lower = str.toLowerCase(Locale.ROOT).trim();

    try {
      Matcher m = TIME_STRING_PATTERN.matcher(lower);
      if (!m.matches()) {
        throw new NumberFormatException("Failed to parse time string: " + str);
      }

      long val = Long.parseLong(m.group(1));
      String suffix = m.group(2);

      // Check for invalid suffixes
      if (suffix != null && !timeSuffixes.containsKey(suffix)) {
        throw new NumberFormatException("Invalid suffix: \"" + suffix + "\"");
      }

      // If suffix is valid use that, otherwise none was provided and use the default passed
      return unit.convert(val, suffix != null ? timeSuffixes.get(suffix) : unit);
    } catch (NumberFormatException e) {
      String timeError = "Time must be specified as seconds (s), " +
              "milliseconds (ms), microseconds (us), minutes (m or min), hour (h), or day (d). " +
              "E.g. 50s, 100ms, or 250us.";

      throw new NumberFormatException(timeError + "\n" + e.getMessage());
    }
  }

  /**
   * Convert a time parameter such as (50s, 100ms, or 250us) to milliseconds for internal use. If
   * no suffix is provided, the passed number is assumed to be in ms.
   */
  public static long timeStringAsMs(String str) {
    return timeStringAs(str, TimeUnit.MILLISECONDS);
  }

  /**
   * Convert a time parameter such as (50s, 100ms, or 250us) to seconds for internal use. If
   * no suffix is provided, the passed number is assumed to be in seconds.
   */
  public static long timeStringAsSec(String str) {
    return timeStringAs(str, TimeUnit.SECONDS);
  }

  /**
   * Convert a passed byte string (e.g. 50b, 100kb, or 250mb) to the given. If no suffix is
   * provided, a direct conversion to the provided unit is attempted.
   */
  public static long byteStringAs(String str, ByteUnit unit) {
    String lower = str.toLowerCase(Locale.ROOT).trim();

    try {
      Matcher m = BYTE_STRING_PATTERN.matcher(lower);
      Matcher fractionMatcher = BYTE_STRING_FRACTION_PATTERN.matcher(lower);

      if (m.matches()) {
        long val = Long.parseLong(m.group(1));
        String suffix = m.group(2);

        // Check for invalid suffixes
        if (suffix != null && !byteSuffixes.containsKey(suffix)) {
          throw new NumberFormatException("Invalid suffix: \"" + suffix + "\"");
        }

        // If suffix is valid use that, otherwise none was provided and use the default passed
        return unit.convertFrom(val, suffix != null ? byteSuffixes.get(suffix) : unit);
      } else if (fractionMatcher.matches()) {
        throw new NumberFormatException("Fractional values are not supported. Input was: "
          + fractionMatcher.group(1));
      } else {
        throw new NumberFormatException("Failed to parse byte string: " + str);
      }

    } catch (NumberFormatException e) {
      String byteError = "Size must be specified as bytes (b), " +
        "kibibytes (k), mebibytes (m), gibibytes (g), tebibytes (t), or pebibytes(p). " +
        "E.g. 50b, 100k, or 250m.";

      throw new NumberFormatException(byteError + "\n" + e.getMessage());
    }
  }

  /**
   * Convert a passed byte string (e.g. 50b, 100k, or 250m) to bytes for
   * internal use.
   *
   * If no suffix is provided, the passed number is assumed to be in bytes.
   */
  public static long byteStringAsBytes(String str) {
    return byteStringAs(str, ByteUnit.BYTE);
  }

  /**
   * Convert a passed byte string (e.g. 50b, 100k, or 250m) to kibibytes for
   * internal use.
   *
   * If no suffix is provided, the passed number is assumed to be in kibibytes.
   */
  public static long byteStringAsKb(String str) {
    return byteStringAs(str, ByteUnit.KiB);
  }

  /**
   * Convert a passed byte string (e.g. 50b, 100k, or 250m) to mebibytes for
   * internal use.
   *
   * If no suffix is provided, the passed number is assumed to be in mebibytes.
   */
  public static long byteStringAsMb(String str) {
    return byteStringAs(str, ByteUnit.MiB);
  }

  /**
   * Convert a passed byte string (e.g. 50b, 100k, or 250m) to gibibytes for
   * internal use.
   *
   * If no suffix is provided, the passed number is assumed to be in gibibytes.
   */
  public static long byteStringAsGb(String str) {
    return byteStringAs(str, ByteUnit.GiB);
  }

  /**
   * Returns a byte array with the buffer's contents, trying to avoid copying the data if
   * possible.
   */
  public static byte[] bufferToArray(ByteBuffer buffer) {
    // Only safe to return the raw array when it exactly spans the remaining content.
    if (buffer.hasArray() && buffer.arrayOffset() == 0 &&
        buffer.array().length == buffer.remaining()) {
      return buffer.array();
    } else {
      byte[] bytes = new byte[buffer.remaining()];
      buffer.get(bytes);
      return bytes;
    }
  }

  /**
   * Fills a buffer with data read from the channel.
   *
   * @throws EOFException if the channel is exhausted before the buffer is full
   */
  public static void readFully(ReadableByteChannel channel, ByteBuffer dst) throws IOException {
    int expected = dst.remaining();
    while (dst.hasRemaining()) {
      if (channel.read(dst) < 0) {
        throw new EOFException(String.format("Not enough bytes in channel (expected %d).",
          expected));
      }
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import io.netty.buffer.ByteBuf;
/**
 * A readable channel that drains a Netty {@code ByteBuf} supplied via
 * {@link #feedData(ByteBuf)}; the buffer is released once fully consumed.
 */
public class ByteArrayReadableChannel implements ReadableByteChannel {
  // Buffer currently being drained.
  private ByteBuf data;

  /** Number of bytes still readable from the current buffer. */
  public int readableBytes() {
    return data.readableBytes();
  }

  /** Supplies a new buffer for subsequent reads. */
  public void feedData(ByteBuf buf) {
    data = buf;
  }

  @Override
  public int read(ByteBuffer dst) throws IOException {
    int copied = 0;
    for (;;) {
      int chunk = Math.min(data.readableBytes(), dst.remaining());
      if (chunk == 0) {
        break;
      }
      dst.put(data.readSlice(chunk).nioBuffer());
      copied += chunk;
    }

    // Release the netty buffer once it has been fully drained.
    if (data.readableBytes() == 0) {
      data.release();
    }

    return copied;
  }

  @Override
  public void close() throws IOException {
  }

  @Override
  public boolean isOpen() {
    return true;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.*;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.MetricSet;
import com.google.common.annotations.VisibleForTesting;
import io.netty.buffer.PoolArenaMetric;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocatorMetric;
/**
* A Netty memory metrics class to collect metrics from Netty PooledByteBufAllocator.
*/
public class NettyMemoryMetrics implements MetricSet {

  /** Allocator whose memory usage is being reported. */
  private final PooledByteBufAllocator pooledAllocator;

  /** Whether per-arena gauges should be registered in addition to the totals. */
  private final boolean verboseMetricsEnabled;

  /** All registered gauges, keyed by fully-qualified metric name. */
  private final Map<String, Metric> allMetrics;

  /** Prefix prepended to every metric name produced by this instance. */
  private final String metricPrefix;

  @VisibleForTesting
  static final Set<String> VERBOSE_METRICS = new HashSet<>();

  static {
    // Names of the PoolArenaMetric getters that are exposed as per-arena gauges
    // when verbose metrics are enabled.
    Collections.addAll(VERBOSE_METRICS,
      "numAllocations",
      "numTinyAllocations",
      "numSmallAllocations",
      "numNormalAllocations",
      "numHugeAllocations",
      "numDeallocations",
      "numTinyDeallocations",
      "numSmallDeallocations",
      "numNormalDeallocations",
      "numHugeDeallocations",
      "numActiveAllocations",
      "numActiveTinyAllocations",
      "numActiveSmallAllocations",
      "numActiveNormalAllocations",
      "numActiveHugeAllocations",
      "numActiveBytes");
  }

  /**
   * Builds the metric set for the given allocator; registration happens eagerly
   * in the constructor so the gauges are available immediately.
   */
  public NettyMemoryMetrics(PooledByteBufAllocator pooledAllocator,
      String metricPrefix,
      TransportConf conf) {
    this.pooledAllocator = pooledAllocator;
    this.allMetrics = new HashMap<>();
    this.metricPrefix = metricPrefix;
    this.verboseMetricsEnabled = conf.verboseMetrics();
    registerMetrics(this.pooledAllocator);
  }

  private void registerMetrics(PooledByteBufAllocator allocator) {
    PooledByteBufAllocatorMetric allocatorMetric = allocator.metric();

    // Always expose the aggregate heap / direct usage of the allocator.
    allMetrics.put(MetricRegistry.name(metricPrefix, "usedHeapMemory"),
      (Gauge<Long>) allocatorMetric::usedHeapMemory);
    allMetrics.put(MetricRegistry.name(metricPrefix, "usedDirectMemory"),
      (Gauge<Long>) allocatorMetric::usedDirectMemory);

    if (!verboseMetricsEnabled) {
      return;
    }

    // Per-arena breakdown, registered only when explicitly enabled via configuration.
    List<PoolArenaMetric> directArenas = allocatorMetric.directArenas();
    for (int i = 0; i < directArenas.size(); i++) {
      registerArenaMetric(directArenas.get(i), "directArena" + i);
    }
    List<PoolArenaMetric> heapArenas = allocatorMetric.heapArenas();
    for (int i = 0; i < heapArenas.size(); i++) {
      registerArenaMetric(heapArenas.get(i), "heapArena" + i);
    }
  }

  /**
   * Registers one gauge per {@link #VERBOSE_METRICS} getter for a single arena,
   * discovered reflectively so the code tolerates Netty versions that lack some
   * of the getters.
   */
  private void registerArenaMetric(PoolArenaMetric arenaMetric, String arenaName) {
    for (String methodName : VERBOSE_METRICS) {
      final Method getter;
      try {
        getter = PoolArenaMetric.class.getMethod(methodName);
      } catch (Exception e) {
        // This Netty version doesn't expose the metric; skip it.
        continue;
      }
      if (!Modifier.isPublic(getter.getModifiers())) {
        // Ignore non-public methods.
        continue;
      }
      String metricName = MetricRegistry.name(metricPrefix, arenaName, getter.getName());
      Class<?> returnType = getter.getReturnType();
      if (int.class.equals(returnType)) {
        allMetrics.put(metricName, (Gauge<Integer>) () -> {
          try {
            return (Integer) getter.invoke(arenaMetric);
          } catch (Exception e) {
            return -1; // Swallow the exceptions.
          }
        });
      } else if (long.class.equals(returnType)) {
        allMetrics.put(metricName, (Gauge<Long>) () -> {
          try {
            return (Long) getter.invoke(arenaMetric);
          } catch (Exception e) {
            return -1L; // Swallow the exceptions.
          }
        });
      }
    }
  }

  @Override
  public Map<String, Metric> getMetrics() {
    // Expose a read-only view; callers must not mutate the registry.
    return Collections.unmodifiableMap(allMetrics);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.util.concurrent.ThreadFactory;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.ServerChannel;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.epoll.EpollSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.DefaultThreadFactory;
import io.netty.util.internal.PlatformDependent;
/**
* Utilities for creating various Netty constructs based on whether we're using EPOLL or NIO.
*/
public class NettyUtils {

  /** Creates a new ThreadFactory which prefixes each thread with the given name. */
  public static ThreadFactory createThreadFactory(String threadPoolPrefix) {
    // Threads are daemons so they never block JVM shutdown.
    return new DefaultThreadFactory(threadPoolPrefix, true);
  }

  /** Creates a Netty EventLoopGroup based on the IOMode. */
  public static EventLoopGroup createEventLoop(IOMode mode, int numThreads, String threadPrefix) {
    ThreadFactory threadFactory = createThreadFactory(threadPrefix);
    if (mode == IOMode.NIO) {
      return new NioEventLoopGroup(numThreads, threadFactory);
    } else if (mode == IOMode.EPOLL) {
      return new EpollEventLoopGroup(numThreads, threadFactory);
    }
    throw new IllegalArgumentException("Unknown io mode: " + mode);
  }

  /** Returns the correct (client) SocketChannel class based on IOMode. */
  public static Class<? extends Channel> getClientChannelClass(IOMode mode) {
    if (mode == IOMode.NIO) {
      return NioSocketChannel.class;
    } else if (mode == IOMode.EPOLL) {
      return EpollSocketChannel.class;
    }
    throw new IllegalArgumentException("Unknown io mode: " + mode);
  }

  /** Returns the correct ServerSocketChannel class based on IOMode. */
  public static Class<? extends ServerChannel> getServerChannelClass(IOMode mode) {
    if (mode == IOMode.NIO) {
      return NioServerSocketChannel.class;
    } else if (mode == IOMode.EPOLL) {
      return EpollServerSocketChannel.class;
    }
    throw new IllegalArgumentException("Unknown io mode: " + mode);
  }

  /**
   * Creates a LengthFieldBasedFrameDecoder where the first 8 bytes are the length of the frame.
   * This is used before all decoders.
   */
  public static TransportFrameDecoder createFrameDecoder() {
    return new TransportFrameDecoder();
  }

  /** Returns the remote address on the channel or "&lt;unknown remote&gt;" if none exists. */
  public static String getRemoteAddress(Channel channel) {
    if (channel == null || channel.remoteAddress() == null) {
      return "<unknown remote>";
    }
    return channel.remoteAddress().toString();
  }

  /**
   * Create a pooled ByteBuf allocator but disables the thread-local cache. Thread-local caches
   * are disabled for TransportClients because the ByteBufs are allocated by the event loop thread,
   * but released by the executor thread rather than the event loop thread. Those thread-local
   * caches actually delay the recycling of buffers, leading to larger memory usage.
   */
  public static PooledByteBufAllocator createPooledByteBufAllocator(
      boolean allowDirectBufs,
      boolean allowCache,
      int numCores) {
    // Zero means "autodetect": fall back to the number of available processors.
    int cores = (numCores == 0) ? Runtime.getRuntime().availableProcessors() : numCores;
    boolean preferDirect = allowDirectBufs && PlatformDependent.directBufferPreferred();
    int heapArenas = Math.min(PooledByteBufAllocator.defaultNumHeapArena(), cores);
    int directArenas =
      Math.min(PooledByteBufAllocator.defaultNumDirectArena(), allowDirectBufs ? cores : 0);
    return new PooledByteBufAllocator(
      preferDirect,
      heapArenas,
      directArenas,
      PooledByteBufAllocator.defaultPageSize(),
      PooledByteBufAllocator.defaultMaxOrder(),
      allowCache ? PooledByteBufAllocator.defaultTinyCacheSize() : 0,
      allowCache ? PooledByteBufAllocator.defaultSmallCacheSize() : 0,
      allowCache ? PooledByteBufAllocator.defaultNormalCacheSize() : 0,
      allowCache && PooledByteBufAllocator.defaultUseCacheForAllThreads()
    );
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.util.LinkedList;
import com.google.common.base.Preconditions;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
/**
* A customized frame decoder that allows intercepting raw data.
* <p>
* This behaves like Netty's frame decoder (with hard coded parameters that match this library's
* needs), except it allows an interceptor to be installed to read data directly before it's
* framed.
* <p>
* Unlike Netty's frame decoder, each frame is dispatched to child handlers as soon as it's
* decoded, instead of building as many frames as the current buffer allows and dispatching
* all of them. This allows a child handler to install an interceptor if needed.
* <p>
* If an interceptor is installed, framing stops, and data is instead fed directly to the
* interceptor. When the interceptor indicates that it doesn't need to read any more data,
* framing resumes. Interceptors should not hold references to the data buffers provided
* to their handle() method.
*/
public class TransportFrameDecoder extends ChannelInboundHandlerAdapter {

  public static final String HANDLER_NAME = "frameDecoder";
  // Frames are prefixed by an 8-byte long holding the total frame length (prefix included).
  private static final int LENGTH_SIZE = 8;
  private static final int MAX_FRAME_SIZE = Integer.MAX_VALUE;
  private static final int UNKNOWN_FRAME_SIZE = -1;

  // Unconsumed network buffers, in arrival order. Buffers are released as they are drained.
  private final LinkedList<ByteBuf> buffers = new LinkedList<>();
  // Scratch buffer used only when the 8-byte length field is split across input buffers.
  private final ByteBuf frameLenBuf = Unpooled.buffer(LENGTH_SIZE, LENGTH_SIZE);

  // Total readable bytes currently held in `buffers`.
  private long totalSize = 0;
  // Body size of the frame being assembled (length prefix already subtracted), or
  // UNKNOWN_FRAME_SIZE if the length field hasn't been fully read yet.
  private long nextFrameSize = UNKNOWN_FRAME_SIZE;
  private volatile Interceptor interceptor;

  @Override
  public void channelRead(ChannelHandlerContext ctx, Object data) throws Exception {
    ByteBuf in = (ByteBuf) data;
    buffers.add(in);
    totalSize += in.readableBytes();
    while (!buffers.isEmpty()) {
      // First, feed the interceptor, and if it's still active, try again.
      if (interceptor != null) {
        ByteBuf first = buffers.getFirst();
        int available = first.readableBytes();
        if (feedInterceptor(first)) {
          assert !first.isReadable() : "Interceptor still active but buffer has data.";
        }
        // Account for whatever the interceptor consumed; drop the buffer once drained.
        int read = available - first.readableBytes();
        if (read == available) {
          buffers.removeFirst().release();
        }
        totalSize -= read;
      } else {
        // Interceptor is not active, so try to decode one frame.
        ByteBuf frame = decodeNext();
        if (frame == null) {
          break;
        }
        // Dispatch each frame as soon as it's decoded so a child handler may
        // install an interceptor before the next frame is processed.
        ctx.fireChannelRead(frame);
      }
    }
  }

  /**
   * Returns the body size of the next frame, reading the 8-byte length prefix if
   * enough data has arrived, or UNKNOWN_FRAME_SIZE otherwise.
   */
  private long decodeFrameSize() {
    if (nextFrameSize != UNKNOWN_FRAME_SIZE || totalSize < LENGTH_SIZE) {
      return nextFrameSize;
    }
    // We know there's enough data. If the first buffer contains all the data, great. Otherwise,
    // hold the bytes for the frame length in a composite buffer until we have enough data to read
    // the frame size. Normally, it should be rare to need more than one buffer to read the frame
    // size.
    ByteBuf first = buffers.getFirst();
    if (first.readableBytes() >= LENGTH_SIZE) {
      // The wire value includes the length prefix itself; subtract it to get the body size.
      nextFrameSize = first.readLong() - LENGTH_SIZE;
      totalSize -= LENGTH_SIZE;
      if (!first.isReadable()) {
        buffers.removeFirst().release();
      }
      return nextFrameSize;
    }
    // Slow path: copy bytes into frameLenBuf until the full length field is available.
    while (frameLenBuf.readableBytes() < LENGTH_SIZE) {
      ByteBuf next = buffers.getFirst();
      int toRead = Math.min(next.readableBytes(), LENGTH_SIZE - frameLenBuf.readableBytes());
      frameLenBuf.writeBytes(next, toRead);
      if (!next.isReadable()) {
        buffers.removeFirst().release();
      }
    }
    nextFrameSize = frameLenBuf.readLong() - LENGTH_SIZE;
    totalSize -= LENGTH_SIZE;
    frameLenBuf.clear();
    return nextFrameSize;
  }

  /**
   * Decodes and returns the next complete frame, or null if not enough data has
   * arrived yet. The returned buffer carries its own reference count.
   */
  private ByteBuf decodeNext() {
    long frameSize = decodeFrameSize();
    if (frameSize == UNKNOWN_FRAME_SIZE || totalSize < frameSize) {
      return null;
    }
    // Reset size for next frame.
    nextFrameSize = UNKNOWN_FRAME_SIZE;
    Preconditions.checkArgument(frameSize < MAX_FRAME_SIZE, "Too large frame: %s", frameSize);
    Preconditions.checkArgument(frameSize > 0, "Frame length should be positive: %s", frameSize);
    // If the first buffer holds the entire frame, return it.
    int remaining = (int) frameSize;
    if (buffers.getFirst().readableBytes() >= remaining) {
      return nextBufferForFrame(remaining);
    }
    // Otherwise, create a composite buffer.
    CompositeByteBuf frame = buffers.getFirst().alloc().compositeBuffer(Integer.MAX_VALUE);
    while (remaining > 0) {
      ByteBuf next = nextBufferForFrame(remaining);
      remaining -= next.readableBytes();
      // addComponent() does not advance the writer index, so bump it manually.
      frame.addComponent(next).writerIndex(frame.writerIndex() + next.readableBytes());
    }
    assert remaining == 0;
    return frame;
  }

  /**
   * Takes the first buffer in the internal list, and either adjust it to fit in the frame
   * (by taking a slice out of it) or remove it from the internal list.
   */
  private ByteBuf nextBufferForFrame(int bytesToRead) {
    ByteBuf buf = buffers.getFirst();
    ByteBuf frame;
    if (buf.readableBytes() > bytesToRead) {
      // retain() keeps the buffer alive for the slice's consumer; the remainder
      // stays at the head of `buffers` for the next frame.
      frame = buf.retain().readSlice(bytesToRead);
      totalSize -= bytesToRead;
    } else {
      // The whole buffer belongs to this frame; hand over its reference.
      frame = buf;
      buffers.removeFirst();
      totalSize -= frame.readableBytes();
    }
    return frame;
  }

  @Override
  public void channelInactive(ChannelHandlerContext ctx) throws Exception {
    // Release everything still buffered to avoid leaking reference-counted buffers.
    for (ByteBuf b : buffers) {
      b.release();
    }
    if (interceptor != null) {
      interceptor.channelInactive();
    }
    frameLenBuf.release();
    super.channelInactive(ctx);
  }

  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
    if (interceptor != null) {
      interceptor.exceptionCaught(cause);
    }
    super.exceptionCaught(ctx, cause);
  }

  public void setInterceptor(Interceptor interceptor) {
    Preconditions.checkState(this.interceptor == null, "Already have an interceptor.");
    this.interceptor = interceptor;
  }

  /**
   * @return Whether the interceptor is still active after processing the data.
   */
  private boolean feedInterceptor(ByteBuf buf) throws Exception {
    if (interceptor != null && !interceptor.handle(buf)) {
      interceptor = null;
    }
    return interceptor != null;
  }

  public interface Interceptor {

    /**
     * Handles data received from the remote end.
     *
     * @param data Buffer containing data.
     * @return "true" if the interceptor expects more data, "false" to uninstall the interceptor.
     */
    boolean handle(ByteBuf data) throws Exception;

    /** Called if an exception is thrown in the channel pipeline. */
    void exceptionCaught(Throwable cause) throws Exception;

    /** Called if the channel is closed and the interceptor is still installed. */
    void channelInactive() throws Exception;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.fusesource.leveldbjni.JniDBFactory;
import org.fusesource.leveldbjni.internal.NativeDB;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* LevelDB utility class available in the network package.
*/
public class LevelDBProvider {
  private static final Logger logger = LoggerFactory.getLogger(LevelDBProvider.class);

  /**
   * Opens the LevelDB at {@code dbFile}, creating it when absent and recreating it from
   * scratch when the on-disk data cannot be opened (assumed corrupt). After a successful
   * open, the stored version is validated against {@code version}.
   *
   * @param dbFile location of the database, or null to skip initialization and return null.
   * @param version expected major/minor version of the store's schema.
   * @param mapper JSON mapper used to (de)serialize the version record.
   * @return an open DB handle, or null if {@code dbFile} is null.
   * @throws IOException if the store cannot be created or its version is incompatible.
   */
  public static DB initLevelDB(File dbFile, StoreVersion version, ObjectMapper mapper) throws
      IOException {
    DB tmpDb = null;
    if (dbFile != null) {
      Options options = new Options();
      options.createIfMissing(false);
      options.logger(new LevelDBLogger());
      try {
        tmpDb = JniDBFactory.factory.open(dbFile, options);
      } catch (NativeDB.DBException e) {
        // getMessage() may be null for some native failures; guard before matching on it.
        String message = e.getMessage();
        if (e.isNotFound() || (message != null && message.contains(" does not exist "))) {
          logger.info("Creating state database at " + dbFile);
          options.createIfMissing(true);
          tmpDb = openOrThrow(dbFile, options);
        } else {
          // the leveldb file seems to be corrupt somehow. Lets just blow it away and create a new
          // one, so we can keep processing new apps
          logger.error("error opening leveldb file {}. Creating new file, will not be able to " +
              "recover state for existing applications", dbFile, e);
          if (dbFile.isDirectory()) {
            // listFiles() returns null on I/O error; skip per-child cleanup in that case.
            File[] children = dbFile.listFiles();
            if (children != null) {
              for (File f : children) {
                if (!f.delete()) {
                  logger.warn("error deleting {}", f.getPath());
                }
              }
            }
          }
          if (!dbFile.delete()) {
            logger.warn("error deleting {}", dbFile.getPath());
          }
          options.createIfMissing(true);
          tmpDb = openOrThrow(dbFile, options);
        }
      }
      // if there is a version mismatch, we throw an exception, which means the service is unusable
      checkVersion(tmpDb, version, mapper);
    }
    return tmpDb;
  }

  /** Opens the DB, translating native open failures into IOException. */
  private static DB openOrThrow(File dbFile, Options options) throws IOException {
    try {
      return JniDBFactory.factory.open(dbFile, options);
    } catch (NativeDB.DBException dbExc) {
      throw new IOException("Unable to create state store", dbExc);
    }
  }

  /** Bridges LevelDB's internal logging into SLF4J at INFO level. */
  private static class LevelDBLogger implements org.iq80.leveldb.Logger {
    private static final Logger LOG = LoggerFactory.getLogger(LevelDBLogger.class);

    @Override
    public void log(String message) {
      LOG.info(message);
    }
  }

  /**
   * Simple major.minor versioning scheme. Any incompatible changes should be across major
   * versions. Minor version differences are allowed -- meaning we should be able to read
   * dbs that are either earlier *or* later on the minor version.
   */
  public static void checkVersion(DB db, StoreVersion newversion, ObjectMapper mapper) throws
      IOException {
    byte[] bytes = db.get(StoreVersion.KEY);
    if (bytes == null) {
      storeVersion(db, newversion, mapper);
    } else {
      StoreVersion version = mapper.readValue(bytes, StoreVersion.class);
      if (version.major != newversion.major) {
        throw new IOException("cannot read state DB with version " + version + ", incompatible " +
            "with current version " + newversion);
      }
      // Refresh the stored record so it always reflects the current minor version.
      storeVersion(db, newversion, mapper);
    }
  }

  /** Writes {@code version} as JSON under the fixed version key. */
  public static void storeVersion(DB db, StoreVersion version, ObjectMapper mapper)
      throws IOException {
    db.put(StoreVersion.KEY, mapper.writeValueAsBytes(version));
  }

  /** Version record persisted in the DB so readers can detect incompatible schemas. */
  public static class StoreVersion {

    static final byte[] KEY = "StoreVersion".getBytes(StandardCharsets.UTF_8);

    public final int major;
    public final int minor;

    @JsonCreator
    public StoreVersion(@JsonProperty("major") int major, @JsonProperty("minor") int minor) {
      this.major = major;
      this.minor = minor;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;
      StoreVersion that = (StoreVersion) o;
      return major == that.major && minor == that.minor;
    }

    @Override
    public int hashCode() {
      int result = major;
      result = 31 * result + minor;
      return result;
    }

    @Override
    public String toString() {
      // Used in error messages (see checkVersion); default Object.toString was unhelpful.
      return major + "." + minor;
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
/**
* Interface for getting a secret key associated with some application.
*/
public interface SecretKeyHolder {
  /**
   * Gets an appropriate SASL User for the given appId.
   *
   * @param appId application identifier whose SASL user name is requested.
   * @return the SASL user name registered for {@code appId}.
   * @throws IllegalArgumentException if the given appId is not associated with a SASL user.
   */
  String getSaslUser(String appId);

  /**
   * Gets an appropriate SASL secret key for the given appId.
   *
   * @param appId application identifier whose shared secret is requested.
   * @return the secret key registered for {@code appId}.
   * @throws IllegalArgumentException if the given appId is not associated with a SASL secret key.
   */
  String getSecretKey(String appId);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import io.netty.channel.Channel;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.util.TransportConf;
/**
* A bootstrap which is executed on a TransportServer's client channel once a client connects
* to the server. This allows customizing the client channel to allow for things such as SASL
* authentication.
*/
public class SaslServerBootstrap implements TransportServerBootstrap {
private final TransportConf conf;
private final SecretKeyHolder secretKeyHolder;
public SaslServerBootstrap(TransportConf conf, SecretKeyHolder secretKeyHolder) {
this.conf = conf;
this.secretKeyHolder = secretKeyHolder;
}
/**
* Wrap the given application handler in a SaslRpcHandler that will handle the initial SASL
* negotiation.
*/
public RpcHandler doBootstrap(Channel channel, RpcHandler rpcHandler) {
return new SaslRpcHandler(conf, channel, rpcHandler, secretKeyHolder);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.RealmCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.base64.Base64;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A SASL Server for Spark which simply keeps track of the state of a single SASL session, from the
* initial state to the "authenticated" state. (It is not a server in the sense of accepting
* connections on some socket.)
*/
public class SparkSaslServer implements SaslEncryptionBackend {
  private static final Logger logger = LoggerFactory.getLogger(SparkSaslServer.class);

  /**
   * This is passed as the server name when creating the sasl client/server.
   * This could be changed to be configurable in the future.
   */
  static final String DEFAULT_REALM = "default";

  /**
   * The authentication mechanism used here is DIGEST-MD5. This could be changed to be
   * configurable in the future.
   */
  static final String DIGEST = "DIGEST-MD5";

  /**
   * Quality of protection value that includes encryption.
   */
  static final String QOP_AUTH_CONF = "auth-conf";

  /**
   * Quality of protection value that does not include encryption.
   */
  static final String QOP_AUTH = "auth";

  /** Identifier for a certain secret key within the secretKeyHolder. */
  private final String secretKeyId;
  private final SecretKeyHolder secretKeyHolder;
  // Null before construction completes or after dispose(); guarded by synchronized methods.
  private SaslServer saslServer;

  /**
   * @param secretKeyId identifier used to look up credentials in {@code secretKeyHolder}.
   * @param secretKeyHolder source of the SASL user name and secret.
   * @param alwaysEncrypt if true, only the encrypting QOP ("auth-conf") is offered;
   *                      otherwise the client may also negotiate plain "auth".
   */
  public SparkSaslServer(
      String secretKeyId,
      SecretKeyHolder secretKeyHolder,
      boolean alwaysEncrypt) {
    this.secretKeyId = secretKeyId;
    this.secretKeyHolder = secretKeyHolder;

    // Sasl.QOP is a comma-separated list of supported values. The value that allows encryption
    // is listed first since it's preferred over the non-encrypted one (if the client also
    // lists both in the request).
    String qop = alwaysEncrypt ? QOP_AUTH_CONF : String.format("%s,%s", QOP_AUTH_CONF, QOP_AUTH);
    Map<String, String> saslProps = ImmutableMap.<String, String>builder()
      .put(Sasl.SERVER_AUTH, "true")
      .put(Sasl.QOP, qop)
      .build();
    try {
      this.saslServer = Sasl.createSaslServer(DIGEST, null, DEFAULT_REALM, saslProps,
        new DigestCallbackHandler());
    } catch (SaslException e) {
      throw Throwables.propagate(e);
    }
  }

  /**
   * Determines whether the authentication exchange has completed successfully.
   */
  public synchronized boolean isComplete() {
    return saslServer != null && saslServer.isComplete();
  }

  /** Returns the value of a negotiated property. */
  public Object getNegotiatedProperty(String name) {
    return saslServer.getNegotiatedProperty(name);
  }

  /**
   * Used to respond to server SASL tokens.
   * @param token Server's SASL token
   * @return response to send back to the server.
   */
  public synchronized byte[] response(byte[] token) {
    try {
      // After dispose() there is no server to evaluate with; reply with an empty token.
      return saslServer != null ? saslServer.evaluateResponse(token) : new byte[0];
    } catch (SaslException e) {
      throw Throwables.propagate(e);
    }
  }

  /**
   * Disposes of any system resources or security-sensitive information the
   * SaslServer might be using.
   */
  @Override
  public synchronized void dispose() {
    if (saslServer != null) {
      try {
        saslServer.dispose();
      } catch (SaslException e) {
        // ignore
      } finally {
        saslServer = null;
      }
    }
  }

  @Override
  public byte[] wrap(byte[] data, int offset, int len) throws SaslException {
    return saslServer.wrap(data, offset, len);
  }

  @Override
  public byte[] unwrap(byte[] data, int offset, int len) throws SaslException {
    return saslServer.unwrap(data, offset, len);
  }

  /**
   * Implementation of javax.security.auth.callback.CallbackHandler for SASL DIGEST-MD5 mechanism.
   */
  private class DigestCallbackHandler implements CallbackHandler {
    @Override
    public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
      for (Callback callback : callbacks) {
        if (callback instanceof NameCallback) {
          logger.trace("SASL server callback: setting username");
          NameCallback nc = (NameCallback) callback;
          nc.setName(encodeIdentifier(secretKeyHolder.getSaslUser(secretKeyId)));
        } else if (callback instanceof PasswordCallback) {
          logger.trace("SASL server callback: setting password");
          PasswordCallback pc = (PasswordCallback) callback;
          pc.setPassword(encodePassword(secretKeyHolder.getSecretKey(secretKeyId)));
        } else if (callback instanceof RealmCallback) {
          logger.trace("SASL server callback: setting realm");
          RealmCallback rc = (RealmCallback) callback;
          rc.setText(rc.getDefaultText());
        } else if (callback instanceof AuthorizeCallback) {
          AuthorizeCallback ac = (AuthorizeCallback) callback;
          String authId = ac.getAuthenticationID();
          String authzId = ac.getAuthorizationID();
          // Only authorize a client to act as itself (authentication id == authorization id).
          ac.setAuthorized(authId.equals(authzId));
          if (ac.isAuthorized()) {
            ac.setAuthorizedID(authzId);
          }
          logger.debug("SASL Authorization complete, authorized set to {}", ac.isAuthorized());
        } else {
          throw new UnsupportedCallbackException(callback, "Unrecognized SASL DIGEST-MD5 Callback");
        }
      }
    }
  }

  /* Encode a byte[] identifier as a Base64-encoded string. */
  public static String encodeIdentifier(String identifier) {
    Preconditions.checkNotNull(identifier, "User cannot be null if SASL is enabled");
    return getBase64EncodedString(identifier);
  }

  /** Encode a password as a base64-encoded char[] array. */
  public static char[] encodePassword(String password) {
    Preconditions.checkNotNull(password, "Password cannot be null if SASL is enabled");
    return getBase64EncodedString(password).toCharArray();
  }

  /** Return a Base64-encoded string. */
  private static String getBase64EncodedString(String str) {
    ByteBuf byteBuf = null;
    ByteBuf encodedByteBuf = null;
    try {
      byteBuf = Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
      encodedByteBuf = Base64.encode(byteBuf);
      return encodedByteBuf.toString(StandardCharsets.UTF_8);
    } finally {
      // The release is called to suppress the memory leak error messages raised by netty.
      if (byteBuf != null) {
        byteBuf.release();
        if (encodedByteBuf != null) {
          encodedByteBuf.release();
        }
      }
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.buffer.NettyManagedBuffer;
import org.apache.spark.network.protocol.Encoders;
import org.apache.spark.network.protocol.AbstractMessage;
/**
* Encodes a Sasl-related message which is attempting to authenticate using some credentials tagged
* with the given appId. This appId allows a single SaslRpcHandler to multiplex different
* applications which may be using different sets of credentials.
*/
/**
 * Encodes a Sasl-related message which is attempting to authenticate using some credentials tagged
 * with the given appId. This appId allows a single SaslRpcHandler to multiplex different
 * applications which may be using different sets of credentials.
 *
 * Wire format (header): [ TAG_BYTE | length-prefixed UTF-8 appId | int body size ],
 * followed by the SASL token carried as the message body.
 */
class SaslMessage extends AbstractMessage {

  /** Serialization tag used to catch incorrect payloads. */
  private static final byte TAG_BYTE = (byte) 0xEA;

  /** Application id the credentials in this message belong to. */
  public final String appId;

  SaslMessage(String appId, byte[] message) {
    this(appId, Unpooled.wrappedBuffer(message));
  }

  SaslMessage(String appId, ByteBuf message) {
    // "true" marks the body as part of the same frame as the header.
    super(new NettyManagedBuffer(message), true);
    this.appId = appId;
  }

  @Override
  public Type type() { return Type.User; }

  @Override
  public int encodedLength() {
    // The integer (a.k.a. the body size) is not really used, since that information is already
    // encoded in the frame length. But this maintains backwards compatibility with versions of
    // RpcRequest that use Encoders.ByteArrays.
    return 1 + Encoders.Strings.encodedLength(appId) + 4;
  }

  @Override
  public void encode(ByteBuf buf) {
    buf.writeByte(TAG_BYTE);
    Encoders.Strings.encode(buf, appId);
    // See comment in encodedLength().
    buf.writeInt((int) body().size());
  }

  /**
   * Decodes a SaslMessage from the given buffer. The remaining readable bytes become the
   * message body; the buffer is retained because the body wraps (not copies) those bytes.
   *
   * @throws IllegalStateException if the payload does not start with {@code TAG_BYTE}.
   */
  public static SaslMessage decode(ByteBuf buf) {
    if (buf.readByte() != TAG_BYTE) {
      throw new IllegalStateException("Expected SaslMessage, received something else"
        + " (maybe your client does not have SASL enabled?)");
    }

    String appId = Encoders.Strings.decode(buf);
    // See comment in encodedLength().
    buf.readInt();
    return new SaslMessage(appId, buf.retain());
  }
}
| 9,827 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import java.util.Map;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.RealmCallback;
import javax.security.sasl.RealmChoiceCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.spark.network.sasl.SparkSaslServer.*;
/**
* A SASL Client for Spark which simply keeps track of the state of a single SASL session, from the
* initial state to the "authenticated" state. This client initializes the protocol via a
* firstToken, which is then followed by a set of challenges and responses.
*/
/**
 * A SASL Client for Spark which simply keeps track of the state of a single SASL session, from the
 * initial state to the "authenticated" state. This client initializes the protocol via a
 * firstToken, which is then followed by a set of challenges and responses.
 *
 * Methods that touch the underlying {@link SaslClient} are synchronized because disposal may
 * race with the handshake.
 */
public class SparkSaslClient implements SaslEncryptionBackend {
  private static final Logger logger = LoggerFactory.getLogger(SparkSaslClient.class);

  /** Identifier used to look up the SASL user and secret in {@link #secretKeyHolder}. */
  private final String secretKeyId;
  private final SecretKeyHolder secretKeyHolder;
  /** Quality of protection to request: auth-conf (encryption) or plain auth. */
  private final String expectedQop;
  /** Underlying JDK SASL client; null after {@link #dispose()}. */
  private SaslClient saslClient;

  public SparkSaslClient(String secretKeyId, SecretKeyHolder secretKeyHolder, boolean encrypt) {
    this.secretKeyId = secretKeyId;
    this.secretKeyHolder = secretKeyHolder;
    this.expectedQop = encrypt ? QOP_AUTH_CONF : QOP_AUTH;

    Map<String, String> saslProps = ImmutableMap.<String, String>builder()
      .put(Sasl.QOP, expectedQop)
      .build();
    try {
      this.saslClient = Sasl.createSaslClient(new String[] { DIGEST }, null, null, DEFAULT_REALM,
        saslProps, new ClientCallbackHandler());
    } catch (SaslException e) {
      // SaslException is checked; wrap as unchecked. This replaces the deprecated Guava
      // Throwables.propagate(e), which behaved identically for checked exceptions.
      throw new RuntimeException(e);
    }
  }

  /** Used to initiate SASL handshake with server. */
  public synchronized byte[] firstToken() {
    if (saslClient != null && saslClient.hasInitialResponse()) {
      try {
        return saslClient.evaluateChallenge(new byte[0]);
      } catch (SaslException e) {
        // See comment in the constructor about replacing Throwables.propagate.
        throw new RuntimeException(e);
      }
    } else {
      return new byte[0];
    }
  }

  /** Determines whether the authentication exchange has completed. */
  public synchronized boolean isComplete() {
    return saslClient != null && saslClient.isComplete();
  }

  /** Returns the value of a negotiated property. */
  public Object getNegotiatedProperty(String name) {
    return saslClient.getNegotiatedProperty(name);
  }

  /**
   * Respond to server's SASL token.
   * @param token contains server's SASL token
   * @return client's response SASL token
   */
  public synchronized byte[] response(byte[] token) {
    try {
      return saslClient != null ? saslClient.evaluateChallenge(token) : new byte[0];
    } catch (SaslException e) {
      // See comment in the constructor about replacing Throwables.propagate.
      throw new RuntimeException(e);
    }
  }

  /**
   * Disposes of any system resources or security-sensitive information the
   * SaslClient might be using.
   */
  @Override
  public synchronized void dispose() {
    if (saslClient != null) {
      try {
        saslClient.dispose();
      } catch (SaslException e) {
        // ignore
      } finally {
        saslClient = null;
      }
    }
  }

  /**
   * Implementation of javax.security.auth.callback.CallbackHandler
   * that works with share secrets.
   */
  private class ClientCallbackHandler implements CallbackHandler {
    @Override
    public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
      for (Callback callback : callbacks) {
        if (callback instanceof NameCallback) {
          logger.trace("SASL client callback: setting username");
          NameCallback nc = (NameCallback) callback;
          nc.setName(encodeIdentifier(secretKeyHolder.getSaslUser(secretKeyId)));
        } else if (callback instanceof PasswordCallback) {
          logger.trace("SASL client callback: setting password");
          PasswordCallback pc = (PasswordCallback) callback;
          pc.setPassword(encodePassword(secretKeyHolder.getSecretKey(secretKeyId)));
        } else if (callback instanceof RealmCallback) {
          logger.trace("SASL client callback: setting realm");
          RealmCallback rc = (RealmCallback) callback;
          rc.setText(rc.getDefaultText());
        } else if (callback instanceof RealmChoiceCallback) {
          // ignore (?)
        } else {
          throw new UnsupportedCallbackException(callback, "Unrecognized SASL DIGEST-MD5 Callback");
        }
      }
    }
  }

  @Override
  public byte[] wrap(byte[] data, int offset, int len) throws SaslException {
    return saslClient.wrap(data, offset, len);
  }

  @Override
  public byte[] unwrap(byte[] data, int offset, int len) throws SaslException {
    return saslClient.unwrap(data, offset, len);
  }
}
| 9,828 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/sasl/SaslEncryptionBackend.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import javax.security.sasl.SaslException;
/**
 * A SASL encryption/decryption backend installed on a channel pipeline by
 * {@code SaslEncryption.addToChannel}, once negotiation is complete.
 */
interface SaslEncryptionBackend {

  /** Disposes of resources used by the backend. */
  void dispose();

  /**
   * Encrypt data.
   *
   * @param data buffer containing the plaintext
   * @param offset index of the first byte to encrypt
   * @param len number of bytes to encrypt
   * @return a new array containing the encrypted bytes
   * @throws SaslException if the SASL layer cannot wrap the data
   */
  byte[] wrap(byte[] data, int offset, int len) throws SaslException;

  /**
   * Decrypt data.
   *
   * @param data buffer containing the ciphertext
   * @param offset index of the first byte to decrypt
   * @param len number of bytes to decrypt
   * @return a new array containing the decrypted bytes
   * @throws SaslException if the SASL layer cannot unwrap the data
   */
  byte[] unwrap(byte[] data, int offset, int len) throws SaslException;
}
| 9,829 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/sasl/SaslRpcHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import java.io.IOException;
import java.nio.ByteBuffer;
import javax.security.sasl.Sasl;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.StreamCallbackWithID;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.TransportConf;
/**
* RPC Handler which performs SASL authentication before delegating to a child RPC handler.
* The delegate will only receive messages if the given connection has been successfully
* authenticated. A connection may be authenticated at most once.
*
* Note that the authentication process consists of multiple challenge-response pairs, each of
* which are individual RPCs.
*/
/**
 * RPC Handler which performs SASL authentication before delegating to a child RPC handler.
 * The delegate will only receive messages if the given connection has been successfully
 * authenticated. A connection may be authenticated at most once.
 *
 * Note that the authentication process consists of multiple challenge-response pairs, each of
 * which are individual RPCs.
 */
public class SaslRpcHandler extends RpcHandler {
  private static final Logger logger = LoggerFactory.getLogger(SaslRpcHandler.class);

  /** Transport configuration. */
  private final TransportConf conf;

  /** The client channel. */
  private final Channel channel;

  /** RpcHandler we will delegate to for authenticated connections. */
  private final RpcHandler delegate;

  /** Class which provides secret keys which are shared by server and client on a per-app basis. */
  private final SecretKeyHolder secretKeyHolder;

  /** Server-side SASL state; null before the handshake starts and after it completes. */
  private SparkSaslServer saslServer;

  /** True once authentication is done; afterwards all RPCs go straight to the delegate. */
  private boolean isComplete;

  public SaslRpcHandler(
      TransportConf conf,
      Channel channel,
      RpcHandler delegate,
      SecretKeyHolder secretKeyHolder) {
    this.conf = conf;
    this.channel = channel;
    this.delegate = delegate;
    this.secretKeyHolder = secretKeyHolder;
    this.saslServer = null;
    this.isComplete = false;
    // NOTE: the previous version also kept an "isAuthenticated" flag that was never
    // read anywhere in this class; it has been removed as dead code.
  }

  @Override
  public void receive(TransportClient client, ByteBuffer message, RpcResponseCallback callback) {
    if (isComplete) {
      // Authentication complete, delegate to base handler.
      delegate.receive(client, message, callback);
      return;
    }

    if (saslServer == null || !saslServer.isComplete()) {
      ByteBuf nettyBuf = Unpooled.wrappedBuffer(message);
      SaslMessage saslMessage;
      try {
        saslMessage = SaslMessage.decode(nettyBuf);
      } finally {
        nettyBuf.release();
      }

      if (saslServer == null) {
        // First message in the handshake, setup the necessary state.
        client.setClientId(saslMessage.appId);
        saslServer = new SparkSaslServer(saslMessage.appId, secretKeyHolder,
          conf.saslServerAlwaysEncrypt());
      }

      byte[] response;
      try {
        response = saslServer.response(JavaUtils.bufferToArray(
          saslMessage.body().nioByteBuffer()));
      } catch (IOException ioe) {
        throw new RuntimeException(ioe);
      }
      callback.onSuccess(ByteBuffer.wrap(response));
    }

    // Setup encryption after the SASL response is sent, otherwise the client can't parse the
    // response. It's ok to change the channel pipeline here since we are processing an incoming
    // message, so the pipeline is busy and no new incoming messages will be fed to it before this
    // method returns. This assumes that the code ensures, through other means, that no outbound
    // messages are being written to the channel while negotiation is still going on.
    if (saslServer.isComplete()) {
      if (!SparkSaslServer.QOP_AUTH_CONF.equals(saslServer.getNegotiatedProperty(Sasl.QOP))) {
        logger.debug("SASL authentication successful for channel {}", client);
        complete(true);
        return;
      }

      logger.debug("Enabling encryption for channel {}", client);
      SaslEncryption.addToChannel(channel, saslServer, conf.maxSaslEncryptedBlockSize());
      complete(false);
      return;
    }
  }

  @Override
  public void receive(TransportClient client, ByteBuffer message) {
    delegate.receive(client, message);
  }

  @Override
  public StreamCallbackWithID receiveStream(
      TransportClient client,
      ByteBuffer message,
      RpcResponseCallback callback) {
    return delegate.receiveStream(client, message, callback);
  }

  @Override
  public StreamManager getStreamManager() {
    return delegate.getStreamManager();
  }

  @Override
  public void channelActive(TransportClient client) {
    delegate.channelActive(client);
  }

  @Override
  public void channelInactive(TransportClient client) {
    try {
      delegate.channelInactive(client);
    } finally {
      if (saslServer != null) {
        saslServer.dispose();
      }
    }
  }

  @Override
  public void exceptionCaught(Throwable cause, TransportClient client) {
    delegate.exceptionCaught(cause, client);
  }

  /**
   * Marks the handshake as finished, disposing of the SASL server state when it is no
   * longer needed (i.e. when the channel is not being encrypted).
   *
   * @param dispose whether to dispose the SASL server; false when it was handed off to
   *                {@link SaslEncryption} as the channel's encryption backend.
   */
  private void complete(boolean dispose) {
    if (dispose) {
      try {
        saslServer.dispose();
      } catch (RuntimeException e) {
        logger.error("Error while disposing SASL server", e);
      }
    }

    saslServer = null;
    isComplete = true;
  }
}
| 9,830 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/sasl/SaslClientBootstrap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import java.io.IOException;
import java.nio.ByteBuffer;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientBootstrap;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.TransportConf;
/**
* Bootstraps a {@link TransportClient} by performing SASL authentication on the connection. The
* server should be setup with a {@link SaslRpcHandler} with matching keys for the given appId.
*/
/**
 * Bootstraps a {@link TransportClient} by performing SASL authentication on the connection. The
 * server should be setup with a {@link SaslRpcHandler} with matching keys for the given appId.
 */
public class SaslClientBootstrap implements TransportClientBootstrap {
  private static final Logger logger = LoggerFactory.getLogger(SaslClientBootstrap.class);

  private final TransportConf conf;
  private final String appId;
  private final SecretKeyHolder secretKeyHolder;

  public SaslClientBootstrap(TransportConf conf, String appId, SecretKeyHolder secretKeyHolder) {
    this.conf = conf;
    this.appId = appId;
    this.secretKeyHolder = secretKeyHolder;
  }

  /**
   * Performs SASL authentication by sending a token, and then proceeding with the SASL
   * challenge-response tokens until we either successfully authenticate or throw an exception
   * due to mismatch.
   */
  @Override
  public void doBootstrap(TransportClient client, Channel channel) {
    SparkSaslClient authClient = new SparkSaslClient(appId, secretKeyHolder, conf.saslEncryption());
    try {
      // Drive the challenge/response loop until the SASL exchange is finished.
      byte[] token = authClient.firstToken();
      while (!authClient.isComplete()) {
        ByteBuffer reply = sendToken(client, token);
        token = authClient.response(JavaUtils.bufferToArray(reply));
      }

      client.setClientId(appId);

      if (conf.saslEncryption()) {
        // Refuse to proceed if encryption was requested but the negotiation settled on a
        // quality-of-protection that does not provide confidentiality.
        if (!SparkSaslServer.QOP_AUTH_CONF.equals(authClient.getNegotiatedProperty(Sasl.QOP))) {
          throw new RuntimeException(
            new SaslException("Encryption requests by negotiated non-encrypted connection."));
        }

        SaslEncryption.addToChannel(channel, authClient, conf.maxSaslEncryptedBlockSize());
        // Ownership of the client transfers to the encryption handler; clear the local
        // reference so the finally block does not dispose it.
        authClient = null;
        logger.debug("Channel {} configured for encryption.", client);
      }
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    } finally {
      if (authClient != null) {
        try {
          // Once authentication is complete, the server will trust all remaining communication.
          authClient.dispose();
        } catch (RuntimeException e) {
          logger.error("Error while disposing SASL client", e);
        }
      }
    }
  }

  /** Serializes one SASL token into a SaslMessage frame and sends it as a synchronous RPC. */
  private ByteBuffer sendToken(TransportClient client, byte[] token) throws IOException {
    SaslMessage msg = new SaslMessage(appId, token);
    ByteBuf encoded = Unpooled.buffer(msg.encodedLength() + (int) msg.body().size());
    msg.encode(encoded);
    encoded.writeBytes(msg.body().nioByteBuffer());
    return client.sendRpcSync(encoded.nioBuffer(), conf.authRTTimeoutMs());
  }
}
| 9,831 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/sasl/SaslEncryption.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import java.util.List;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import io.netty.channel.FileRegion;
import io.netty.handler.codec.MessageToMessageDecoder;
import org.apache.spark.network.util.AbstractFileRegion;
import org.apache.spark.network.util.ByteArrayWritableChannel;
import org.apache.spark.network.util.NettyUtils;
/**
* Provides SASL-based encryption for transport channels. The single method exposed by this
* class installs the needed channel handlers on a connected channel.
*/
/**
 * Provides SASL-based encryption for transport channels. The single method exposed by this
 * class installs the needed channel handlers on a connected channel.
 */
class SaslEncryption {

  @VisibleForTesting
  static final String ENCRYPTION_HANDLER_NAME = "saslEncryption";

  /**
   * Adds channel handlers that perform encryption / decryption of data using SASL.
   *
   * @param channel The channel.
   * @param backend The SASL backend.
   * @param maxOutboundBlockSize Max size in bytes of outgoing encrypted blocks, to control
   *                             memory usage.
   */
  static void addToChannel(
      Channel channel,
      SaslEncryptionBackend backend,
      int maxOutboundBlockSize) {
    // Handlers are added at the head of the pipeline so encryption wraps everything else:
    // outbound data is encrypted last; inbound data is re-framed and decrypted first.
    channel.pipeline()
      .addFirst(ENCRYPTION_HANDLER_NAME, new EncryptionHandler(backend, maxOutboundBlockSize))
      .addFirst("saslDecryption", new DecryptionHandler(backend))
      .addFirst("saslFrameDecoder", NettyUtils.createFrameDecoder());
  }

  /** Outbound handler that wraps every outgoing message in a lazily-encrypted envelope. */
  private static class EncryptionHandler extends ChannelOutboundHandlerAdapter {

    private final int maxOutboundBlockSize;
    private final SaslEncryptionBackend backend;

    EncryptionHandler(SaslEncryptionBackend backend, int maxOutboundBlockSize) {
      this.backend = backend;
      this.maxOutboundBlockSize = maxOutboundBlockSize;
    }

    /**
     * Wrap the incoming message in an implementation that will perform encryption lazily. This is
     * needed to guarantee ordering of the outgoing encrypted packets - they need to be decrypted in
     * the same order, and netty doesn't have an atomic ChannelHandlerContext.write() API, so it
     * does not guarantee any ordering.
     */
    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
      throws Exception {

      ctx.write(new EncryptedMessage(backend, msg, maxOutboundBlockSize), promise);
    }

    @Override
    public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
      try {
        backend.dispose();
      } finally {
        super.handlerRemoved(ctx);
      }
    }

  }

  /** Inbound handler that decrypts each framed block produced by the frame decoder. */
  private static class DecryptionHandler extends MessageToMessageDecoder<ByteBuf> {

    private final SaslEncryptionBackend backend;

    DecryptionHandler(SaslEncryptionBackend backend) {
      this.backend = backend;
    }

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out)
      throws Exception {

      byte[] data;
      int offset;
      int length = msg.readableBytes();
      if (msg.hasArray()) {
        // Backed by a heap array: decrypt in place without copying.
        data = msg.array();
        offset = msg.arrayOffset();
        msg.skipBytes(length);
      } else {
        // Direct buffer: copy the ciphertext into a heap array first.
        data = new byte[length];
        msg.readBytes(data);
        offset = 0;
      }

      out.add(Unpooled.wrappedBuffer(backend.unwrap(data, offset, length)));
    }

  }

  @VisibleForTesting
  static class EncryptedMessage extends AbstractFileRegion {

    private final SaslEncryptionBackend backend;
    private final boolean isByteBuf;
    private final ByteBuf buf;
    private final FileRegion region;
    private final int maxOutboundBlockSize;

    /**
     * A channel used to buffer input data for encryption. The channel has an upper size bound
     * so that if the input is larger than the allowed buffer, it will be broken into multiple
     * chunks. Made non-final to enable lazy initialization, which saves memory.
     */
    private ByteArrayWritableChannel byteChannel;

    // State for the chunk currently being written: its length-prefix header, the encrypted
    // payload, and byte-accounting counters (see transferTo for how they interact).
    private ByteBuf currentHeader;
    private ByteBuffer currentChunk;
    private long currentChunkSize;
    private long currentReportedBytes;
    private long unencryptedChunkSize;
    private long transferred;

    EncryptedMessage(SaslEncryptionBackend backend, Object msg, int maxOutboundBlockSize) {
      Preconditions.checkArgument(msg instanceof ByteBuf || msg instanceof FileRegion,
        "Unrecognized message type: %s", msg.getClass().getName());
      this.backend = backend;
      this.isByteBuf = msg instanceof ByteBuf;
      this.buf = isByteBuf ? (ByteBuf) msg : null;
      this.region = isByteBuf ? null : (FileRegion) msg;
      this.maxOutboundBlockSize = maxOutboundBlockSize;
    }

    /**
     * Returns the size of the original (unencrypted) message.
     *
     * This makes assumptions about how netty treats FileRegion instances, because there's no way
     * to know beforehand what will be the size of the encrypted message. Namely, it assumes
     * that netty will try to transfer data from this message while
     * <code>transferred() < count()</code>. So these two methods return, technically, wrong data,
     * but netty doesn't know better.
     */
    @Override
    public long count() {
      return isByteBuf ? buf.readableBytes() : region.count();
    }

    @Override
    public long position() {
      return 0;
    }

    /**
     * Returns an approximation of the amount of data transferred. See {@link #count()}.
     */
    @Override
    public long transferred() {
      return transferred;
    }

    // touch/retain/release propagate reference-count operations to the wrapped message so
    // netty leak detection tracks the underlying buffer or file region as well.
    @Override
    public EncryptedMessage touch(Object o) {
      super.touch(o);
      if (buf != null) {
        buf.touch(o);
      }
      if (region != null) {
        region.touch(o);
      }
      return this;
    }

    @Override
    public EncryptedMessage retain(int increment) {
      super.retain(increment);
      if (buf != null) {
        buf.retain(increment);
      }
      if (region != null) {
        region.retain(increment);
      }
      return this;
    }

    @Override
    public boolean release(int decrement) {
      if (region != null) {
        region.release(decrement);
      }
      if (buf != null) {
        buf.release(decrement);
      }
      return super.release(decrement);
    }

    /**
     * Transfers data from the original message to the channel, encrypting it in the process.
     *
     * This method also breaks down the original message into smaller chunks when needed. This
     * is done to keep memory usage under control. This avoids having to copy the whole message
     * data into memory at once, and can avoid ballooning memory usage when transferring large
     * messages such as shuffle blocks.
     *
     * The {@link #transferred()} counter also behaves a little funny, in that it won't go forward
     * until a whole chunk has been written. This is done because the code can't use the actual
     * number of bytes written to the channel as the transferred count (see {@link #count()}).
     * Instead, once an encrypted chunk is written to the output (including its header), the
     * size of the original block will be added to the {@link #transferred()} amount.
     */
    @Override
    public long transferTo(final WritableByteChannel target, final long position)
      throws IOException {

      Preconditions.checkArgument(position == transferred(), "Invalid position.");

      // reportedWritten counts *unencrypted* bytes attributed to fully-written chunks;
      // actuallyWritten counts raw (encrypted + header) bytes pushed to the channel.
      long reportedWritten = 0L;
      long actuallyWritten = 0L;
      do {
        if (currentChunk == null) {
          nextChunk();
        }

        if (currentHeader.readableBytes() > 0) {
          int bytesWritten = target.write(currentHeader.nioBuffer());
          currentHeader.skipBytes(bytesWritten);
          actuallyWritten += bytesWritten;
          if (currentHeader.readableBytes() > 0) {
            // Break out of loop if there are still header bytes left to write.
            break;
          }
        }

        actuallyWritten += target.write(currentChunk);
        if (!currentChunk.hasRemaining()) {
          // Only update the count of written bytes once a full chunk has been written.
          // See method javadoc.
          long chunkBytesRemaining = unencryptedChunkSize - currentReportedBytes;
          reportedWritten += chunkBytesRemaining;
          transferred += chunkBytesRemaining;
          currentHeader.release();
          currentHeader = null;
          currentChunk = null;
          currentChunkSize = 0;
          currentReportedBytes = 0;
        }
      } while (currentChunk == null && transferred() + reportedWritten < count());

      // Returning 0 triggers a backoff mechanism in netty which may harm performance. Instead,
      // we return 1 until we can (i.e. until the reported count would actually match the size
      // of the current chunk), at which point we resort to returning 0 so that the counts still
      // match, at the cost of some performance. That situation should be rare, though.
      if (reportedWritten != 0L) {
        return reportedWritten;
      }

      if (actuallyWritten > 0 && currentReportedBytes < currentChunkSize - 1) {
        transferred += 1L;
        currentReportedBytes += 1L;
        return 1L;
      }

      return 0L;
    }

    /**
     * Buffers up to maxOutboundBlockSize bytes of source data, encrypts them, and sets up the
     * chunk state (payload, size header, and accounting counters) for transferTo.
     */
    private void nextChunk() throws IOException {
      if (byteChannel == null) {
        byteChannel = new ByteArrayWritableChannel(maxOutboundBlockSize);
      }
      byteChannel.reset();
      if (isByteBuf) {
        int copied = byteChannel.write(buf.nioBuffer());
        buf.skipBytes(copied);
      } else {
        region.transferTo(byteChannel, region.transferred());
      }

      byte[] encrypted = backend.wrap(byteChannel.getData(), 0, byteChannel.length());
      this.currentChunk = ByteBuffer.wrap(encrypted);
      this.currentChunkSize = encrypted.length;
      // Frame length includes the 8-byte long header itself, matching the frame decoder.
      this.currentHeader = Unpooled.copyLong(8 + currentChunkSize);
      this.unencryptedChunkSize = byteChannel.length();
    }

    @Override
    protected void deallocate() {
      if (currentHeader != null) {
        currentHeader.release();
      }
      if (buf != null) {
        buf.release();
      }
      if (region != null) {
        region.release();
      }
    }

  }

}
| 9,832 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/RpcHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import java.nio.ByteBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.StreamCallbackWithID;
import org.apache.spark.network.client.TransportClient;
/**
* Handler for sendRPC() messages sent by {@link org.apache.spark.network.client.TransportClient}s.
*/
public abstract class RpcHandler {
private static final RpcResponseCallback ONE_WAY_CALLBACK = new OneWayRpcCallback();
/**
* Receive a single RPC message. Any exception thrown while in this method will be sent back to
* the client in string form as a standard RPC failure.
*
* Neither this method nor #receiveStream will be called in parallel for a single
* TransportClient (i.e., channel).
*
* @param client A channel client which enables the handler to make requests back to the sender
* of this RPC. This will always be the exact same object for a particular channel.
* @param message The serialized bytes of the RPC.
* @param callback Callback which should be invoked exactly once upon success or failure of the
* RPC.
*/
public abstract void receive(
TransportClient client,
ByteBuffer message,
RpcResponseCallback callback);
/**
* Receive a single RPC message which includes data that is to be received as a stream. Any
* exception thrown while in this method will be sent back to the client in string form as a
* standard RPC failure.
*
* Neither this method nor #receive will be called in parallel for a single TransportClient
* (i.e., channel).
*
* An error while reading data from the stream
* ({@link org.apache.spark.network.client.StreamCallback#onData(String, ByteBuffer)})
* will fail the entire channel. A failure in "post-processing" the stream in
* {@link org.apache.spark.network.client.StreamCallback#onComplete(String)} will result in an
* rpcFailure, but the channel will remain active.
*
* @param client A channel client which enables the handler to make requests back to the sender
* of this RPC. This will always be the exact same object for a particular channel.
* @param messageHeader The serialized bytes of the header portion of the RPC. This is in meant
* to be relatively small, and will be buffered entirely in memory, to
* facilitate how the streaming portion should be received.
* @param callback Callback which should be invoked exactly once upon success or failure of the
* RPC.
* @return a StreamCallback for handling the accompanying streaming data
*/
public StreamCallbackWithID receiveStream(
TransportClient client,
ByteBuffer messageHeader,
RpcResponseCallback callback) {
throw new UnsupportedOperationException();
}
/**
* Returns the StreamManager which contains the state about which streams are currently being
* fetched by a TransportClient.
*/
public abstract StreamManager getStreamManager();
/**
* Receives an RPC message that does not expect a reply. The default implementation will
* call "{@link #receive(TransportClient, ByteBuffer, RpcResponseCallback)}" and log a warning if
* any of the callback methods are called.
*
* @param client A channel client which enables the handler to make requests back to the sender
* of this RPC. This will always be the exact same object for a particular channel.
* @param message The serialized bytes of the RPC.
*/
public void receive(TransportClient client, ByteBuffer message) {
receive(client, message, ONE_WAY_CALLBACK);
}
/**
* Invoked when the channel associated with the given client is active.
*/
public void channelActive(TransportClient client) { }
/**
 * Invoked when the channel associated with the given client is inactive.
 * No further requests will come from this client. The default implementation is a no-op;
 * override to clean up any per-connection state.
 */
public void channelInactive(TransportClient client) { }
/**
 * Invoked when an exception is caught on the channel associated with the given client.
 * The default implementation is a no-op; override to react to transport-level errors.
 */
public void exceptionCaught(Throwable cause, TransportClient client) { }
/**
 * Callback used for one-way RPCs. One-way senders never receive a response, so any
 * invocation of this callback indicates a handler bug and is only logged, not sent anywhere.
 */
private static class OneWayRpcCallback implements RpcResponseCallback {

  private static final Logger logger = LoggerFactory.getLogger(OneWayRpcCallback.class);

  @Override
  public void onSuccess(ByteBuffer response) {
    logger.warn("Response provided for one-way RPC.");
  }

  @Override
  public void onFailure(Throwable e) {
    logger.error("Error response provided for one-way RPC.", e);
  }
}
}
| 9,833 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/StreamManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import io.netty.channel.Channel;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.client.TransportClient;
/**
 * The StreamManager is used to fetch individual chunks from a stream. This is used in
 * {@link TransportRequestHandler} in order to respond to fetchChunk() requests. Creation of the
 * stream is outside the scope of the transport layer, but a given stream is guaranteed to be read
 * by only one client connection, meaning that getChunk() for a particular stream will be called
 * serially and that once the connection associated with the stream is closed, that stream will
 * never be used again.
 */
public abstract class StreamManager {
  /**
   * Called in response to a fetchChunk() request. The returned buffer will be passed as-is to the
   * client. A single stream will be associated with a single TCP connection, so this method
   * will not be called in parallel for a particular stream.
   *
   * Chunks may be requested in any order, and requests may be repeated, but it is not required
   * that implementations support this behavior.
   *
   * The returned ManagedBuffer will be release()'d after being written to the network.
   *
   * @param streamId id of a stream that has been previously registered with the StreamManager.
   * @param chunkIndex 0-indexed chunk of the stream that's requested
   */
  public abstract ManagedBuffer getChunk(long streamId, int chunkIndex);

  /**
   * Called in response to a stream() request. The returned data is streamed to the client
   * through a single TCP connection.
   *
   * Note the <code>streamId</code> argument is not related to the similarly named argument in the
   * {@link #getChunk(long, int)} method.
   *
   * The default implementation does not support streaming; subclasses that do must override.
   *
   * @param streamId id of a stream that has been previously registered with the StreamManager.
   * @return A managed buffer for the stream, or null if the stream was not found.
   */
  public ManagedBuffer openStream(String streamId) {
    throw new UnsupportedOperationException();
  }

  /**
   * Indicates that the given channel has been terminated. After this occurs, we are guaranteed not
   * to read from the associated streams again, so any state can be cleaned up.
   * The default implementation is a no-op.
   */
  public void connectionTerminated(Channel channel) { }

  /**
   * Verify that the client is authorized to read from the given stream.
   * The default implementation authorizes every client.
   *
   * @throws SecurityException If client is not authorized.
   */
  public void checkAuthorization(TransportClient client, long streamId) { }

  /**
   * Return the number of chunks being transferred and not finished yet in this StreamManager.
   * The default implementation does not track transfers and always reports zero.
   */
  public long chunksBeingTransferred() {
    return 0;
  }

  /**
   * Called when start sending a chunk.
   */
  public void chunkBeingSent(long streamId) { }

  /**
   * Called when start sending a stream.
   */
  public void streamBeingSent(String streamId) { }

  /**
   * Called when a chunk is successfully sent.
   */
  public void chunkSent(long streamId) { }

  /**
   * Called when a stream is successfully sent.
   */
  public void streamSent(String streamId) { }
}
| 9,834 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/MessageHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import org.apache.spark.network.protocol.Message;
/**
 * Handles either request or response messages coming off of Netty. A MessageHandler instance
 * is associated with a single Netty Channel (though it may have multiple clients on the same
 * Channel.)
 *
 * @param <T> the concrete message type (request side or response side) this handler accepts.
 */
public abstract class MessageHandler<T extends Message> {
  /** Handles the receipt of a single message. */
  public abstract void handle(T message) throws Exception;

  /** Invoked when the channel this MessageHandler is on is active. */
  public abstract void channelActive();

  /** Invoked when an exception was caught on the Channel. */
  public abstract void exceptionCaught(Throwable cause);

  /** Invoked when the channel this MessageHandler is on is inactive. */
  public abstract void channelInactive();
}
| 9,835 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/NoOpRpcHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import java.nio.ByteBuffer;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
/**
 * An RpcHandler suitable for a client-only TransportContext, which cannot receive RPCs.
 * Any inbound RPC is rejected; only the stream manager is functional.
 */
public class NoOpRpcHandler extends RpcHandler {
  // Initialized eagerly: even a client-only context needs a StreamManager instance.
  private final StreamManager streamManager = new OneForOneStreamManager();

  @Override
  public void receive(TransportClient client, ByteBuffer message, RpcResponseCallback callback) {
    throw new UnsupportedOperationException("Cannot handle messages");
  }

  @Override
  public StreamManager getStreamManager() {
    return streamManager;
  }
}
| 9,836 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import java.io.IOException;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import com.google.common.base.Throwables;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.client.*;
import org.apache.spark.network.protocol.*;
import org.apache.spark.network.util.TransportFrameDecoder;
import static org.apache.spark.network.util.NettyUtils.getRemoteAddress;
/**
 * A handler that processes requests from clients and writes chunk data back. Each handler is
 * attached to a single Netty channel, and keeps track of which streams have been fetched via this
 * channel, in order to clean them up if the channel is terminated (see #channelUnregistered).
 *
 * The messages should have been processed by the pipeline setup by {@link TransportServer}.
 */
public class TransportRequestHandler extends MessageHandler<RequestMessage> {

  private static final Logger logger = LoggerFactory.getLogger(TransportRequestHandler.class);

  /** The Netty channel that this handler is associated with. */
  private final Channel channel;

  /** Client on the same channel allowing us to talk back to the requester. */
  private final TransportClient reverseClient;

  /** Handles all RPC messages. */
  private final RpcHandler rpcHandler;

  /** Returns each chunk part of a stream. */
  private final StreamManager streamManager;

  /** The max number of chunks being transferred and not finished yet. */
  private final long maxChunksBeingTransferred;

  public TransportRequestHandler(
      Channel channel,
      TransportClient reverseClient,
      RpcHandler rpcHandler,
      Long maxChunksBeingTransferred) {
    this.channel = channel;
    this.reverseClient = reverseClient;
    this.rpcHandler = rpcHandler;
    this.streamManager = rpcHandler.getStreamManager();
    this.maxChunksBeingTransferred = maxChunksBeingTransferred;
  }

  /** Forwards channel-level exceptions to the application's RpcHandler. */
  @Override
  public void exceptionCaught(Throwable cause) {
    rpcHandler.exceptionCaught(cause, reverseClient);
  }

  @Override
  public void channelActive() {
    rpcHandler.channelActive(reverseClient);
  }

  @Override
  public void channelInactive() {
    if (streamManager != null) {
      try {
        // Let the StreamManager release any state associated with this channel's streams;
        // a failing callback must not prevent notifying the RpcHandler below.
        streamManager.connectionTerminated(channel);
      } catch (RuntimeException e) {
        logger.error("StreamManager connectionTerminated() callback failed.", e);
      }
    }
    rpcHandler.channelInactive(reverseClient);
  }

  /** Dispatches an incoming request message to the matching process*() method. */
  @Override
  public void handle(RequestMessage request) {
    if (request instanceof ChunkFetchRequest) {
      processFetchRequest((ChunkFetchRequest) request);
    } else if (request instanceof RpcRequest) {
      processRpcRequest((RpcRequest) request);
    } else if (request instanceof OneWayMessage) {
      processOneWayMessage((OneWayMessage) request);
    } else if (request instanceof StreamRequest) {
      processStreamRequest((StreamRequest) request);
    } else if (request instanceof UploadStream) {
      processStreamUpload((UploadStream) request);
    } else {
      throw new IllegalArgumentException("Unknown request type: " + request);
    }
  }

  /**
   * Serves a single chunk of a registered stream. Closes the connection when too many chunks
   * are already in flight; replies with ChunkFetchFailure if the chunk cannot be opened.
   */
  private void processFetchRequest(final ChunkFetchRequest req) {
    if (logger.isTraceEnabled()) {
      logger.trace("Received req from {} to fetch block {}", getRemoteAddress(channel),
        req.streamChunkId);
    }
    // Back-pressure: refuse new fetches (by closing the channel) once the number of in-flight
    // chunks exceeds the configured maximum.
    long chunksBeingTransferred = streamManager.chunksBeingTransferred();
    if (chunksBeingTransferred >= maxChunksBeingTransferred) {
      logger.warn("The number of chunks being transferred {} is above {}, close the connection.",
        chunksBeingTransferred, maxChunksBeingTransferred);
      channel.close();
      return;
    }
    ManagedBuffer buf;
    try {
      streamManager.checkAuthorization(reverseClient, req.streamChunkId.streamId);
      buf = streamManager.getChunk(req.streamChunkId.streamId, req.streamChunkId.chunkIndex);
    } catch (Exception e) {
      logger.error(String.format("Error opening block %s for request from %s",
        req.streamChunkId, getRemoteAddress(channel)), e);
      respond(new ChunkFetchFailure(req.streamChunkId, Throwables.getStackTraceAsString(e)));
      return;
    }

    // Bracket the transfer so the StreamManager can track in-flight chunk counts.
    streamManager.chunkBeingSent(req.streamChunkId.streamId);
    respond(new ChunkFetchSuccess(req.streamChunkId, buf)).addListener(future -> {
      streamManager.chunkSent(req.streamChunkId.streamId);
    });
  }

  /**
   * Serves an entire stream over the channel. Mirrors processFetchRequest: applies the same
   * in-flight limit and replies with StreamFailure when the stream cannot be opened or found.
   */
  private void processStreamRequest(final StreamRequest req) {
    if (logger.isTraceEnabled()) {
      logger.trace("Received req from {} to fetch stream {}", getRemoteAddress(channel),
        req.streamId);
    }

    long chunksBeingTransferred = streamManager.chunksBeingTransferred();
    if (chunksBeingTransferred >= maxChunksBeingTransferred) {
      logger.warn("The number of chunks being transferred {} is above {}, close the connection.",
        chunksBeingTransferred, maxChunksBeingTransferred);
      channel.close();
      return;
    }
    ManagedBuffer buf;
    try {
      buf = streamManager.openStream(req.streamId);
    } catch (Exception e) {
      logger.error(String.format(
        "Error opening stream %s for request from %s", req.streamId, getRemoteAddress(channel)), e);
      respond(new StreamFailure(req.streamId, Throwables.getStackTraceAsString(e)));
      return;
    }

    if (buf != null) {
      streamManager.streamBeingSent(req.streamId);
      respond(new StreamResponse(req.streamId, buf.size(), buf)).addListener(future -> {
        streamManager.streamSent(req.streamId);
      });
    } else {
      // openStream() returning null means "not found" (as opposed to throwing).
      respond(new StreamFailure(req.streamId, String.format(
        "Stream '%s' was not found.", req.streamId)));
    }
  }

  /**
   * Handles a request/response RPC: passes the body to the RpcHandler and sends back either an
   * RpcResponse or an RpcFailure carrying the stack trace. The request body is always released.
   */
  private void processRpcRequest(final RpcRequest req) {
    try {
      rpcHandler.receive(reverseClient, req.body().nioByteBuffer(), new RpcResponseCallback() {
        @Override
        public void onSuccess(ByteBuffer response) {
          respond(new RpcResponse(req.requestId, new NioManagedBuffer(response)));
        }

        @Override
        public void onFailure(Throwable e) {
          respond(new RpcFailure(req.requestId, Throwables.getStackTraceAsString(e)));
        }
      });
    } catch (Exception e) {
      logger.error("Error while invoking RpcHandler#receive() on RPC id " + req.requestId, e);
      respond(new RpcFailure(req.requestId, Throwables.getStackTraceAsString(e)));
    } finally {
      req.body().release();
    }
  }

  /**
   * Handle a request from the client to upload a stream of data.
   */
  private void processStreamUpload(final UploadStream req) {
    // The payload arrives as raw frames after this message; the message itself carries no body.
    assert (req.body() == null);
    try {
      RpcResponseCallback callback = new RpcResponseCallback() {
        @Override
        public void onSuccess(ByteBuffer response) {
          respond(new RpcResponse(req.requestId, new NioManagedBuffer(response)));
        }

        @Override
        public void onFailure(Throwable e) {
          respond(new RpcFailure(req.requestId, Throwables.getStackTraceAsString(e)));
        }
      };
      TransportFrameDecoder frameDecoder = (TransportFrameDecoder)
        channel.pipeline().get(TransportFrameDecoder.HANDLER_NAME);
      ByteBuffer meta = req.meta.nioByteBuffer();
      StreamCallbackWithID streamHandler = rpcHandler.receiveStream(reverseClient, meta, callback);
      if (streamHandler == null) {
        throw new NullPointerException("rpcHandler returned a null streamHandler");
      }
      // Wrap the handler's callback so that completing the stream also completes the RPC:
      // onComplete success -> empty RpcResponse; any failure -> RpcFailure, channel stays open.
      StreamCallbackWithID wrappedCallback = new StreamCallbackWithID() {
        @Override
        public void onData(String streamId, ByteBuffer buf) throws IOException {
          streamHandler.onData(streamId, buf);
        }

        @Override
        public void onComplete(String streamId) throws IOException {
           try {
             streamHandler.onComplete(streamId);
             callback.onSuccess(ByteBuffer.allocate(0));
           } catch (Exception ex) {
             IOException ioExc = new IOException("Failure post-processing complete stream;" +
               " failing this rpc and leaving channel active", ex);
             callback.onFailure(ioExc);
             streamHandler.onFailure(streamId, ioExc);
           }
        }

        @Override
        public void onFailure(String streamId, Throwable cause) throws IOException {
          callback.onFailure(new IOException("Destination failed while reading stream", cause));
          streamHandler.onFailure(streamId, cause);
        }

        @Override
        public String getID() {
          return streamHandler.getID();
        }
      };
      if (req.bodyByteCount > 0) {
        // Divert the next bodyByteCount bytes off the frame decoder into the stream callback.
        StreamInterceptor<RequestMessage> interceptor = new StreamInterceptor<>(
          this, wrappedCallback.getID(), req.bodyByteCount, wrappedCallback);
        frameDecoder.setInterceptor(interceptor);
      } else {
        // Zero-length stream: nothing to intercept, complete immediately.
        wrappedCallback.onComplete(wrappedCallback.getID());
      }
    } catch (Exception e) {
      logger.error("Error while invoking RpcHandler#receive() on RPC id " + req.requestId, e);
      respond(new RpcFailure(req.requestId, Throwables.getStackTraceAsString(e)));
      // We choose to totally fail the channel, rather than trying to recover as we do in other
      // cases. We don't know how many bytes of the stream the client has already sent for the
      // stream, it's not worth trying to recover.
      channel.pipeline().fireExceptionCaught(e);
    } finally {
      req.meta.release();
    }
  }

  /** Handles a fire-and-forget RPC; errors are logged but no response is ever sent. */
  private void processOneWayMessage(OneWayMessage req) {
    try {
      rpcHandler.receive(reverseClient, req.body().nioByteBuffer());
    } catch (Exception e) {
      logger.error("Error while invoking RpcHandler#receive() for one-way message.", e);
    } finally {
      req.body().release();
    }
  }

  /**
   * Responds to a single message with some Encodable object. If a failure occurs while sending,
   * it will be logged and the channel closed.
   */
  private ChannelFuture respond(Encodable result) {
    SocketAddress remoteAddress = channel.remoteAddress();
    return channel.writeAndFlush(result).addListener(future -> {
      if (future.isSuccess()) {
        logger.trace("Sent result {} to client {}", result, remoteAddress);
      } else {
        logger.error(String.format("Error sending result %s to %s; closing connection",
          result, remoteAddress), future.cause());
        channel.close();
      }
    });
  }
}
| 9,837 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/TransportServerBootstrap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import io.netty.channel.Channel;
/**
 * A bootstrap which is executed on a TransportServer's client channel once a client connects
 * to the server. This allows customizing the client channel to allow for things such as SASL
 * authentication.
 */
public interface TransportServerBootstrap {
  /**
   * Customizes the channel to include new features, if needed.
   *
   * @param channel The connected channel opened by the client.
   * @param rpcHandler The RPC handler for the server.
   * @return The RPC handler to use for the channel. May be the given handler, or a wrapper
   *         around it.
   */
  RpcHandler doBootstrap(Channel channel, RpcHandler rpcHandler);
}
| 9,838 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportResponseHandler;
import org.apache.spark.network.protocol.RequestMessage;
import org.apache.spark.network.protocol.ResponseMessage;
import static org.apache.spark.network.util.NettyUtils.getRemoteAddress;
/**
 * The single Transport-level Channel handler which is used for delegating requests to the
 * {@link TransportRequestHandler} and responses to the {@link TransportResponseHandler}.
 *
 * All channels created in the transport layer are bidirectional. When the Client initiates a Netty
 * Channel with a RequestMessage (which gets handled by the Server's RequestHandler), the Server
 * will produce a ResponseMessage (handled by the Client's ResponseHandler). However, the Server
 * also gets a handle on the same Channel, so it may then begin to send RequestMessages to the
 * Client.
 * This means that the Client also needs a RequestHandler and the Server needs a ResponseHandler,
 * for the Client's responses to the Server's requests.
 *
 * This class also handles timeouts from a {@link io.netty.handler.timeout.IdleStateHandler}.
 * We consider a connection timed out if there are outstanding fetch or RPC requests but no traffic
 * on the channel for at least `requestTimeoutMs`. Note that this is duplex traffic; we will not
 * timeout if the client is continuously sending but getting no responses, for simplicity.
 */
public class TransportChannelHandler extends ChannelInboundHandlerAdapter {
  private static final Logger logger = LoggerFactory.getLogger(TransportChannelHandler.class);

  private final TransportClient client;
  private final TransportResponseHandler responseHandler;
  private final TransportRequestHandler requestHandler;
  /** Request timeout in nanoseconds, for comparison against System.nanoTime(). */
  private final long requestTimeoutNs;
  private final boolean closeIdleConnections;

  public TransportChannelHandler(
      TransportClient client,
      TransportResponseHandler responseHandler,
      TransportRequestHandler requestHandler,
      long requestTimeoutMs,
      boolean closeIdleConnections) {
    this.client = client;
    this.responseHandler = responseHandler;
    this.requestHandler = requestHandler;
    this.requestTimeoutNs = requestTimeoutMs * 1000L * 1000;
    this.closeIdleConnections = closeIdleConnections;
  }

  public TransportClient getClient() {
    return client;
  }

  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
    // Parameterized logging (SLF4J idiom) instead of string concatenation; the throwable is
    // passed as the final argument so its stack trace is still recorded.
    logger.warn("Exception in connection from {}", getRemoteAddress(ctx.channel()), cause);
    requestHandler.exceptionCaught(cause);
    responseHandler.exceptionCaught(cause);
    ctx.close();
  }

  @Override
  public void channelActive(ChannelHandlerContext ctx) throws Exception {
    // Notify both handlers independently; a failure in one must not suppress the other.
    try {
      requestHandler.channelActive();
    } catch (RuntimeException e) {
      logger.error("Exception from request handler while channel is active", e);
    }
    try {
      responseHandler.channelActive();
    } catch (RuntimeException e) {
      logger.error("Exception from response handler while channel is active", e);
    }
    super.channelActive(ctx);
  }

  @Override
  public void channelInactive(ChannelHandlerContext ctx) throws Exception {
    // Same isolation as channelActive(): each handler gets its callback regardless of errors.
    try {
      requestHandler.channelInactive();
    } catch (RuntimeException e) {
      logger.error("Exception from request handler while channel is inactive", e);
    }
    try {
      responseHandler.channelInactive();
    } catch (RuntimeException e) {
      logger.error("Exception from response handler while channel is inactive", e);
    }
    super.channelInactive(ctx);
  }

  /** Routes inbound messages to the request or response handler by message direction. */
  @Override
  public void channelRead(ChannelHandlerContext ctx, Object request) throws Exception {
    if (request instanceof RequestMessage) {
      requestHandler.handle((RequestMessage) request);
    } else if (request instanceof ResponseMessage) {
      responseHandler.handle((ResponseMessage) request);
    } else {
      ctx.fireChannelRead(request);
    }
  }

  /** Triggered based on events from an {@link io.netty.handler.timeout.IdleStateHandler}. */
  @Override
  public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
    if (evt instanceof IdleStateEvent) {
      IdleStateEvent e = (IdleStateEvent) evt;
      // See class comment for timeout semantics. In addition to ensuring we only timeout while
      // there are outstanding requests, we also do a secondary consistency check to ensure
      // there's no race between the idle timeout and incrementing the numOutstandingRequests
      // (see SPARK-7003).
      //
      // To avoid a race between TransportClientFactory.createClient() and this code which could
      // result in an inactive client being returned, this needs to run in a synchronized block.
      synchronized (this) {
        boolean isActuallyOverdue =
          System.nanoTime() - responseHandler.getTimeOfLastRequestNs() > requestTimeoutNs;
        if (e.state() == IdleState.ALL_IDLE && isActuallyOverdue) {
          if (responseHandler.numOutstandingRequests() > 0) {
            String address = getRemoteAddress(ctx.channel());
            logger.error("Connection to {} has been quiet for {} ms while there are outstanding " +
              "requests. Assuming connection is dead; please adjust spark.network.timeout if " +
              "this is wrong.", address, requestTimeoutNs / 1000 / 1000);
            client.timeOut();
            ctx.close();
          } else if (closeIdleConnections) {
            // While CloseIdleConnections is enable, we also close idle connection
            client.timeOut();
            ctx.close();
          }
        }
      }
    }
    ctx.fireUserEventTriggered(evt);
  }

  public TransportResponseHandler getResponseHandler() {
    return responseHandler;
  }
}
| 9,839 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import java.io.Closeable;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.MetricSet;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import org.apache.commons.lang3.SystemUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.util.*;
/**
 * Server for the efficient, low-level streaming service.
 */
public class TransportServer implements Closeable {
  private static final Logger logger = LoggerFactory.getLogger(TransportServer.class);

  private final TransportContext context;
  private final TransportConf conf;
  private final RpcHandler appRpcHandler;
  private final List<TransportServerBootstrap> bootstraps;

  private ServerBootstrap bootstrap;
  private ChannelFuture channelFuture;
  /** Actual bound port; -1 until init() completes successfully. */
  private int port = -1;
  private NettyMemoryMetrics metrics;

  /**
   * Creates a TransportServer that binds to the given host and the given port, or to any available
   * if 0. If you don't want to bind to any special host, set "hostToBind" to null.
   * */
  public TransportServer(
      TransportContext context,
      String hostToBind,
      int portToBind,
      RpcHandler appRpcHandler,
      List<TransportServerBootstrap> bootstraps) {
    this.context = context;
    this.conf = context.getConf();
    this.appRpcHandler = appRpcHandler;
    this.bootstraps = Lists.newArrayList(Preconditions.checkNotNull(bootstraps));

    // If init() throws, release any partially-created resources before propagating.
    boolean shouldClose = true;
    try {
      init(hostToBind, portToBind);
      shouldClose = false;
    } finally {
      if (shouldClose) {
        JavaUtils.closeQuietly(this);
      }
    }
  }

  public int getPort() {
    if (port == -1) {
      throw new IllegalStateException("Server not initialized");
    }
    return port;
  }

  /** Builds the Netty ServerBootstrap, applies configuration, and binds the listening socket. */
  private void init(String hostToBind, int portToBind) {

    IOMode ioMode = IOMode.valueOf(conf.ioMode());
    EventLoopGroup bossGroup =
      NettyUtils.createEventLoop(ioMode, conf.serverThreads(), conf.getModuleName() + "-server");
    // A single event loop group serves as both acceptor ("boss") and worker group.
    EventLoopGroup workerGroup = bossGroup;

    PooledByteBufAllocator allocator = NettyUtils.createPooledByteBufAllocator(
      conf.preferDirectBufs(), true /* allowCache */, conf.serverThreads());

    bootstrap = new ServerBootstrap()
      .group(bossGroup, workerGroup)
      .channel(NettyUtils.getServerChannelClass(ioMode))
      .option(ChannelOption.ALLOCATOR, allocator)
      .option(ChannelOption.SO_REUSEADDR, !SystemUtils.IS_OS_WINDOWS)
      .childOption(ChannelOption.ALLOCATOR, allocator);

    this.metrics = new NettyMemoryMetrics(
      allocator, conf.getModuleName() + "-server", conf);

    if (conf.backLog() > 0) {
      bootstrap.option(ChannelOption.SO_BACKLOG, conf.backLog());
    }

    if (conf.receiveBuf() > 0) {
      bootstrap.childOption(ChannelOption.SO_RCVBUF, conf.receiveBuf());
    }

    if (conf.sendBuf() > 0) {
      bootstrap.childOption(ChannelOption.SO_SNDBUF, conf.sendBuf());
    }

    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
      @Override
      protected void initChannel(SocketChannel ch) {
        logger.debug("New connection accepted for remote address {}.", ch.remoteAddress());

        RpcHandler rpcHandler = appRpcHandler;
        // Loop variable renamed from "bootstrap" to avoid shadowing the ServerBootstrap field.
        for (TransportServerBootstrap serverBootstrap : bootstraps) {
          rpcHandler = serverBootstrap.doBootstrap(ch, rpcHandler);
        }
        context.initializePipeline(ch, rpcHandler);
      }
    });

    InetSocketAddress address = hostToBind == null ?
        new InetSocketAddress(portToBind): new InetSocketAddress(hostToBind, portToBind);
    channelFuture = bootstrap.bind(address);
    channelFuture.syncUninterruptibly();

    port = ((InetSocketAddress) channelFuture.channel().localAddress()).getPort();
    logger.debug("Shuffle server started on port: {}", port);
  }

  public MetricSet getAllMetrics() {
    return metrics;
  }

  @Override
  public void close() {
    if (channelFuture != null) {
      // close is a local operation and should finish within milliseconds; timeout just to be safe
      channelFuture.channel().close().awaitUninterruptibly(10, TimeUnit.SECONDS);
      channelFuture = null;
    }
    if (bootstrap != null && bootstrap.config().group() != null) {
      bootstrap.config().group().shutdownGracefully();
    }
    if (bootstrap != null && bootstrap.config().childGroup() != null) {
      bootstrap.config().childGroup().shutdownGracefully();
    }
    bootstrap = null;
  }
}
| 9,840 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import io.netty.channel.Channel;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.client.TransportClient;
/**
* StreamManager which allows registration of an Iterator<ManagedBuffer>, which are
* individually fetched as chunks by the client. Each registered buffer is one chunk.
*/
public class OneForOneStreamManager extends StreamManager {
  private static final Logger logger = LoggerFactory.getLogger(OneForOneStreamManager.class);

  // Source of unique (per-instance) stream IDs handed out by registerStream().
  private final AtomicLong nextStreamId;

  // All live streams keyed by stream ID. Entries are removed when a stream is fully
  // consumed (getChunk) or when its associated channel terminates (connectionTerminated).
  private final ConcurrentHashMap<Long, StreamState> streams;

  /** State of a single stream. */
  private static class StreamState {
    // App ID allowed to fetch this stream; a null appId skips the authorization check.
    final String appId;
    // Remaining buffers; each buffer served is exactly one chunk.
    final Iterator<ManagedBuffer> buffers;

    // The channel associated to the stream
    final Channel associatedChannel;

    // Used to keep track of the index of the buffer that the user has retrieved, just to ensure
    // that the caller only requests each chunk one at a time, in order.
    int curChunk = 0;

    // Used to keep track of the number of chunks being transferred and not finished yet.
    // NOTE(review): ++/-- on a volatile is not atomic; counts are best-effort, as before.
    volatile long chunksBeingTransferred = 0L;

    StreamState(String appId, Iterator<ManagedBuffer> buffers, Channel channel) {
      this.appId = appId;
      this.buffers = Preconditions.checkNotNull(buffers);
      this.associatedChannel = channel;
    }
  }

  public OneForOneStreamManager() {
    // For debugging purposes, start with a random stream id to help identifying different streams.
    // This does not need to be globally unique, only unique to this class.
    nextStreamId = new AtomicLong((long) new Random().nextInt(Integer.MAX_VALUE) * 1000);
    streams = new ConcurrentHashMap<>();
  }

  /**
   * Returns the next chunk of the given stream; chunks must be requested strictly in order.
   * The stream is unregistered once its final chunk has been handed out.
   *
   * @throws IllegalStateException if the stream is unknown/closed, the chunk index is
   *         out of order, or the request goes past the last chunk.
   */
  @Override
  public ManagedBuffer getChunk(long streamId, int chunkIndex) {
    StreamState state = streams.get(streamId);
    // Fix: an unknown or already-closed streamId previously surfaced as an opaque
    // NullPointerException on the state dereference below.
    if (state == null) {
      throw new IllegalStateException(String.format(
        "Requested chunk not available since streamId %s is closed", streamId));
    } else if (chunkIndex != state.curChunk) {
      throw new IllegalStateException(String.format(
        "Received out-of-order chunk index %s (expected %s)", chunkIndex, state.curChunk));
    } else if (!state.buffers.hasNext()) {
      throw new IllegalStateException(String.format(
        "Requested chunk index beyond end %s", chunkIndex));
    }
    state.curChunk += 1;
    ManagedBuffer nextChunk = state.buffers.next();
    if (!state.buffers.hasNext()) {
      // Last chunk handed out: the stream is complete and no longer tracked.
      logger.trace("Removing stream id {}", streamId);
      streams.remove(streamId);
    }
    return nextChunk;
  }

  @Override
  public ManagedBuffer openStream(String streamChunkId) {
    Pair<Long, Integer> streamChunkIdPair = parseStreamChunkId(streamChunkId);
    return getChunk(streamChunkIdPair.getLeft(), streamChunkIdPair.getRight());
  }

  /** Builds the composite "streamId_chunkIndex" id used when fetching a chunk as a stream. */
  public static String genStreamChunkId(long streamId, int chunkId) {
    return String.format("%d_%d", streamId, chunkId);
  }

  // Parse streamChunkId to be stream id and chunk id. This is used when fetch remote chunk as a
  // stream.
  public static Pair<Long, Integer> parseStreamChunkId(String streamChunkId) {
    String[] array = streamChunkId.split("_");
    assert array.length == 2:
      "Stream id and chunk index should be specified.";
    // parseLong/parseInt avoid the intermediate boxing of Long.valueOf/Integer.valueOf.
    long streamId = Long.parseLong(array[0]);
    int chunkIndex = Integer.parseInt(array[1]);
    return ImmutablePair.of(streamId, chunkIndex);
  }

  @Override
  public void connectionTerminated(Channel channel) {
    // Close all streams which have been associated with the channel. ConcurrentHashMap's
    // iterator is weakly consistent, so removing while iterating is safe.
    for (Map.Entry<Long, StreamState> entry: streams.entrySet()) {
      StreamState state = entry.getValue();
      if (state.associatedChannel == channel) {
        streams.remove(entry.getKey());

        // Release all remaining buffers.
        while (state.buffers.hasNext()) {
          state.buffers.next().release();
        }
      }
    }
  }

  @Override
  public void checkAuthorization(TransportClient client, long streamId) {
    // Authorization is only enforced for authenticated clients (those with a client ID).
    if (client.getClientId() != null) {
      StreamState state = streams.get(streamId);
      Preconditions.checkArgument(state != null, "Unknown stream ID.");
      if (!client.getClientId().equals(state.appId)) {
        throw new SecurityException(String.format(
          "Client %s not authorized to read stream %d (app %s).",
          client.getClientId(),
          streamId,
          state.appId));
      }
    }
  }

  @Override
  public void chunkBeingSent(long streamId) {
    StreamState streamState = streams.get(streamId);
    if (streamState != null) {
      streamState.chunksBeingTransferred++;
    }
  }

  @Override
  public void streamBeingSent(String streamId) {
    chunkBeingSent(parseStreamChunkId(streamId).getLeft());
  }

  @Override
  public void chunkSent(long streamId) {
    StreamState streamState = streams.get(streamId);
    if (streamState != null) {
      streamState.chunksBeingTransferred--;
    }
  }

  @Override
  public void streamSent(String streamId) {
    chunkSent(OneForOneStreamManager.parseStreamChunkId(streamId).getLeft());
  }

  @Override
  public long chunksBeingTransferred() {
    long sum = 0L;
    for (StreamState streamState: streams.values()) {
      sum += streamState.chunksBeingTransferred;
    }
    return sum;
  }

  /**
   * Registers a stream of ManagedBuffers which are served as individual chunks one at a time to
   * callers. Each ManagedBuffer will be release()'d after it is transferred on the wire. If a
   * client connection is closed before the iterator is fully drained, then the remaining buffers
   * will all be release()'d.
   *
   * If an app ID is provided, only callers who've authenticated with the given app ID will be
   * allowed to fetch from this stream.
   *
   * This method also associates the stream with a single client connection, which is guaranteed
   * to be the only reader of the stream. Once the connection is closed, the stream will never
   * be used again, enabling cleanup by `connectionTerminated`.
   */
  public long registerStream(String appId, Iterator<ManagedBuffer> buffers, Channel channel) {
    long myStreamId = nextStreamId.getAndIncrement();
    streams.put(myStreamId, new StreamState(appId, buffers, channel));
    return myStreamId;
  }

  @VisibleForTesting
  public int numStreamStates() {
    return streams.size();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
/** An on-the-wire transmittable message. */
public interface Message extends Encodable {
  /** Used to identify this request type. */
  Type type();

  /** An optional body for the message. */
  ManagedBuffer body();

  /** Whether to include the body of the message in the same frame as the message. */
  boolean isBodyInFrame();

  /** Preceding every serialized Message is its type, which allows us to deserialize it. */
  enum Type implements Encodable {
    ChunkFetchRequest(0), ChunkFetchSuccess(1), ChunkFetchFailure(2),
    RpcRequest(3), RpcResponse(4), RpcFailure(5),
    StreamRequest(6), StreamResponse(7), StreamFailure(8),
    OneWayMessage(9), UploadStream(10), User(-1);

    // On-the-wire tag; one signed byte, so at most 128 non-negative ids are possible.
    private final byte id;

    Type(int id) {
      assert id < 128 : "Cannot have more than 128 message types";
      this.id = (byte) id;
    }

    public byte id() {
      return id;
    }

    /** A type tag occupies exactly one byte on the wire. */
    @Override
    public int encodedLength() {
      return 1;
    }

    @Override
    public void encode(ByteBuf buf) {
      buf.writeByte(id);
    }

    /** Reads one byte from the buffer and maps it back to the corresponding Type. */
    public static Type decode(ByteBuf buf) {
      byte msgId = buf.readByte();
      switch (msgId) {
        case 0: return ChunkFetchRequest;
        case 1: return ChunkFetchSuccess;
        case 2: return ChunkFetchFailure;
        case 3: return RpcRequest;
        case 4: return RpcResponse;
        case 5: return RpcFailure;
        case 6: return StreamRequest;
        case 7: return StreamResponse;
        case 8: return StreamFailure;
        case 9: return OneWayMessage;
        case 10: return UploadStream;
        case -1: throw new IllegalArgumentException("User type messages cannot be decoded.");
        default: throw new IllegalArgumentException("Unknown message type: " + msgId);
      }
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
/** Messages from the client to the server. */
public interface RequestMessage extends Message {
  // Marker ("token") interface: carries no members of its own, only tags a Message
  // as flowing client -> server so handlers can dispatch on direction.
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/**
* Response to {@link ChunkFetchRequest} when a chunk exists and has been successfully fetched.
*
* Note that the server-side encoding of this messages does NOT include the buffer itself, as this
* may be written by Netty in a more efficient manner (i.e., zero-copy write).
* Similarly, the client-side decoding will reuse the Netty ByteBuf as the buffer.
*/
public final class ChunkFetchSuccess extends AbstractResponseMessage {
  public final StreamChunkId streamChunkId;

  public ChunkFetchSuccess(StreamChunkId streamChunkId, ManagedBuffer buffer) {
    // The buffer is the message body and IS part of the frame (zero-copy written by Netty).
    super(buffer, true);
    this.streamChunkId = streamChunkId;
  }

  @Override
  public Type type() {
    return Type.ChunkFetchSuccess;
  }

  @Override
  public int encodedLength() {
    // Only the chunk id is counted; the buffer is written separately by the encoder.
    return streamChunkId.encodedLength();
  }

  /** Encoding does NOT include 'buffer' itself. See {@link MessageEncoder}. */
  @Override
  public void encode(ByteBuf buf) {
    streamChunkId.encode(buf);
  }

  @Override
  public ResponseMessage createFailureResponse(String error) {
    return new ChunkFetchFailure(streamChunkId, error);
  }

  /** Decoding uses the given ByteBuf as our data, and will retain() it. */
  public static ChunkFetchSuccess decode(ByteBuf buf) {
    StreamChunkId chunkId = StreamChunkId.decode(buf);
    // Bump the refcount since the managed buffer keeps a view of this ByteBuf alive.
    buf.retain();
    NettyManagedBuffer wrappedBuf = new NettyManagedBuffer(buf.duplicate());
    return new ChunkFetchSuccess(chunkId, wrappedBuf);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(streamChunkId, body());
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof ChunkFetchSuccess)) {
      return false;
    }
    ChunkFetchSuccess that = (ChunkFetchSuccess) other;
    return streamChunkId.equals(that.streamChunkId) && super.equals(that);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("streamChunkId", streamChunkId)
      .add("buffer", body())
      .toString();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import java.util.List;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToMessageDecoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Decoder used by the client side to decode server-to-client responses.
 * This decoder is stateless so it is safe to be shared by multiple threads.
 */
@ChannelHandler.Sharable
public final class MessageDecoder extends MessageToMessageDecoder<ByteBuf> {

  private static final Logger logger = LoggerFactory.getLogger(MessageDecoder.class);

  // Stateless, hence safe to share a single instance across pipelines.
  public static final MessageDecoder INSTANCE = new MessageDecoder();

  private MessageDecoder() {}

  @Override
  public void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
    // Every frame starts with a one-byte type tag; consume it, then parse the payload.
    Message.Type msgType = Message.Type.decode(in);
    Message decoded = decode(msgType, in);
    assert decoded.type() == msgType;
    logger.trace("Received message {}: {}", msgType, decoded);
    out.add(decoded);
  }

  /** Dispatches to the per-message-type decoder for the already-read type tag. */
  private Message decode(Message.Type msgType, ByteBuf in) {
    switch (msgType) {
      // Requests (client -> server)
      case ChunkFetchRequest: return ChunkFetchRequest.decode(in);
      case RpcRequest: return RpcRequest.decode(in);
      case OneWayMessage: return OneWayMessage.decode(in);
      case StreamRequest: return StreamRequest.decode(in);
      case UploadStream: return UploadStream.decode(in);

      // Responses (server -> client)
      case ChunkFetchSuccess: return ChunkFetchSuccess.decode(in);
      case ChunkFetchFailure: return ChunkFetchFailure.decode(in);
      case RpcResponse: return RpcResponse.decode(in);
      case RpcFailure: return RpcFailure.decode(in);
      case StreamResponse: return StreamResponse.decode(in);
      case StreamFailure: return StreamFailure.decode(in);

      default: throw new IllegalArgumentException("Unexpected message type: " + msgType);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import org.apache.spark.network.buffer.ManagedBuffer;
/**
* Abstract class for messages which optionally contain a body kept in a separate buffer.
*/
public abstract class AbstractMessage implements Message {
  // May be null for messages that carry no body.
  private final ManagedBuffer body;
  private final boolean isBodyInFrame;

  /** Creates a message with no body. */
  protected AbstractMessage() {
    this(null, false);
  }

  protected AbstractMessage(ManagedBuffer body, boolean isBodyInFrame) {
    this.body = body;
    this.isBodyInFrame = isBodyInFrame;
  }

  @Override
  public ManagedBuffer body() {
    return body;
  }

  @Override
  public boolean isBodyInFrame() {
    return isBodyInFrame;
  }

  /** Equality helper for subclasses: compares framing flag and (possibly null) bodies. */
  protected boolean equals(AbstractMessage other) {
    boolean sameFraming = isBodyInFrame == other.isBodyInFrame;
    return sameFraming && Objects.equal(body, other.body);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
/**
* Request to fetch a sequence of a single chunk of a stream. This will correspond to a single
* {@link org.apache.spark.network.protocol.ResponseMessage} (either success or failure).
*/
public final class ChunkFetchRequest extends AbstractMessage implements RequestMessage {
  public final StreamChunkId streamChunkId;

  public ChunkFetchRequest(StreamChunkId streamChunkId) {
    this.streamChunkId = streamChunkId;
  }

  @Override
  public Type type() {
    return Type.ChunkFetchRequest;
  }

  @Override
  public int encodedLength() {
    // The request is nothing but the chunk id itself.
    return streamChunkId.encodedLength();
  }

  @Override
  public void encode(ByteBuf buf) {
    streamChunkId.encode(buf);
  }

  public static ChunkFetchRequest decode(ByteBuf buf) {
    return new ChunkFetchRequest(StreamChunkId.decode(buf));
  }

  @Override
  public int hashCode() {
    return streamChunkId.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof ChunkFetchRequest)) {
      return false;
    }
    ChunkFetchRequest that = (ChunkFetchRequest) other;
    return streamChunkId.equals(that.streamChunkId);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("streamChunkId", streamChunkId)
      .toString();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
/**
* Encapsulates a request for a particular chunk of a stream.
*/
public final class StreamChunkId implements Encodable {
  public final long streamId;
  public final int chunkIndex;

  public StreamChunkId(long streamId, int chunkIndex) {
    this.streamId = streamId;
    this.chunkIndex = chunkIndex;
  }

  @Override
  public int encodedLength() {
    // 8 bytes (long streamId) + 4 bytes (int chunkIndex).
    return 8 + 4;
  }

  // Fix: added the missing @Override annotation — this implements Encodable.encode, and
  // the sibling encodedLength() is already annotated; the annotation lets the compiler
  // catch any future signature drift.
  @Override
  public void encode(ByteBuf buffer) {
    buffer.writeLong(streamId);
    buffer.writeInt(chunkIndex);
  }

  /** Reads a StreamChunkId back in the exact layout written by {@link #encode}. */
  public static StreamChunkId decode(ByteBuf buffer) {
    assert buffer.readableBytes() >= 8 + 4;
    long streamId = buffer.readLong();
    int chunkIndex = buffer.readInt();
    return new StreamChunkId(streamId, chunkIndex);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(streamId, chunkIndex);
  }

  @Override
  public boolean equals(Object other) {
    if (other instanceof StreamChunkId) {
      StreamChunkId o = (StreamChunkId) other;
      return streamId == o.streamId && chunkIndex == o.chunkIndex;
    }
    return false;
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("streamId", streamId)
      .add("chunkIndex", chunkIndex)
      .toString();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
/**
* Message indicating an error when transferring a stream.
*/
public final class StreamFailure extends AbstractMessage implements ResponseMessage {
  public final String streamId;
  public final String error;

  public StreamFailure(String streamId, String error) {
    this.streamId = streamId;
    this.error = error;
  }

  @Override
  public Type type() {
    return Type.StreamFailure;
  }

  @Override
  public int encodedLength() {
    // Two length-prefixed strings, back to back.
    return Encoders.Strings.encodedLength(streamId) + Encoders.Strings.encodedLength(error);
  }

  @Override
  public void encode(ByteBuf buf) {
    Encoders.Strings.encode(buf, streamId);
    Encoders.Strings.encode(buf, error);
  }

  public static StreamFailure decode(ByteBuf buf) {
    // Decode in the same order as encode(): streamId first, then the error text.
    String id = Encoders.Strings.decode(buf);
    String err = Encoders.Strings.decode(buf);
    return new StreamFailure(id, err);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(streamId, error);
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof StreamFailure)) {
      return false;
    }
    StreamFailure that = (StreamFailure) other;
    return streamId.equals(that.streamId) && error.equals(that.error);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("streamId", streamId)
      .add("error", error)
      .toString();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
/** Response to {@link RpcRequest} for a failed RPC. */
public final class RpcFailure extends AbstractMessage implements ResponseMessage {
  public final long requestId;
  public final String errorString;

  public RpcFailure(long requestId, String errorString) {
    this.requestId = requestId;
    this.errorString = errorString;
  }

  @Override
  public Type type() {
    return Type.RpcFailure;
  }

  @Override
  public int encodedLength() {
    // 8 bytes for the request id plus the length-prefixed error string.
    return 8 + Encoders.Strings.encodedLength(errorString);
  }

  @Override
  public void encode(ByteBuf buf) {
    buf.writeLong(requestId);
    Encoders.Strings.encode(buf, errorString);
  }

  public static RpcFailure decode(ByteBuf buf) {
    // Mirror of encode(): request id first, then the error string.
    long id = buf.readLong();
    String err = Encoders.Strings.decode(buf);
    return new RpcFailure(id, err);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(requestId, errorString);
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof RpcFailure)) {
      return false;
    }
    RpcFailure that = (RpcFailure) other;
    return requestId == that.requestId && errorString.equals(that.errorString);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("requestId", requestId)
      .add("errorString", errorString)
      .toString();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import java.io.IOException;
import java.nio.ByteBuffer;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/**
* An RPC with data that is sent outside of the frame, so it can be read as a stream.
*/
public final class UploadStream extends AbstractMessage implements RequestMessage {
  /** Used to link an RPC request with its response. */
  public final long requestId;
  // In-frame metadata describing the out-of-frame body.
  public final ManagedBuffer meta;
  // Size of the body that follows the frame; the body itself is never part of this encoding.
  public final long bodyByteCount;

  public UploadStream(long requestId, ManagedBuffer meta, ManagedBuffer body) {
    super(body, false); // body is *not* included in the frame
    this.requestId = requestId;
    this.meta = meta;
    bodyByteCount = body.size();
  }

  // this version is called when decoding the bytes on the receiving end. The body is handled
  // separately.
  private UploadStream(long requestId, ManagedBuffer meta, long bodyByteCount) {
    super(null, false);
    this.requestId = requestId;
    this.meta = meta;
    this.bodyByteCount = bodyByteCount;
  }

  @Override
  public Type type() { return Type.UploadStream; }

  @Override
  public int encodedLength() {
    // the requestId, meta size, meta and bodyByteCount (body is not included)
    return 8 + 4 + ((int) meta.size()) + 8;
  }

  @Override
  public void encode(ByteBuf buf) {
    buf.writeLong(requestId);
    try {
      // Length-prefix the metadata so the decoder knows how much to slice off.
      ByteBuffer metaBuf = meta.nioByteBuffer();
      buf.writeInt(metaBuf.remaining());
      buf.writeBytes(metaBuf);
    } catch (IOException io) {
      // nioByteBuffer() may fail for file-backed buffers; surface as unchecked with cause.
      throw new RuntimeException(io);
    }
    buf.writeLong(bodyByteCount);
  }

  public static UploadStream decode(ByteBuf buf) {
    long requestId = buf.readLong();
    int metaSize = buf.readInt();
    // readRetainedSlice bumps the refcount so the metadata view outlives this decode call.
    ManagedBuffer meta = new NettyManagedBuffer(buf.readRetainedSlice(metaSize));
    long bodyByteCount = buf.readLong();
    // This is called by the frame decoder, so the data is still null. We need a StreamInterceptor
    // to read the data.
    return new UploadStream(requestId, meta, bodyByteCount);
  }

  @Override
  public int hashCode() {
    // Identity is the request id alone; consistent with equals() below when ids match.
    return Long.hashCode(requestId);
  }

  @Override
  public boolean equals(Object other) {
    if (other instanceof UploadStream) {
      UploadStream o = (UploadStream) other;
      // NOTE(review): super.equals compares bodies, which are null on the decoded side —
      // an encoded and a decoded instance with the same id will compare unequal; confirm intended.
      return requestId == o.requestId && super.equals(o);
    }
    return false;
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("requestId", requestId)
      .add("body", body())
      .toString();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
/** Messages from the server to the client. */
public interface ResponseMessage extends Message {
  // Marker ("token") interface: carries no members of its own, only tags a Message
  // as flowing server -> client so handlers can dispatch on direction.
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
/**
 * Response to {@link StreamRequest} when the stream has been successfully opened.
 * <p>
 * Note that the stream data itself is not carried inside this message; the sender writes it
 * out separately. The receiver is expected to install a temporary channel handler that will
 * consume exactly the number of bytes this message says the stream has.
 */
public final class StreamResponse extends AbstractResponseMessage {
  /** Identifier of the stream being opened. */
  public final String streamId;
  /** Number of bytes of stream data that will follow. */
  public final long byteCount;

  public StreamResponse(String streamId, long byteCount, ManagedBuffer buffer) {
    super(buffer, false);
    this.byteCount = byteCount;
    this.streamId = streamId;
  }

  @Override
  public Type type() { return Type.StreamResponse; }

  @Override
  public int encodedLength() {
    // 8 bytes for the long byte count, plus the length-prefixed UTF-8 stream id.
    return 8 + Encoders.Strings.encodedLength(streamId);
  }

  /** Encoding does NOT include 'buffer' itself. See {@link MessageEncoder}. */
  @Override
  public void encode(ByteBuf buf) {
    Encoders.Strings.encode(buf, streamId);
    buf.writeLong(byteCount);
  }

  @Override
  public ResponseMessage createFailureResponse(String error) {
    return new StreamFailure(streamId, error);
  }

  public static StreamResponse decode(ByteBuf buf) {
    String id = Encoders.Strings.decode(buf);
    long count = buf.readLong();
    // The body travels out of band, so it is null at decode time.
    return new StreamResponse(id, count, null);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(byteCount, streamId);
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof StreamResponse)) {
      return false;
    }
    StreamResponse that = (StreamResponse) other;
    return byteCount == that.byteCount && streamId.equals(that.streamId);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("streamId", streamId)
      .add("byteCount", byteCount)
      .add("body", body())
      .toString();
  }
}
| 9,853 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/StreamRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
/**
 * Request to stream data from the remote end.
 * <p>
 * The stream ID is an arbitrary string that needs to be negotiated between the two endpoints
 * before the data can be streamed.
 */
public final class StreamRequest extends AbstractMessage implements RequestMessage {
  /** Identifier of the stream to fetch; agreed upon out of band. */
  public final String streamId;

  public StreamRequest(String streamId) {
    this.streamId = streamId;
  }

  @Override
  public Type type() { return Type.StreamRequest; }

  @Override
  public int encodedLength() {
    // The wire format is just the length-prefixed UTF-8 stream id.
    return Encoders.Strings.encodedLength(streamId);
  }

  @Override
  public void encode(ByteBuf buf) {
    Encoders.Strings.encode(buf, streamId);
  }

  public static StreamRequest decode(ByteBuf buf) {
    return new StreamRequest(Encoders.Strings.decode(buf));
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(streamId);
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof StreamRequest)) {
      return false;
    }
    return streamId.equals(((StreamRequest) other).streamId);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("streamId", streamId)
      .toString();
  }
}
| 9,854 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/AbstractResponseMessage.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import org.apache.spark.network.buffer.ManagedBuffer;
/**
 * Abstract class for response messages.
 *
 * In addition to the body/frame handling inherited from {@link AbstractMessage}, every
 * response type must be able to describe itself as a failure, so that encoding errors can
 * be reported back to the client (see {@link MessageEncoder}).
 */
public abstract class AbstractResponseMessage extends AbstractMessage implements ResponseMessage {
  protected AbstractResponseMessage(ManagedBuffer body, boolean isBodyInFrame) {
    super(body, isBodyInFrame);
  }
  /**
   * Builds the failure counterpart of this response (e.g. RpcResponse -> RpcFailure)
   * carrying the given error string, preserving whatever id links it to its request.
   */
  public abstract ResponseMessage createFailureResponse(String error);
}
| 9,855 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/RpcRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/**
 * A generic RPC which is handled by a remote {@link org.apache.spark.network.server.RpcHandler}.
 * This will correspond to a single
 * {@link org.apache.spark.network.protocol.ResponseMessage} (either success or failure).
 */
public final class RpcRequest extends AbstractMessage implements RequestMessage {
  /** Used to link an RPC request with its response. */
  public final long requestId;

  public RpcRequest(long requestId, ManagedBuffer message) {
    super(message, true);
    this.requestId = requestId;
  }

  @Override
  public Type type() { return Type.RpcRequest; }

  @Override
  public int encodedLength() {
    // 8 bytes of request id plus a 4-byte body size. The size field is redundant (the frame
    // length already covers the body) but is kept for wire compatibility with older versions
    // of RpcRequest that used Encoders.ByteArrays.
    return 8 + 4;
  }

  @Override
  public void encode(ByteBuf buf) {
    buf.writeLong(requestId);
    // Legacy body-size field; see comment in encodedLength().
    buf.writeInt((int) body().size());
  }

  public static RpcRequest decode(ByteBuf buf) {
    long id = buf.readLong();
    // Skip the legacy body-size field; see comment in encodedLength().
    buf.readInt();
    return new RpcRequest(id, new NettyManagedBuffer(buf.retain()));
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(requestId, body());
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof RpcRequest)) {
      return false;
    }
    RpcRequest that = (RpcRequest) other;
    return requestId == that.requestId && super.equals(that);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("requestId", requestId)
      .add("body", body())
      .toString();
  }
}
| 9,856 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/Encodable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import io.netty.buffer.ByteBuf;
/**
 * Interface for an object which can be encoded into a ByteBuf. Multiple Encodable objects are
 * stored in a single, pre-allocated ByteBuf, so Encodables must also provide their length.
 *
 * Encodable objects should provide a static "decode(ByteBuf)" method which is invoked by
 * {@link MessageDecoder}. During decoding, if the object uses the ByteBuf as its data (rather than
 * just copying data from it), then you must retain() the ByteBuf.
 *
 * Additionally, when adding a new Encodable Message, add it to {@link Message.Type}.
 */
public interface Encodable {
  /** Number of bytes of the encoded form of this object. */
  int encodedLength();
  /**
   * Serializes this object by writing into the given ByteBuf.
   * This method must write exactly encodedLength() bytes -- callers pre-size buffers
   * based on encodedLength(), so any mismatch corrupts the frame.
   */
  void encode(ByteBuf buf);
}
| 9,857 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/Encoders.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import java.nio.charset.StandardCharsets;
import io.netty.buffer.ByteBuf;
/** Provides a canonical set of Encoders for simple types. */
public class Encoders {

  /** Strings are encoded with their length followed by UTF-8 bytes. */
  public static class Strings {
    public static int encodedLength(String s) {
      // 4-byte length prefix plus the UTF-8 payload.
      return 4 + s.getBytes(StandardCharsets.UTF_8).length;
    }

    public static void encode(ByteBuf buf, String s) {
      byte[] utf8 = s.getBytes(StandardCharsets.UTF_8);
      buf.writeInt(utf8.length);
      buf.writeBytes(utf8);
    }

    public static String decode(ByteBuf buf) {
      byte[] utf8 = new byte[buf.readInt()];
      buf.readBytes(utf8);
      return new String(utf8, StandardCharsets.UTF_8);
    }
  }

  /** Byte arrays are encoded with their length followed by bytes. */
  public static class ByteArrays {
    public static int encodedLength(byte[] arr) {
      // 4-byte length prefix plus the raw bytes.
      return 4 + arr.length;
    }

    public static void encode(ByteBuf buf, byte[] arr) {
      buf.writeInt(arr.length);
      buf.writeBytes(arr);
    }

    public static byte[] decode(ByteBuf buf) {
      byte[] result = new byte[buf.readInt()];
      buf.readBytes(result);
      return result;
    }
  }

  /** String arrays are encoded with the number of strings followed by per-String encoding. */
  public static class StringArrays {
    public static int encodedLength(String[] strings) {
      // 4-byte count prefix plus each element's own length-prefixed encoding.
      int total = 4;
      for (String s : strings) {
        total += Strings.encodedLength(s);
      }
      return total;
    }

    public static void encode(ByteBuf buf, String[] strings) {
      buf.writeInt(strings.length);
      for (String s : strings) {
        Strings.encode(buf, s);
      }
    }

    public static String[] decode(ByteBuf buf) {
      int count = buf.readInt();
      String[] result = new String[count];
      for (int i = 0; i < count; i++) {
        result[i] = Strings.decode(buf);
      }
      return result;
    }
  }
}
| 9,858 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageEncoder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import java.util.List;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToMessageEncoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Encoder used by the server side to encode server-to-client responses.
 * This encoder is stateless so it is safe to be shared by multiple threads.
 */
@ChannelHandler.Sharable
public final class MessageEncoder extends MessageToMessageEncoder<Message> {
  private static final Logger logger = LoggerFactory.getLogger(MessageEncoder.class);
  // Stateless, so a single shared instance suffices; the constructor is private.
  public static final MessageEncoder INSTANCE = new MessageEncoder();
  private MessageEncoder() {}
  /***
   * Encodes a Message by invoking its encode() method. For non-data messages, we will add one
   * ByteBuf to 'out' containing the total frame length, the message type, and the message itself.
   * In the case of a ChunkFetchSuccess, we will also add the ManagedBuffer corresponding to the
   * data to 'out', in order to enable zero-copy transfer.
   */
  @Override
  public void encode(ChannelHandlerContext ctx, Message in, List<Object> out) throws Exception {
    Object body = null;
    long bodyLength = 0;
    boolean isBodyInFrame = false;
    // If the message has a body, take it out to enable zero-copy transfer for the payload.
    if (in.body() != null) {
      try {
        bodyLength = in.body().size();
        body = in.body().convertToNetty();
        isBodyInFrame = in.isBodyInFrame();
      } catch (Exception e) {
        // Conversion failed: release the body we can no longer send.
        in.body().release();
        if (in instanceof AbstractResponseMessage) {
          AbstractResponseMessage resp = (AbstractResponseMessage) in;
          // Re-encode this message as a failure response so the client learns of the error;
          // this recurses into encode() with a body-less message, so it cannot loop again.
          String error = e.getMessage() != null ? e.getMessage() : "null";
          logger.error(String.format("Error processing %s for client %s",
            in, ctx.channel().remoteAddress()), e);
          encode(ctx, resp.createFailureResponse(error), out);
        } else {
          // Requests have no failure counterpart; propagate to the pipeline.
          throw e;
        }
        return;
      }
    }
    Message.Type msgType = in.type();
    // All messages have the frame length, message type, and message itself. The frame length
    // may optionally include the length of the body data, depending on what message is being
    // sent.
    int headerLength = 8 + msgType.encodedLength() + in.encodedLength();
    long frameLength = headerLength + (isBodyInFrame ? bodyLength : 0);
    ByteBuf header = ctx.alloc().heapBuffer(headerLength);
    header.writeLong(frameLength);
    msgType.encode(header);
    in.encode(header);
    // encodedLength() promised an exact size; a non-empty remainder means a broken Encodable.
    assert header.writableBytes() == 0;
    if (body != null) {
      // We transfer ownership of the reference on in.body() to MessageWithHeader.
      // This reference will be freed when MessageWithHeader.deallocate() is called.
      out.add(new MessageWithHeader(in.body(), header, body, bodyLength));
    } else {
      out.add(header);
    }
  }
}
| 9,859 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/RpcResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/** Response to {@link RpcRequest} for a successful RPC. */
public final class RpcResponse extends AbstractResponseMessage {
  /** Matches the requestId of the {@link RpcRequest} this answers. */
  public final long requestId;

  public RpcResponse(long requestId, ManagedBuffer message) {
    super(message, true);
    this.requestId = requestId;
  }

  @Override
  public Type type() { return Type.RpcResponse; }

  @Override
  public int encodedLength() {
    // 8 bytes of request id plus a 4-byte body size. The size field is redundant (the frame
    // length already covers the body) but is kept for wire compatibility with older versions
    // that used Encoders.ByteArrays.
    return 8 + 4;
  }

  @Override
  public void encode(ByteBuf buf) {
    buf.writeLong(requestId);
    // Legacy body-size field; see comment in encodedLength().
    buf.writeInt((int) body().size());
  }

  @Override
  public ResponseMessage createFailureResponse(String error) {
    return new RpcFailure(requestId, error);
  }

  public static RpcResponse decode(ByteBuf buf) {
    long id = buf.readLong();
    // Skip the legacy body-size field; see comment in encodedLength().
    buf.readInt();
    return new RpcResponse(id, new NettyManagedBuffer(buf.retain()));
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(requestId, body());
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof RpcResponse)) {
      return false;
    }
    RpcResponse that = (RpcResponse) other;
    return requestId == that.requestId && super.equals(that);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("requestId", requestId)
      .add("body", body())
      .toString();
  }
}
| 9,860 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/ChunkFetchFailure.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
/**
* Response to {@link ChunkFetchRequest} when there is an error fetching the chunk.
*/
public final class ChunkFetchFailure extends AbstractMessage implements ResponseMessage {
public final StreamChunkId streamChunkId;
public final String errorString;
public ChunkFetchFailure(StreamChunkId streamChunkId, String errorString) {
this.streamChunkId = streamChunkId;
this.errorString = errorString;
}
@Override
public Type type() { return Type.ChunkFetchFailure; }
@Override
public int encodedLength() {
return streamChunkId.encodedLength() + Encoders.Strings.encodedLength(errorString);
}
@Override
public void encode(ByteBuf buf) {
streamChunkId.encode(buf);
Encoders.Strings.encode(buf, errorString);
}
public static ChunkFetchFailure decode(ByteBuf buf) {
StreamChunkId streamChunkId = StreamChunkId.decode(buf);
String errorString = Encoders.Strings.decode(buf);
return new ChunkFetchFailure(streamChunkId, errorString);
}
@Override
public int hashCode() {
return Objects.hashCode(streamChunkId, errorString);
}
@Override
public boolean equals(Object other) {
if (other instanceof ChunkFetchFailure) {
ChunkFetchFailure o = (ChunkFetchFailure) other;
return streamChunkId.equals(o.streamChunkId) && errorString.equals(o.errorString);
}
return false;
}
@Override
public String toString() {
return Objects.toStringHelper(this)
.add("streamChunkId", streamChunkId)
.add("errorString", errorString)
.toString();
}
}
| 9,861 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageWithHeader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import javax.annotation.Nullable;
import com.google.common.base.Preconditions;
import io.netty.buffer.ByteBuf;
import io.netty.channel.FileRegion;
import io.netty.util.ReferenceCountUtil;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.util.AbstractFileRegion;
/**
 * A wrapper message that holds two separate pieces (a header and a body).
 *
 * The header must be a ByteBuf, while the body can be a ByteBuf or a FileRegion.
 */
class MessageWithHeader extends AbstractFileRegion {
  // ManagedBuffer backing the body; may be null iff the body is a FileRegion.
  // A reference is held so the underlying resource stays alive until deallocate().
  @Nullable private final ManagedBuffer managedBuffer;
  private final ByteBuf header;
  // Snapshot of header.readableBytes() at construction; used for count() and for the
  // body-relative offset in transferTo() even after the header buffer has been drained.
  private final int headerLength;
  // Either a ByteBuf or a FileRegion (validated in the constructor).
  private final Object body;
  private final long bodyLength;
  // Running total of header + body bytes written, accumulated across transferTo() calls.
  private long totalBytesTransferred;
  /**
   * When the write buffer size is larger than this limit, I/O will be done in chunks of this size.
   * The size should not be too large as it will waste underlying memory copy. e.g. If network
   * available buffer is smaller than this limit, the data cannot be sent within one single write
   * operation while it still will make memory copy with this size.
   */
  private static final int NIO_BUFFER_LIMIT = 256 * 1024;
  /**
   * Construct a new MessageWithHeader.
   *
   * @param managedBuffer the {@link ManagedBuffer} that the message body came from. This needs to
   *                      be passed in so that the buffer can be freed when this message is
   *                      deallocated. Ownership of the caller's reference to this buffer is
   *                      transferred to this class, so if the caller wants to continue to use the
   *                      ManagedBuffer in other messages then they will need to call retain() on
   *                      it before passing it to this constructor. This may be null if and only if
   *                      `body` is a {@link FileRegion}.
   * @param header the message header.
   * @param body the message body. Must be either a {@link ByteBuf} or a {@link FileRegion}.
   * @param bodyLength the length of the message body, in bytes.
   */
  MessageWithHeader(
      @Nullable ManagedBuffer managedBuffer,
      ByteBuf header,
      Object body,
      long bodyLength) {
    Preconditions.checkArgument(body instanceof ByteBuf || body instanceof FileRegion,
      "Body must be a ByteBuf or a FileRegion.");
    this.managedBuffer = managedBuffer;
    this.header = header;
    this.headerLength = header.readableBytes();
    this.body = body;
    this.bodyLength = bodyLength;
  }
  // Total number of bytes this region will ever transfer (header + body).
  @Override
  public long count() {
    return headerLength + bodyLength;
  }
  @Override
  public long position() {
    return 0;
  }
  @Override
  public long transferred() {
    return totalBytesTransferred;
  }
  /**
   * This code is more complicated than you would think because we might require multiple
   * transferTo invocations in order to transfer a single MessageWithHeader to avoid busy waiting.
   *
   * The contract is that the caller will ensure position is properly set to the total number
   * of bytes transferred so far (i.e. value returned by transferred()).
   */
  @Override
  public long transferTo(final WritableByteChannel target, final long position) throws IOException {
    Preconditions.checkArgument(position == totalBytesTransferred, "Invalid position.");
    // Bytes written for header in this call.
    long writtenHeader = 0;
    if (header.readableBytes() > 0) {
      writtenHeader = copyByteBuf(header, target);
      totalBytesTransferred += writtenHeader;
      if (header.readableBytes() > 0) {
        // Channel accepted only part of the header; retry the remainder on the next call.
        return writtenHeader;
      }
    }
    // Bytes written for body in this call.
    long writtenBody = 0;
    if (body instanceof FileRegion) {
      // The body's offset is everything transferred so far minus the header bytes.
      writtenBody = ((FileRegion) body).transferTo(target, totalBytesTransferred - headerLength);
    } else if (body instanceof ByteBuf) {
      writtenBody = copyByteBuf((ByteBuf) body, target);
    }
    totalBytesTransferred += writtenBody;
    return writtenHeader + writtenBody;
  }
  // Releases every reference this wrapper owns: the header buffer, the body (if it is
  // reference-counted), and the backing ManagedBuffer when present.
  @Override
  protected void deallocate() {
    header.release();
    ReferenceCountUtil.release(body);
    if (managedBuffer != null) {
      managedBuffer.release();
    }
  }
  // Writes up to NIO_BUFFER_LIMIT readable bytes of `buf` to `target` and advances the
  // reader index by the number of bytes actually accepted by the channel.
  private int copyByteBuf(ByteBuf buf, WritableByteChannel target) throws IOException {
    // SPARK-24578: cap the sub-region's size of returned nio buffer to improve the performance
    // for the case that the passed-in buffer has too many components.
    int length = Math.min(buf.readableBytes(), NIO_BUFFER_LIMIT);
    // If the ByteBuf holds more then one ByteBuffer we should better call nioBuffers(...)
    // to eliminate extra memory copies.
    int written = 0;
    if (buf.nioBufferCount() == 1) {
      ByteBuffer buffer = buf.nioBuffer(buf.readerIndex(), length);
      written = target.write(buffer);
    } else {
      ByteBuffer[] buffers = buf.nioBuffers(buf.readerIndex(), length);
      for (ByteBuffer buffer: buffers) {
        int remaining = buffer.remaining();
        int w = target.write(buffer);
        written += w;
        if (w < remaining) {
          // Could not write all, we need to break now.
          break;
        }
      }
    }
    // Only skip what the channel actually consumed, so unwritten bytes stay readable.
    buf.skipBytes(written);
    return written;
  }
  @Override
  public MessageWithHeader touch(Object o) {
    super.touch(o);
    header.touch(o);
    ReferenceCountUtil.touch(body, o);
    return this;
  }
  // Retain/release are forwarded to every owned resource so their reference counts stay
  // in lockstep with this wrapper's own count.
  @Override
  public MessageWithHeader retain(int increment) {
    super.retain(increment);
    header.retain(increment);
    ReferenceCountUtil.retain(body, increment);
    if (managedBuffer != null) {
      // ManagedBuffer has no bulk retain, so apply it once per increment.
      for (int i = 0; i < increment; i++) {
        managedBuffer.retain();
      }
    }
    return this;
  }
  @Override
  public boolean release(int decrement) {
    header.release(decrement);
    ReferenceCountUtil.release(body, decrement);
    if (managedBuffer != null) {
      for (int i = 0; i < decrement; i++) {
        managedBuffer.release();
      }
    }
    return super.release(decrement);
  }
}
| 9,862 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/protocol/OneWayMessage.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/**
 * A RPC that does not expect a reply, which is handled by a remote
 * {@link org.apache.spark.network.server.RpcHandler}.
 */
public final class OneWayMessage extends AbstractMessage implements RequestMessage {
  public OneWayMessage(ManagedBuffer body) {
    super(body, true);
  }

  @Override
  public Type type() { return Type.OneWayMessage; }

  @Override
  public int encodedLength() {
    // A single 4-byte body size. The field is redundant (the frame length already covers
    // the body) but is kept for wire compatibility with older versions that used
    // Encoders.ByteArrays.
    return 4;
  }

  @Override
  public void encode(ByteBuf buf) {
    // Legacy body-size field; see comment in encodedLength().
    buf.writeInt((int) body().size());
  }

  public static OneWayMessage decode(ByteBuf buf) {
    // Skip the legacy body-size field; see comment in encodedLength().
    buf.readInt();
    return new OneWayMessage(new NettyManagedBuffer(buf.retain()));
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(body());
  }

  @Override
  public boolean equals(Object other) {
    // Only the body participates in equality; delegate to AbstractMessage.
    return other instanceof OneWayMessage && super.equals((OneWayMessage) other);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("body", body())
      .toString();
  }
}
| 9,863 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/client/ChunkFetchFailureException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
/**
 * General exception caused by a remote exception while fetching a chunk.
 */
public class ChunkFetchFailureException extends RuntimeException {
  /** @param errorMsg description of the remote failure; @param cause the underlying error. */
  public ChunkFetchFailureException(String errorMsg, Throwable cause) {
    super(errorMsg, cause);
  }
  /** @param errorMsg description of the remote failure, typically relayed from the server. */
  public ChunkFetchFailureException(String errorMsg) {
    super(errorMsg);
  }
}
| 9,864 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
import java.nio.ByteBuffer;
/**
 * Callback for the result of a single RPC. This will be invoked once with either success or
 * failure.
 */
public interface RpcResponseCallback {
  /**
   * Successful serialized result from server.
   *
   * After `onSuccess` returns, `response` will be recycled and its content will become invalid.
   * Please copy the content of `response` if you want to use it after `onSuccess` returns.
   */
  void onSuccess(ByteBuffer response);
  /**
   * Exception either propagated from server or raised on client side.
   *
   * Invoked at most once per RPC; when it is called, `onSuccess` will not be.
   */
  void onFailure(Throwable e);
}
| 9,865 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.protocol.Message;
import org.apache.spark.network.server.MessageHandler;
import org.apache.spark.network.util.TransportFrameDecoder;
/**
 * An interceptor that is registered with the frame decoder to feed stream data to a
 * callback.
 */
public class StreamInterceptor<T extends Message> implements TransportFrameDecoder.Interceptor {
  // Handler that owns this stream; used to clear per-stream state when the stream ends.
  private final MessageHandler<T> handler;
  // Identifier of the stream being received; echoed on every callback invocation.
  private final String streamId;
  // Total number of bytes expected for this stream.
  private final long byteCount;
  // Receives the data, completion, and failure notifications for the stream.
  private final StreamCallback callback;
  // Running count of bytes fed to the callback so far; compared against byteCount.
  private long bytesRead;
  public StreamInterceptor(
      MessageHandler<T> handler,
      String streamId,
      long byteCount,
      StreamCallback callback) {
    this.handler = handler;
    this.streamId = streamId;
    this.byteCount = byteCount;
    this.callback = callback;
    this.bytesRead = 0;
  }
  @Override
  public void exceptionCaught(Throwable cause) throws Exception {
    // Pipeline error mid-stream: clear stream state first, then notify the callback.
    deactivateStream();
    callback.onFailure(streamId, cause);
  }
  @Override
  public void channelInactive() throws Exception {
    // Connection dropped mid-stream; surface it to the callback as a closed-channel failure.
    deactivateStream();
    callback.onFailure(streamId, new ClosedChannelException());
  }
  private void deactivateStream() {
    if (handler instanceof TransportResponseHandler) {
      // we only have to do this for TransportResponseHandler as it exposes numOutstandingFetches
      // (there is no extra cleanup that needs to happen)
      ((TransportResponseHandler) handler).deactivateStream();
    }
  }
  /**
   * Feeds the next chunk of frame data to the stream callback.
   *
   * Returns true while more bytes are still expected, and false once exactly byteCount bytes
   * have been consumed, at which point the stream is completed.
   */
  @Override
  public boolean handle(ByteBuf buf) throws Exception {
    // Never consume past the end of this stream; any trailing bytes belong to the next frame.
    int toRead = (int) Math.min(buf.readableBytes(), byteCount - bytesRead);
    ByteBuffer nioBuffer = buf.readSlice(toRead).nioBuffer();
    // Capture remaining() before onData, since the callback may consume the buffer.
    int available = nioBuffer.remaining();
    callback.onData(streamId, nioBuffer);
    bytesRead += available;
    if (bytesRead > byteCount) {
      RuntimeException re = new IllegalStateException(String.format(
        "Read too many bytes? Expected %d, but read %d.", byteCount, bytesRead));
      callback.onFailure(streamId, re);
      deactivateStream();
      throw re;
    } else if (bytesRead == byteCount) {
      // Deactivate before onComplete so the handler no longer counts this stream as active
      // by the time the completion callback runs.
      deactivateStream();
      callback.onComplete(streamId);
    }
    return bytesRead != byteCount;
  }
}
| 9,866 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.util.concurrent.SettableFuture;
import io.netty.channel.Channel;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.protocol.*;
import static org.apache.spark.network.util.NettyUtils.getRemoteAddress;
/**
* Client for fetching consecutive chunks of a pre-negotiated stream. This API is intended to allow
* efficient transfer of a large amount of data, broken up into chunks with size ranging from
* hundreds of KB to a few MB.
*
* Note that while this client deals with the fetching of chunks from a stream (i.e., data plane),
* the actual setup of the streams is done outside the scope of the transport layer. The convenience
* method "sendRPC" is provided to enable control plane communication between the client and server
* to perform this setup.
*
* For example, a typical workflow might be:
* client.sendRPC(new OpenFile("/foo")) --> returns StreamId = 100
* client.fetchChunk(streamId = 100, chunkIndex = 0, callback)
* client.fetchChunk(streamId = 100, chunkIndex = 1, callback)
* ...
* client.sendRPC(new CloseStream(100))
*
* Construct an instance of TransportClient using {@link TransportClientFactory}. A single
* TransportClient may be used for multiple streams, but any given stream must be restricted to a
* single client, in order to avoid out-of-order responses.
*
* NB: This class is used to make requests to the server, while {@link TransportResponseHandler} is
* responsible for handling responses from the server.
*
* Concurrency: thread safe and can be called from multiple threads.
*/
public class TransportClient implements Closeable {
  private static final Logger logger = LoggerFactory.getLogger(TransportClient.class);

  private final Channel channel;
  private final TransportResponseHandler handler;
  @Nullable private String clientId;
  // Set by timeOut(); once true, isActive() reports false even if the channel is still open.
  private volatile boolean timedOut;

  public TransportClient(Channel channel, TransportResponseHandler handler) {
    this.channel = Preconditions.checkNotNull(channel);
    this.handler = Preconditions.checkNotNull(handler);
    this.timedOut = false;
  }

  public Channel getChannel() {
    return channel;
  }

  /** True if the underlying channel is usable and this client has not been marked timed out. */
  public boolean isActive() {
    return !timedOut && (channel.isOpen() || channel.isActive());
  }

  public SocketAddress getSocketAddress() {
    return channel.remoteAddress();
  }

  /**
   * Returns the ID used by the client to authenticate itself when authentication is enabled.
   *
   * @return The client ID, or null if authentication is disabled.
   */
  public String getClientId() {
    return clientId;
  }

  /**
   * Sets the authenticated client ID. This is meant to be used by the authentication layer.
   *
   * Trying to set a different client ID after it's been set will result in an exception.
   */
  public void setClientId(String id) {
    Preconditions.checkState(clientId == null, "Client ID has already been set.");
    this.clientId = id;
  }

  /**
   * Requests a single chunk from the remote side, from the pre-negotiated streamId.
   *
   * Chunk indices go from 0 onwards. It is valid to request the same chunk multiple times, though
   * some streams may not support this.
   *
   * Multiple fetchChunk requests may be outstanding simultaneously, and the chunks are guaranteed
   * to be returned in the same order that they were requested, assuming only a single
   * TransportClient is used to fetch the chunks.
   *
   * @param streamId Identifier that refers to a stream in the remote StreamManager. This should
   *                 be agreed upon by client and server beforehand.
   * @param chunkIndex 0-based index of the chunk to fetch
   * @param callback Callback invoked upon successful receipt of chunk, or upon any failure.
   */
  public void fetchChunk(
      long streamId,
      int chunkIndex,
      ChunkReceivedCallback callback) {
    if (logger.isDebugEnabled()) {
      logger.debug("Sending fetch chunk request {} to {}", chunkIndex, getRemoteAddress(channel));
    }

    StreamChunkId streamChunkId = new StreamChunkId(streamId, chunkIndex);
    StdChannelListener listener = new StdChannelListener(streamChunkId) {
      @Override
      void handleFailure(String errorMsg, Throwable cause) {
        handler.removeFetchRequest(streamChunkId);
        callback.onFailure(chunkIndex, new IOException(errorMsg, cause));
      }
    };
    // Register the callback before writing, so a fast response cannot arrive unregistered.
    handler.addFetchRequest(streamChunkId, callback);

    channel.writeAndFlush(new ChunkFetchRequest(streamChunkId)).addListener(listener);
  }

  /**
   * Request to stream the data with the given stream ID from the remote end.
   *
   * @param streamId The stream to fetch.
   * @param callback Object to call with the stream data.
   */
  public void stream(String streamId, StreamCallback callback) {
    StdChannelListener listener = new StdChannelListener(streamId) {
      @Override
      void handleFailure(String errorMsg, Throwable cause) throws Exception {
        callback.onFailure(streamId, new IOException(errorMsg, cause));
      }
    };
    if (logger.isDebugEnabled()) {
      logger.debug("Sending stream request for {} to {}", streamId, getRemoteAddress(channel));
    }

    // Need to synchronize here so that the callback is added to the queue and the RPC is
    // written to the socket atomically, so that callbacks are called in the right order
    // when responses arrive.
    synchronized (this) {
      handler.addStreamCallback(streamId, callback);
      channel.writeAndFlush(new StreamRequest(streamId)).addListener(listener);
    }
  }

  /**
   * Sends an opaque message to the RpcHandler on the server-side. The callback will be invoked
   * with the server's response or upon any failure.
   *
   * @param message The message to send.
   * @param callback Callback to handle the RPC's reply.
   * @return The RPC's id.
   */
  public long sendRpc(ByteBuffer message, RpcResponseCallback callback) {
    if (logger.isTraceEnabled()) {
      logger.trace("Sending RPC to {}", getRemoteAddress(channel));
    }

    long requestId = requestId();
    handler.addRpcRequest(requestId, callback);

    RpcChannelListener listener = new RpcChannelListener(requestId, callback);
    channel.writeAndFlush(new RpcRequest(requestId, new NioManagedBuffer(message)))
      .addListener(listener);

    return requestId;
  }

  /**
   * Send data to the remote end as a stream. This differs from stream() in that this is a request
   * to *send* data to the remote end, not to receive it from the remote.
   *
   * @param meta meta data associated with the stream, which will be read completely on the
   *             receiving end before the stream itself.
   * @param data this will be streamed to the remote end to allow for transferring large amounts
   *             of data without reading into memory.
   * @param callback handles the reply -- onSuccess will only be called when both message and data
   *                 are received successfully.
   */
  public long uploadStream(
      ManagedBuffer meta,
      ManagedBuffer data,
      RpcResponseCallback callback) {
    if (logger.isTraceEnabled()) {
      logger.trace("Sending RPC to {}", getRemoteAddress(channel));
    }

    long requestId = requestId();
    handler.addRpcRequest(requestId, callback);

    RpcChannelListener listener = new RpcChannelListener(requestId, callback);
    channel.writeAndFlush(new UploadStream(requestId, meta, data)).addListener(listener);

    return requestId;
  }

  /**
   * Synchronously sends an opaque message to the RpcHandler on the server-side, waiting for up to
   * a specified timeout for a response.
   */
  public ByteBuffer sendRpcSync(ByteBuffer message, long timeoutMs) {
    final SettableFuture<ByteBuffer> result = SettableFuture.create();

    sendRpc(message, new RpcResponseCallback() {
      @Override
      public void onSuccess(ByteBuffer response) {
        try {
          // Copy the response: the original buffer is recycled after onSuccess returns.
          ByteBuffer copy = ByteBuffer.allocate(response.remaining());
          copy.put(response);
          // flip "copy" to make it readable
          copy.flip();
          result.set(copy);
        } catch (Throwable t) {
          logger.warn("Error in responding RPC callback", t);
          result.setException(t);
        }
      }

      @Override
      public void onFailure(Throwable e) {
        result.setException(e);
      }
    });

    try {
      return result.get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
      throw Throwables.propagate(e.getCause());
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }

  /**
   * Sends an opaque message to the RpcHandler on the server-side. No reply is expected for the
   * message, and no delivery guarantees are made.
   *
   * @param message The message to send.
   */
  public void send(ByteBuffer message) {
    channel.writeAndFlush(new OneWayMessage(new NioManagedBuffer(message)));
  }

  /**
   * Removes any state associated with the given RPC.
   *
   * @param requestId The RPC id returned by {@link #sendRpc(ByteBuffer, RpcResponseCallback)}.
   */
  public void removeRpcRequest(long requestId) {
    handler.removeRpcRequest(requestId);
  }

  /** Mark this channel as having timed out. */
  public void timeOut() {
    this.timedOut = true;
  }

  @VisibleForTesting
  public TransportResponseHandler getHandler() {
    return handler;
  }

  @Override
  public void close() {
    // close is a local operation and should finish with milliseconds; timeout just to be safe
    channel.close().awaitUninterruptibly(10, TimeUnit.SECONDS);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("remoteAddress", channel.remoteAddress())
      .add("clientId", clientId)
      .add("isActive", isActive())
      .toString();
  }

  /** Generates a non-negative, effectively-unique request id for a new RPC. */
  private static long requestId() {
    return Math.abs(UUID.randomUUID().getLeastSignificantBits());
  }

  /**
   * Base listener for outgoing writes: logs the round trip on success, and on write failure
   * closes the channel and forwards the error to the subclass hook.
   */
  private class StdChannelListener
      implements GenericFutureListener<Future<? super Void>> {
    final long startTime;
    final Object requestId;

    StdChannelListener(Object requestId) {
      this.startTime = System.currentTimeMillis();
      this.requestId = requestId;
    }

    @Override
    public void operationComplete(Future<? super Void> future) throws Exception {
      if (future.isSuccess()) {
        if (logger.isTraceEnabled()) {
          long timeTaken = System.currentTimeMillis() - startTime;
          logger.trace("Sending request {} to {} took {} ms", requestId,
            getRemoteAddress(channel), timeTaken);
        }
      } else {
        String errorMsg = String.format("Failed to send RPC %s to %s: %s", requestId,
          getRemoteAddress(channel), future.cause());
        logger.error(errorMsg, future.cause());
        channel.close();
        try {
          handleFailure(errorMsg, future.cause());
        } catch (Exception e) {
          logger.error("Uncaught exception in RPC response callback handler!", e);
        }
      }
    }

    void handleFailure(String errorMsg, Throwable cause) throws Exception {}
  }

  /** Listener for RPC writes: on failure, deregisters the request and fails its callback. */
  private class RpcChannelListener extends StdChannelListener {
    final long rpcRequestId;
    final RpcResponseCallback callback;

    RpcChannelListener(long rpcRequestId, RpcResponseCallback callback) {
      super("RPC " + rpcRequestId);
      this.rpcRequestId = rpcRequestId;
      this.callback = callback;
    }

    @Override
    void handleFailure(String errorMsg, Throwable cause) {
      handler.removeRpcRequest(rpcRequestId);
      callback.onFailure(new IOException(errorMsg, cause));
    }
  }
}
| 9,867 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
import org.apache.spark.network.buffer.ManagedBuffer;
/**
 * Callback for the result of a single chunk result. For a single stream, the callbacks are
 * guaranteed to be called by the same thread in the same order as the requests for chunks were
 * made.
 *
 * Note that if a general stream failure occurs, all outstanding chunk requests may be failed.
 */
public interface ChunkReceivedCallback {
  /**
   * Called upon receipt of a particular chunk.
   *
   * The given buffer will initially have a refcount of 1, but will be release()'d as soon as this
   * call returns. You must therefore either retain() the buffer or copy its contents before
   * returning.
   */
  void onSuccess(int chunkIndex, ManagedBuffer buffer);
  /**
   * Called upon failure to fetch a particular chunk. Note that this may actually be called due
   * to failure to fetch a prior chunk in this stream.
   *
   * After receiving a failure, the stream may or may not be valid. The client should not assume
   * that the server's side of the stream has been closed.
   *
   * At most one of onSuccess/onFailure is invoked for a given chunkIndex.
   */
  void onFailure(int chunkIndex, Throwable e);
}
| 9,868 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import com.codahale.metrics.MetricSet;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.server.TransportChannelHandler;
import org.apache.spark.network.util.*;
/**
* Factory for creating {@link TransportClient}s by using createClient.
*
* The factory maintains a connection pool to other hosts and should return the same
* TransportClient for the same remote host. It also shares a single worker thread pool for
* all TransportClients.
*
* TransportClients will be reused whenever possible. Prior to completing the creation of a new
* TransportClient, all given {@link TransportClientBootstrap}s will be run.
*/
public class TransportClientFactory implements Closeable {
  /** A simple data structure to track the pool of clients between two peer nodes. */
  private static class ClientPool {
    TransportClient[] clients;
    // One lock per client slot, so creating connections in different slots does not serialize.
    Object[] locks;
    ClientPool(int size) {
      clients = new TransportClient[size];
      locks = new Object[size];
      for (int i = 0; i < size; i++) {
        locks[i] = new Object();
      }
    }
  }
  private static final Logger logger = LoggerFactory.getLogger(TransportClientFactory.class);
  private final TransportContext context;
  private final TransportConf conf;
  private final List<TransportClientBootstrap> clientBootstraps;
  // Keyed by unresolved remote address; each value pools numConnectionsPerPeer clients.
  private final ConcurrentHashMap<SocketAddress, ClientPool> connectionPool;
  /** Random number generator for picking connections between peers. */
  private final Random rand;
  private final int numConnectionsPerPeer;
  private final Class<? extends Channel> socketChannelClass;
  private EventLoopGroup workerGroup;
  private PooledByteBufAllocator pooledAllocator;
  private final NettyMemoryMetrics metrics;
  public TransportClientFactory(
      TransportContext context,
      List<TransportClientBootstrap> clientBootstraps) {
    this.context = Preconditions.checkNotNull(context);
    this.conf = context.getConf();
    // Defensive copy: callers may mutate their list after handing it to us.
    this.clientBootstraps = Lists.newArrayList(Preconditions.checkNotNull(clientBootstraps));
    this.connectionPool = new ConcurrentHashMap<>();
    this.numConnectionsPerPeer = conf.numConnectionsPerPeer();
    this.rand = new Random();
    IOMode ioMode = IOMode.valueOf(conf.ioMode());
    this.socketChannelClass = NettyUtils.getClientChannelClass(ioMode);
    this.workerGroup = NettyUtils.createEventLoop(
        ioMode,
        conf.clientThreads(),
        conf.getModuleName() + "-client");
    this.pooledAllocator = NettyUtils.createPooledByteBufAllocator(
      conf.preferDirectBufs(), false /* allowCache */, conf.clientThreads());
    this.metrics = new NettyMemoryMetrics(
      this.pooledAllocator, conf.getModuleName() + "-client", conf);
  }
  /** Exposes Netty memory-allocator metrics for this factory's client allocator. */
  public MetricSet getAllMetrics() {
    return metrics;
  }
  /**
   * Create a {@link TransportClient} connecting to the given remote host / port.
   *
   * We maintains an array of clients (size determined by spark.shuffle.io.numConnectionsPerPeer)
   * and randomly picks one to use. If no client was previously created in the randomly selected
   * spot, this function creates a new client and places it there.
   *
   * Prior to the creation of a new TransportClient, we will execute all
   * {@link TransportClientBootstrap}s that are registered with this factory.
   *
   * This blocks until a connection is successfully established and fully bootstrapped.
   *
   * Concurrency: This method is safe to call from multiple threads.
   */
  public TransportClient createClient(String remoteHost, int remotePort)
      throws IOException, InterruptedException {
    // Get connection from the connection pool first.
    // If it is not found or not active, create a new one.
    // Use unresolved address here to avoid DNS resolution each time we creates a client.
    final InetSocketAddress unresolvedAddress =
      InetSocketAddress.createUnresolved(remoteHost, remotePort);
    // Create the ClientPool if we don't have it yet.
    ClientPool clientPool = connectionPool.get(unresolvedAddress);
    if (clientPool == null) {
      // putIfAbsent + get: a racing thread may install the pool first; both then share it.
      connectionPool.putIfAbsent(unresolvedAddress, new ClientPool(numConnectionsPerPeer));
      clientPool = connectionPool.get(unresolvedAddress);
    }
    // Randomly pick one of the per-peer connection slots.
    int clientIndex = rand.nextInt(numConnectionsPerPeer);
    TransportClient cachedClient = clientPool.clients[clientIndex];
    if (cachedClient != null && cachedClient.isActive()) {
      // Make sure that the channel will not timeout by updating the last use time of the
      // handler. Then check that the client is still alive, in case it timed out before
      // this code was able to update things.
      TransportChannelHandler handler = cachedClient.getChannel().pipeline()
        .get(TransportChannelHandler.class);
      synchronized (handler) {
        handler.getResponseHandler().updateTimeOfLastRequest();
      }
      if (cachedClient.isActive()) {
        logger.trace("Returning cached connection to {}: {}",
          cachedClient.getSocketAddress(), cachedClient);
        return cachedClient;
      }
    }
    // If we reach here, we don't have an existing connection open. Let's create a new one.
    // Multiple threads might race here to create new connections. Keep only one of them active.
    final long preResolveHost = System.nanoTime();
    final InetSocketAddress resolvedAddress = new InetSocketAddress(remoteHost, remotePort);
    final long hostResolveTimeMs = (System.nanoTime() - preResolveHost) / 1000000;
    if (hostResolveTimeMs > 2000) {
      logger.warn("DNS resolution for {} took {} ms", resolvedAddress, hostResolveTimeMs);
    } else {
      logger.trace("DNS resolution for {} took {} ms", resolvedAddress, hostResolveTimeMs);
    }
    synchronized (clientPool.locks[clientIndex]) {
      // Re-check under the slot lock: another thread may have created a connection while
      // this thread was resolving the address.
      cachedClient = clientPool.clients[clientIndex];
      if (cachedClient != null) {
        if (cachedClient.isActive()) {
          logger.trace("Returning cached connection to {}: {}", resolvedAddress, cachedClient);
          return cachedClient;
        } else {
          logger.info("Found inactive connection to {}, creating a new one.", resolvedAddress);
        }
      }
      clientPool.clients[clientIndex] = createClient(resolvedAddress);
      return clientPool.clients[clientIndex];
    }
  }
  /**
   * Create a completely new {@link TransportClient} to the given remote host / port.
   * This connection is not pooled.
   *
   * As with {@link #createClient(String, int)}, this method is blocking.
   */
  public TransportClient createUnmanagedClient(String remoteHost, int remotePort)
      throws IOException, InterruptedException {
    final InetSocketAddress address = new InetSocketAddress(remoteHost, remotePort);
    return createClient(address);
  }
  /** Create a completely new {@link TransportClient} to the remote address. */
  private TransportClient createClient(InetSocketAddress address)
      throws IOException, InterruptedException {
    logger.debug("Creating new connection to {}", address);
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(workerGroup)
      .channel(socketChannelClass)
      // Disable Nagle's Algorithm since we don't want packets to wait
      .option(ChannelOption.TCP_NODELAY, true)
      .option(ChannelOption.SO_KEEPALIVE, true)
      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs())
      .option(ChannelOption.ALLOCATOR, pooledAllocator);
    if (conf.receiveBuf() > 0) {
      bootstrap.option(ChannelOption.SO_RCVBUF, conf.receiveBuf());
    }
    if (conf.sendBuf() > 0) {
      bootstrap.option(ChannelOption.SO_SNDBUF, conf.sendBuf());
    }
    final AtomicReference<TransportClient> clientRef = new AtomicReference<>();
    final AtomicReference<Channel> channelRef = new AtomicReference<>();
    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
      @Override
      public void initChannel(SocketChannel ch) {
        // initializePipeline installs the handlers and hands back the client for this channel.
        TransportChannelHandler clientHandler = context.initializePipeline(ch);
        clientRef.set(clientHandler.getClient());
        channelRef.set(ch);
      }
    });
    // Connect to the remote server
    long preConnect = System.nanoTime();
    ChannelFuture cf = bootstrap.connect(address);
    if (!cf.await(conf.connectionTimeoutMs())) {
      throw new IOException(
        String.format("Connecting to %s timed out (%s ms)", address, conf.connectionTimeoutMs()));
    } else if (cf.cause() != null) {
      throw new IOException(String.format("Failed to connect to %s", address), cf.cause());
    }
    TransportClient client = clientRef.get();
    Channel channel = channelRef.get();
    assert client != null : "Channel future completed successfully with null client";
    // Execute any client bootstraps synchronously before marking the Client as successful.
    long preBootstrap = System.nanoTime();
    logger.debug("Connection to {} successful, running bootstraps...", address);
    try {
      for (TransportClientBootstrap clientBootstrap : clientBootstraps) {
        clientBootstrap.doBootstrap(client, channel);
      }
    } catch (Exception e) { // catch non-RuntimeExceptions too as bootstrap may be written in Scala
      long bootstrapTimeMs = (System.nanoTime() - preBootstrap) / 1000000;
      logger.error("Exception while bootstrapping client after " + bootstrapTimeMs + " ms", e);
      client.close();
      throw Throwables.propagate(e);
    }
    long postBootstrap = System.nanoTime();
    logger.info("Successfully created connection to {} after {} ms ({} ms spent in bootstraps)",
      address, (postBootstrap - preConnect) / 1000000, (postBootstrap - preBootstrap) / 1000000);
    return client;
  }
  /** Close all connections in the connection pool, and shutdown the worker thread pool. */
  @Override
  public void close() {
    // Go through all clients and close them if they are active.
    for (ClientPool clientPool : connectionPool.values()) {
      for (int i = 0; i < clientPool.clients.length; i++) {
        TransportClient client = clientPool.clients[i];
        if (client != null) {
          // Null the slot first so no other caller hands out the client while it is closing.
          clientPool.clients[i] = null;
          JavaUtils.closeQuietly(client);
        }
      }
    }
    connectionPool.clear();
    if (workerGroup != null) {
      workerGroup.shutdownGracefully();
      workerGroup = null;
    }
  }
}
| 9,869 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
import java.io.IOException;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
import io.netty.channel.Channel;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.protocol.ChunkFetchFailure;
import org.apache.spark.network.protocol.ChunkFetchSuccess;
import org.apache.spark.network.protocol.ResponseMessage;
import org.apache.spark.network.protocol.RpcFailure;
import org.apache.spark.network.protocol.RpcResponse;
import org.apache.spark.network.protocol.StreamChunkId;
import org.apache.spark.network.protocol.StreamFailure;
import org.apache.spark.network.protocol.StreamResponse;
import org.apache.spark.network.server.MessageHandler;
import static org.apache.spark.network.util.NettyUtils.getRemoteAddress;
import org.apache.spark.network.util.TransportFrameDecoder;
/**
* Handler that processes server responses, in response to requests issued from a
* [[TransportClient]]. It works by tracking the list of outstanding requests (and their callbacks).
*
* Concurrency: thread safe and can be called from multiple threads.
*/
public class TransportResponseHandler extends MessageHandler<ResponseMessage> {
  private static final Logger logger = LoggerFactory.getLogger(TransportResponseHandler.class);

  private final Channel channel;

  /** Chunk fetches awaiting a ChunkFetchSuccess / ChunkFetchFailure, keyed by stream + chunk. */
  private final Map<StreamChunkId, ChunkReceivedCallback> outstandingFetches;

  /** RPCs awaiting an RpcResponse / RpcFailure, keyed by request id. */
  private final Map<Long, RpcResponseCallback> outstandingRpcs;

  /**
   * Stream callbacks in the order their requests were issued; each StreamResponse/StreamFailure
   * is matched against the head of this queue.
   */
  private final Queue<Pair<String, StreamCallback>> streamCallbacks;

  /** True while an installed StreamInterceptor is still consuming stream data. */
  private volatile boolean streamActive;

  /** Records the time (in system nanoseconds) that the last fetch or RPC request was sent. */
  private final AtomicLong timeOfLastRequestNs;

  public TransportResponseHandler(Channel channel) {
    this.channel = channel;
    this.outstandingFetches = new ConcurrentHashMap<>();
    this.outstandingRpcs = new ConcurrentHashMap<>();
    this.streamCallbacks = new ConcurrentLinkedQueue<>();
    this.timeOfLastRequestNs = new AtomicLong(0);
  }

  public void addFetchRequest(StreamChunkId streamChunkId, ChunkReceivedCallback callback) {
    updateTimeOfLastRequest();
    outstandingFetches.put(streamChunkId, callback);
  }

  public void removeFetchRequest(StreamChunkId streamChunkId) {
    outstandingFetches.remove(streamChunkId);
  }

  public void addRpcRequest(long requestId, RpcResponseCallback callback) {
    updateTimeOfLastRequest();
    outstandingRpcs.put(requestId, callback);
  }

  public void removeRpcRequest(long requestId) {
    outstandingRpcs.remove(requestId);
  }

  public void addStreamCallback(String streamId, StreamCallback callback) {
    timeOfLastRequestNs.set(System.nanoTime());
    streamCallbacks.offer(ImmutablePair.of(streamId, callback));
  }

  @VisibleForTesting
  public void deactivateStream() {
    streamActive = false;
  }

  /**
   * Fire the failure callback for all outstanding requests. This is called when we have an
   * uncaught exception or pre-mature connection termination.
   */
  private void failOutstandingRequests(Throwable cause) {
    for (Map.Entry<StreamChunkId, ChunkReceivedCallback> entry : outstandingFetches.entrySet()) {
      try {
        entry.getValue().onFailure(entry.getKey().chunkIndex, cause);
      } catch (Exception e) {
        logger.warn("ChunkReceivedCallback.onFailure throws exception", e);
      }
    }
    for (Map.Entry<Long, RpcResponseCallback> entry : outstandingRpcs.entrySet()) {
      try {
        entry.getValue().onFailure(cause);
      } catch (Exception e) {
        logger.warn("RpcResponseCallback.onFailure throws exception", e);
      }
    }
    for (Pair<String, StreamCallback> entry : streamCallbacks) {
      try {
        entry.getValue().onFailure(entry.getKey(), cause);
      } catch (Exception e) {
        logger.warn("StreamCallback.onFailure throws exception", e);
      }
    }

    // It's OK if new fetches appear, as they will fail immediately.
    outstandingFetches.clear();
    outstandingRpcs.clear();
    streamCallbacks.clear();
  }

  @Override
  public void channelActive() {
  }

  @Override
  public void channelInactive() {
    if (numOutstandingRequests() > 0) {
      String remoteAddress = getRemoteAddress(channel);
      logger.error("Still have {} requests outstanding when connection from {} is closed",
        numOutstandingRequests(), remoteAddress);
      failOutstandingRequests(new IOException("Connection from " + remoteAddress + " closed"));
    }
  }

  @Override
  public void exceptionCaught(Throwable cause) {
    if (numOutstandingRequests() > 0) {
      String remoteAddress = getRemoteAddress(channel);
      logger.error("Still have {} requests outstanding when connection from {} is closed",
        numOutstandingRequests(), remoteAddress);
      failOutstandingRequests(cause);
    }
  }

  @Override
  public void handle(ResponseMessage message) throws Exception {
    if (message instanceof ChunkFetchSuccess) {
      ChunkFetchSuccess resp = (ChunkFetchSuccess) message;
      ChunkReceivedCallback listener = outstandingFetches.get(resp.streamChunkId);
      if (listener == null) {
        logger.warn("Ignoring response for block {} from {} since it is not outstanding",
          resp.streamChunkId, getRemoteAddress(channel));
        resp.body().release();
      } else {
        outstandingFetches.remove(resp.streamChunkId);
        // Release the body even if the callback throws, mirroring the RpcResponse branch below;
        // otherwise a throwing callback would leak the reference-counted buffer.
        try {
          listener.onSuccess(resp.streamChunkId.chunkIndex, resp.body());
        } finally {
          resp.body().release();
        }
      }
    } else if (message instanceof ChunkFetchFailure) {
      ChunkFetchFailure resp = (ChunkFetchFailure) message;
      ChunkReceivedCallback listener = outstandingFetches.get(resp.streamChunkId);
      if (listener == null) {
        logger.warn("Ignoring response for block {} from {} ({}) since it is not outstanding",
          resp.streamChunkId, getRemoteAddress(channel), resp.errorString);
      } else {
        outstandingFetches.remove(resp.streamChunkId);
        listener.onFailure(resp.streamChunkId.chunkIndex, new ChunkFetchFailureException(
          "Failure while fetching " + resp.streamChunkId + ": " + resp.errorString));
      }
    } else if (message instanceof RpcResponse) {
      RpcResponse resp = (RpcResponse) message;
      RpcResponseCallback listener = outstandingRpcs.get(resp.requestId);
      if (listener == null) {
        logger.warn("Ignoring response for RPC {} from {} ({} bytes) since it is not outstanding",
          resp.requestId, getRemoteAddress(channel), resp.body().size());
      } else {
        outstandingRpcs.remove(resp.requestId);
        try {
          listener.onSuccess(resp.body().nioByteBuffer());
        } finally {
          resp.body().release();
        }
      }
    } else if (message instanceof RpcFailure) {
      RpcFailure resp = (RpcFailure) message;
      RpcResponseCallback listener = outstandingRpcs.get(resp.requestId);
      if (listener == null) {
        logger.warn("Ignoring response for RPC {} from {} ({}) since it is not outstanding",
          resp.requestId, getRemoteAddress(channel), resp.errorString);
      } else {
        outstandingRpcs.remove(resp.requestId);
        listener.onFailure(new RuntimeException(resp.errorString));
      }
    } else if (message instanceof StreamResponse) {
      StreamResponse resp = (StreamResponse) message;
      // Stream responses arrive in request order, so match against the head of the queue.
      Pair<String, StreamCallback> entry = streamCallbacks.poll();
      if (entry != null) {
        StreamCallback callback = entry.getValue();
        if (resp.byteCount > 0) {
          // Install an interceptor so the raw stream bytes bypass normal frame decoding and
          // are fed straight to the callback.
          StreamInterceptor<ResponseMessage> interceptor = new StreamInterceptor<>(
            this, resp.streamId, resp.byteCount, callback);
          try {
            TransportFrameDecoder frameDecoder = (TransportFrameDecoder)
              channel.pipeline().get(TransportFrameDecoder.HANDLER_NAME);
            frameDecoder.setInterceptor(interceptor);
            streamActive = true;
          } catch (Exception e) {
            logger.error("Error installing stream handler.", e);
            deactivateStream();
          }
        } else {
          // Zero-length stream: complete immediately without installing an interceptor.
          try {
            callback.onComplete(resp.streamId);
          } catch (Exception e) {
            logger.warn("Error in stream handler onComplete().", e);
          }
        }
      } else {
        logger.error("Could not find callback for StreamResponse.");
      }
    } else if (message instanceof StreamFailure) {
      StreamFailure resp = (StreamFailure) message;
      Pair<String, StreamCallback> entry = streamCallbacks.poll();
      if (entry != null) {
        StreamCallback callback = entry.getValue();
        try {
          callback.onFailure(resp.streamId, new RuntimeException(resp.error));
        } catch (IOException ioe) {
          logger.warn("Error in stream failure handler.", ioe);
        }
      } else {
        logger.warn("Stream failure with unknown callback: {}", resp.error);
      }
    } else {
      throw new IllegalStateException("Unknown response type: " + message.type());
    }
  }

  /** Returns total number of outstanding requests (fetch requests + rpcs) */
  public int numOutstandingRequests() {
    return outstandingFetches.size() + outstandingRpcs.size() + streamCallbacks.size() +
      (streamActive ? 1 : 0);
  }

  /** Returns the time in nanoseconds of when the last request was sent out. */
  public long getTimeOfLastRequestNs() {
    return timeOfLastRequestNs.get();
  }

  /** Updates the time of the last request to the current system time. */
  public void updateTimeOfLastRequest() {
    timeOfLastRequestNs.set(System.nanoTime());
  }
}
| 9,870 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/client/StreamCallbackWithID.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
/**
 * A {@link StreamCallback} that also exposes the ID of the stream it is associated with.
 */
public interface StreamCallbackWithID extends StreamCallback {
  /** Returns the ID of the stream this callback handles. */
  String getID();
}
| 9,871 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/client/StreamCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* Callback for streaming data. Stream data will be offered to the
* {@link #onData(String, ByteBuffer)} method as it arrives. Once all the stream data is received,
* {@link #onComplete(String)} will be called.
* <p>
* The network library guarantees that a single thread will call these methods at a time, but
* different call may be made by different threads.
*/
public interface StreamCallback {
  /**
   * Called upon receipt of stream data.
   *
   * @param streamId ID of the stream the data belongs to
   * @param buf the received chunk of stream data
   */
  void onData(String streamId, ByteBuffer buf) throws IOException;

  /**
   * Called when all data from the stream has been received.
   *
   * @param streamId ID of the stream that completed
   */
  void onComplete(String streamId) throws IOException;

  /**
   * Called if there's an error reading data from the stream.
   *
   * @param streamId ID of the stream that failed
   * @param cause the error that terminated the stream
   */
  void onFailure(String streamId, Throwable cause) throws IOException;
}
| 9,872 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientBootstrap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.client;
import io.netty.channel.Channel;
/**
* A bootstrap which is executed on a TransportClient before it is returned to the user.
* This enables an initial exchange of information (e.g., SASL authentication tokens) on a once-per-
* connection basis.
*
* Since connections (and TransportClients) are reused as much as possible, it is generally
* reasonable to perform an expensive bootstrapping operation, as they often share a lifespan with
* the JVM itself.
*/
public interface TransportClientBootstrap {
  /**
   * Performs the bootstrapping operation, throwing an exception on failure.
   *
   * @param client the client being bootstrapped; may be used to exchange messages with the peer
   * @param channel the Netty channel backing the client
   * @throws RuntimeException if bootstrapping fails; the connection will not be handed to callers
   */
  void doBootstrap(TransportClient client, Channel channel) throws RuntimeException;
}
| 9,873 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/buffer/NioManagedBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.buffer;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.Unpooled;
/**
* A {@link ManagedBuffer} backed by {@link ByteBuffer}.
*/
public class NioManagedBuffer extends ManagedBuffer {
  // The wrapped NIO buffer; only bytes between position and limit are considered data.
  private final ByteBuffer buffer;

  public NioManagedBuffer(ByteBuffer buf) {
    this.buffer = buf;
  }

  @Override
  public long size() {
    return buffer.remaining();
  }

  @Override
  public ByteBuffer nioByteBuffer() throws IOException {
    // Hand out an independent view so callers can't disturb this buffer's position/limit.
    return buffer.duplicate();
  }

  @Override
  public InputStream createInputStream() throws IOException {
    return new ByteBufInputStream(Unpooled.wrappedBuffer(buffer));
  }

  // NIO buffers are garbage collected normally, so retain/release are no-ops.
  @Override
  public ManagedBuffer retain() {
    return this;
  }

  @Override
  public ManagedBuffer release() {
    return this;
  }

  @Override
  public Object convertToNetty() throws IOException {
    return Unpooled.wrappedBuffer(buffer);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("buf", buffer)
      .toString();
  }
}
| 9,874 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/buffer/NettyManagedBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.buffer;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
/**
* A {@link ManagedBuffer} backed by a Netty {@link ByteBuf}.
*/
public class NettyManagedBuffer extends ManagedBuffer {
  // The wrapped Netty buffer; its reference count governs this buffer's lifetime.
  private final ByteBuf delegate;

  public NettyManagedBuffer(ByteBuf buf) {
    this.delegate = buf;
  }

  @Override
  public long size() {
    return delegate.readableBytes();
  }

  @Override
  public ByteBuffer nioByteBuffer() throws IOException {
    return delegate.nioBuffer();
  }

  @Override
  public InputStream createInputStream() throws IOException {
    return new ByteBufInputStream(delegate);
  }

  @Override
  public ManagedBuffer retain() {
    delegate.retain();
    return this;
  }

  @Override
  public ManagedBuffer release() {
    delegate.release();
    return this;
  }

  @Override
  public Object convertToNetty() throws IOException {
    // Duplicate shares content but has independent indices; retain so the caller owns a ref.
    return delegate.duplicate().retain();
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("buf", delegate)
      .toString();
  }
}
| 9,875 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/buffer/FileSegmentManagedBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.buffer;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.StandardOpenOption;
import com.google.common.base.Objects;
import com.google.common.io.ByteStreams;
import io.netty.channel.DefaultFileRegion;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.LimitedInputStream;
import org.apache.spark.network.util.TransportConf;
/**
* A {@link ManagedBuffer} backed by a segment in a file.
*/
public final class FileSegmentManagedBuffer extends ManagedBuffer {
  private final TransportConf conf;
  // The backing file plus the [offset, offset + length) byte range this buffer exposes.
  private final File file;
  private final long offset;
  private final long length;

  public FileSegmentManagedBuffer(TransportConf conf, File file, long offset, long length) {
    this.conf = conf;
    this.file = file;
    this.offset = offset;
    this.length = length;
  }

  @Override
  public long size() {
    return length;
  }

  @Override
  public ByteBuffer nioByteBuffer() throws IOException {
    FileChannel channel = null;
    try {
      channel = new RandomAccessFile(file, "r").getChannel();
      // Just copy the buffer if it's sufficiently small, as memory mapping has a high overhead.
      if (length < conf.memoryMapBytes()) {
        ByteBuffer buf = ByteBuffer.allocate((int) length);
        channel.position(offset);
        while (buf.remaining() != 0) {
          if (channel.read(buf) == -1) {
            throw new IOException(String.format("Reached EOF before filling buffer\n" +
              "offset=%s\nfile=%s\nbuf.remaining=%s",
              offset, file.getAbsoluteFile(), buf.remaining()));
          }
        }
        buf.flip();
        return buf;
      } else {
        // Large segment: memory-map instead of copying into the heap.
        return channel.map(FileChannel.MapMode.READ_ONLY, offset, length);
      }
    } catch (IOException e) {
      // Enrich the error with the actual on-disk length when we can still query the channel.
      String errorMessage = "Error in reading " + this;
      try {
        if (channel != null) {
          long size = channel.size();
          errorMessage = "Error in reading " + this + " (actual file length " + size + ")";
        }
      } catch (IOException ignored) {
        // ignore
      }
      throw new IOException(errorMessage, e);
    } finally {
      JavaUtils.closeQuietly(channel);
    }
  }

  @Override
  public InputStream createInputStream() throws IOException {
    FileInputStream is = null;
    // Ownership of the stream transfers to the returned wrapper on success; only close it here
    // if we fail before returning.
    boolean shouldClose = true;
    try {
      is = new FileInputStream(file);
      ByteStreams.skipFully(is, offset);
      InputStream r = new LimitedInputStream(is, length);
      shouldClose = false;
      return r;
    } catch (IOException e) {
      String errorMessage = "Error in reading " + this;
      if (is != null) {
        long size = file.length();
        errorMessage = "Error in reading " + this + " (actual file length " + size + ")";
      }
      throw new IOException(errorMessage, e);
    } finally {
      if (shouldClose) {
        JavaUtils.closeQuietly(is);
      }
    }
  }

  // The underlying file outlives this object, so no reference counting is needed.
  @Override
  public ManagedBuffer retain() {
    return this;
  }

  @Override
  public ManagedBuffer release() {
    return this;
  }

  @Override
  public Object convertToNetty() throws IOException {
    if (conf.lazyFileDescriptor()) {
      // Let Netty open the file lazily when the region is actually transferred.
      return new DefaultFileRegion(file, offset, length);
    } else {
      FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
      return new DefaultFileRegion(fileChannel, offset, length);
    }
  }

  public File getFile() { return file; }

  public long getOffset() { return offset; }

  public long getLength() { return length; }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
      .add("file", file)
      .add("offset", offset)
      .add("length", length)
      .toString();
  }
}
| 9,876 |
0 | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/main/java/org/apache/spark/network/buffer/ManagedBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.buffer;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
/**
* This interface provides an immutable view for data in the form of bytes. The implementation
* should specify how the data is provided:
*
* - {@link FileSegmentManagedBuffer}: data backed by part of a file
* - {@link NioManagedBuffer}: data backed by a NIO ByteBuffer
* - {@link NettyManagedBuffer}: data backed by a Netty ByteBuf
*
* The concrete buffer implementation might be managed outside the JVM garbage collector.
* For example, in the case of {@link NettyManagedBuffer}, the buffers are reference counted.
* In that case, if the buffer is going to be passed around to a different thread, retain/release
* should be called.
*/
public abstract class ManagedBuffer {
  /**
   * Number of bytes of the data. If this buffer will decrypt for all of the views into the data,
   * this is the size of the decrypted data.
   */
  public abstract long size();

  /**
   * Exposes this buffer's data as an NIO ByteBuffer. Changing the position and limit of the
   * returned ByteBuffer should not affect the content of this buffer.
   */
  // TODO: Deprecate this, usage may require expensive memory mapping or allocation.
  public abstract ByteBuffer nioByteBuffer() throws IOException;

  /**
   * Exposes this buffer's data as an InputStream. The underlying implementation does not
   * necessarily check for the length of bytes read, so the caller is responsible for making sure
   * it does not go over the limit.
   */
  public abstract InputStream createInputStream() throws IOException;

  /**
   * Increment the reference count by one if applicable.
   *
   * @return this buffer, for call chaining
   */
  public abstract ManagedBuffer retain();

  /**
   * If applicable, decrement the reference count by one and deallocates the buffer if the
   * reference count reaches zero.
   *
   * @return this buffer, for call chaining
   */
  public abstract ManagedBuffer release();

  /**
   * Convert the buffer into an Netty object, used to write the data out. The return value is either
   * a {@link io.netty.buffer.ByteBuf} or a {@link io.netty.channel.FileRegion}.
   *
   * If this method returns a ByteBuf, then that buffer's reference count will be incremented and
   * the caller will be responsible for releasing this new reference.
   */
  public abstract Object convertToNetty() throws IOException;
}
| 9,877 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/TestShuffleDataContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.UUID;
import com.google.common.io.Closeables;
import com.google.common.io.Files;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.util.JavaUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Manages some sort-shuffle data, including the creation
* and cleanup of directories that can be read by the {@link ExternalShuffleBlockResolver}.
*/
public class TestShuffleDataContext {
  private static final Logger logger = LoggerFactory.getLogger(TestShuffleDataContext.class);

  public final String[] localDirs;
  public final int subDirsPerLocalDir;

  public TestShuffleDataContext(int numLocalDirs, int subDirsPerLocalDir) {
    this.localDirs = new String[numLocalDirs];
    this.subDirsPerLocalDir = subDirsPerLocalDir;
  }

  /** Materializes the temp local dirs plus their two-hex-digit sub-directories. */
  public void create() {
    for (int dir = 0; dir < localDirs.length; dir++) {
      localDirs[dir] = Files.createTempDir().getAbsolutePath();
      for (int sub = 0; sub < subDirsPerLocalDir; sub++) {
        new File(localDirs[dir], String.format("%02x", sub)).mkdirs();
      }
    }
  }

  /** Best-effort removal of every local dir created by {@link #create()}. */
  public void cleanup() {
    for (String localDir : localDirs) {
      try {
        JavaUtils.deleteRecursively(new File(localDir));
      } catch (IOException e) {
        logger.warn("Unable to cleanup localDir = " + localDir, e);
      }
    }
  }

  /** Creates reducer blocks in a sort-based data format within our local dirs. */
  public void insertSortShuffleData(int shuffleId, int mapId, byte[][] blocks) throws IOException {
    String blockId = "shuffle_" + shuffleId + "_" + mapId + "_0";

    OutputStream dataOut = null;
    DataOutputStream indexOut = null;
    boolean swallowCloseErrors = true;
    try {
      dataOut = new FileOutputStream(
        ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, blockId + ".data"));
      indexOut = new DataOutputStream(new FileOutputStream(
        ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, blockId + ".index")));

      // Index format: one long offset per block boundary, starting at 0.
      long cursor = 0;
      indexOut.writeLong(cursor);
      for (byte[] block : blocks) {
        cursor += block.length;
        dataOut.write(block);
        indexOut.writeLong(cursor);
      }
      swallowCloseErrors = false;
    } finally {
      // If the write failed, close quietly so the original exception propagates.
      Closeables.close(dataOut, swallowCloseErrors);
      Closeables.close(indexOut, swallowCloseErrors);
    }
  }

  /** Creates spill file(s) within the local dirs. */
  public void insertSpillData() throws IOException {
    String filename = "temp_local_" + UUID.randomUUID();

    OutputStream spillOut = null;
    try {
      spillOut = new FileOutputStream(
        ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, filename));
      spillOut.write(42);
    } finally {
      Closeables.close(spillOut, false);
    }
  }

  /**
   * Creates an ExecutorShuffleInfo object based on the given shuffle manager which targets this
   * context's directories.
   */
  public ExecutorShuffleInfo createExecutorInfo(String shuffleManager) {
    return new ExecutorShuffleInfo(localDirs, subDirsPerLocalDir, shuffleManager);
  }
}
| 9,878 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/BlockTransferMessagesSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.spark.network.shuffle.protocol.*;
/** Verifies that all BlockTransferMessages can be serialized correctly. */
public class BlockTransferMessagesSuite {
  @Test
  public void serializeOpenShuffleBlocks() {
    checkSerializeDeserialize(new OpenBlocks("app-1", "exec-2", new String[] { "b1", "b2" }));
    checkSerializeDeserialize(new RegisterExecutor("app-1", "exec-2", new ExecutorShuffleInfo(
      new String[] { "/local1", "/local2" }, 32, "MyShuffleManager")));
    checkSerializeDeserialize(new UploadBlock("app-1", "exec-2", "block-3", new byte[] { 1, 2 },
      new byte[] { 4, 5, 6, 7} ));
    checkSerializeDeserialize(new StreamHandle(12345, 16));
  }

  /** Round-trips a message through its wire encoding and checks it survives intact. */
  private void checkSerializeDeserialize(BlockTransferMessage original) {
    BlockTransferMessage roundTripped =
      BlockTransferMessage.Decoder.fromByteBuffer(original.toByteBuffer());
    assertEquals(original, roundTripped);
    assertEquals(original.hashCode(), roundTripped.hashCode());
    assertEquals(original.toString(), roundTripped.toString());
  }
}
| 9,879 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.spark.network.TestUtils;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
public class ExternalShuffleIntegrationSuite {
private static final String APP_ID = "app-id";
private static final String SORT_MANAGER = "org.apache.spark.shuffle.sort.SortShuffleManager";
// Executor 0 is sort-based
static TestShuffleDataContext dataContext0;
static ExternalShuffleBlockHandler handler;
static TransportServer server;
static TransportConf conf;
static byte[][] exec0Blocks = new byte[][] {
new byte[123],
new byte[12345],
new byte[1234567],
};
static byte[][] exec1Blocks = new byte[][] {
new byte[321],
new byte[54321],
};
@BeforeClass
public static void beforeAll() throws IOException {
Random rand = new Random();
for (byte[] block : exec0Blocks) {
rand.nextBytes(block);
}
for (byte[] block: exec1Blocks) {
rand.nextBytes(block);
}
dataContext0 = new TestShuffleDataContext(2, 5);
dataContext0.create();
dataContext0.insertSortShuffleData(0, 0, exec0Blocks);
conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
handler = new ExternalShuffleBlockHandler(conf, null);
TransportContext transportContext = new TransportContext(conf, handler);
server = transportContext.createServer();
}
@AfterClass
public static void afterAll() {
dataContext0.cleanup();
server.close();
}
@After
public void afterEach() {
handler.applicationRemoved(APP_ID, false /* cleanupLocalDirs */);
}
static class FetchResult {
public Set<String> successBlocks;
public Set<String> failedBlocks;
public List<ManagedBuffer> buffers;
public void releaseBuffers() {
for (ManagedBuffer buffer : buffers) {
buffer.release();
}
}
}
// Fetch a set of blocks from a pre-registered executor.
private FetchResult fetchBlocks(String execId, String[] blockIds) throws Exception {
return fetchBlocks(execId, blockIds, conf, server.getPort());
}
// Fetch a set of blocks from a pre-registered executor. Connects to the server on the given port,
// to allow connecting to invalid servers.
private FetchResult fetchBlocks(
String execId,
String[] blockIds,
TransportConf clientConf,
int port) throws Exception {
final FetchResult res = new FetchResult();
res.successBlocks = Collections.synchronizedSet(new HashSet<String>());
res.failedBlocks = Collections.synchronizedSet(new HashSet<String>());
res.buffers = Collections.synchronizedList(new LinkedList<ManagedBuffer>());
final Semaphore requestsRemaining = new Semaphore(0);
ExternalShuffleClient client = new ExternalShuffleClient(clientConf, null, false, 5000);
client.init(APP_ID);
client.fetchBlocks(TestUtils.getLocalHost(), port, execId, blockIds,
new BlockFetchingListener() {
@Override
public void onBlockFetchSuccess(String blockId, ManagedBuffer data) {
synchronized (this) {
if (!res.successBlocks.contains(blockId) && !res.failedBlocks.contains(blockId)) {
data.retain();
res.successBlocks.add(blockId);
res.buffers.add(data);
requestsRemaining.release();
}
}
}
@Override
public void onBlockFetchFailure(String blockId, Throwable exception) {
synchronized (this) {
if (!res.successBlocks.contains(blockId) && !res.failedBlocks.contains(blockId)) {
res.failedBlocks.add(blockId);
requestsRemaining.release();
}
}
}
}, null);
if (!requestsRemaining.tryAcquire(blockIds.length, 5, TimeUnit.SECONDS)) {
fail("Timeout getting response from the server");
}
client.close();
return res;
}
@Test
public void testFetchOneSort() throws Exception {
registerExecutor("exec-0", dataContext0.createExecutorInfo(SORT_MANAGER));
FetchResult exec0Fetch = fetchBlocks("exec-0", new String[] { "shuffle_0_0_0" });
assertEquals(Sets.newHashSet("shuffle_0_0_0"), exec0Fetch.successBlocks);
assertTrue(exec0Fetch.failedBlocks.isEmpty());
assertBufferListsEqual(exec0Fetch.buffers, Arrays.asList(exec0Blocks[0]));
exec0Fetch.releaseBuffers();
}
@Test
public void testFetchThreeSort() throws Exception {
registerExecutor("exec-0", dataContext0.createExecutorInfo(SORT_MANAGER));
FetchResult exec0Fetch = fetchBlocks("exec-0",
new String[] { "shuffle_0_0_0", "shuffle_0_0_1", "shuffle_0_0_2" });
assertEquals(Sets.newHashSet("shuffle_0_0_0", "shuffle_0_0_1", "shuffle_0_0_2"),
exec0Fetch.successBlocks);
assertTrue(exec0Fetch.failedBlocks.isEmpty());
assertBufferListsEqual(exec0Fetch.buffers, Arrays.asList(exec0Blocks));
exec0Fetch.releaseBuffers();
}
@Test (expected = RuntimeException.class)
public void testRegisterInvalidExecutor() throws Exception {
registerExecutor("exec-1", dataContext0.createExecutorInfo("unknown sort manager"));
}
@Test
public void testFetchWrongBlockId() throws Exception {
registerExecutor("exec-1", dataContext0.createExecutorInfo(SORT_MANAGER));
FetchResult execFetch = fetchBlocks("exec-1", new String[] { "rdd_1_0_0" });
assertTrue(execFetch.successBlocks.isEmpty());
assertEquals(Sets.newHashSet("rdd_1_0_0"), execFetch.failedBlocks);
}
@Test
public void testFetchNonexistent() throws Exception {
registerExecutor("exec-0", dataContext0.createExecutorInfo(SORT_MANAGER));
FetchResult execFetch = fetchBlocks("exec-0",
new String[] { "shuffle_2_0_0" });
assertTrue(execFetch.successBlocks.isEmpty());
assertEquals(Sets.newHashSet("shuffle_2_0_0"), execFetch.failedBlocks);
}
@Test
public void testFetchWrongExecutor() throws Exception {
registerExecutor("exec-0", dataContext0.createExecutorInfo(SORT_MANAGER));
FetchResult execFetch0 = fetchBlocks("exec-0", new String[] { "shuffle_0_0_0" /* right */});
FetchResult execFetch1 = fetchBlocks("exec-0", new String[] { "shuffle_1_0_0" /* wrong */ });
assertEquals(Sets.newHashSet("shuffle_0_0_0"), execFetch0.successBlocks);
assertEquals(Sets.newHashSet("shuffle_1_0_0"), execFetch1.failedBlocks);
}
@Test
public void testFetchUnregisteredExecutor() throws Exception {
registerExecutor("exec-0", dataContext0.createExecutorInfo(SORT_MANAGER));
FetchResult execFetch = fetchBlocks("exec-2",
new String[] { "shuffle_0_0_0", "shuffle_1_0_0" });
assertTrue(execFetch.successBlocks.isEmpty());
assertEquals(Sets.newHashSet("shuffle_0_0_0", "shuffle_1_0_0"), execFetch.failedBlocks);
}
@Test
public void testFetchNoServer() throws Exception {
TransportConf clientConf = new TransportConf("shuffle",
new MapConfigProvider(ImmutableMap.of("spark.shuffle.io.maxRetries", "0")));
registerExecutor("exec-0", dataContext0.createExecutorInfo(SORT_MANAGER));
FetchResult execFetch = fetchBlocks("exec-0",
new String[]{"shuffle_1_0_0", "shuffle_1_0_1"}, clientConf, 1 /* port */);
assertTrue(execFetch.successBlocks.isEmpty());
assertEquals(Sets.newHashSet("shuffle_1_0_0", "shuffle_1_0_1"), execFetch.failedBlocks);
}
private static void registerExecutor(String executorId, ExecutorShuffleInfo executorInfo)
throws IOException, InterruptedException {
ExternalShuffleClient client = new ExternalShuffleClient(conf, null, false, 5000);
client.init(APP_ID);
client.registerWithShuffleServer(TestUtils.getLocalHost(), server.getPort(),
executorId, executorInfo);
}
private static void assertBufferListsEqual(List<ManagedBuffer> list0, List<byte[]> list1)
throws Exception {
assertEquals(list0.size(), list1.size());
for (int i = 0; i < list0.size(); i ++) {
assertBuffersEqual(list0.get(i), new NioManagedBuffer(ByteBuffer.wrap(list1.get(i))));
}
}
private static void assertBuffersEqual(ManagedBuffer buffer0, ManagedBuffer buffer1)
throws Exception {
ByteBuffer nio0 = buffer0.nioByteBuffer();
ByteBuffer nio1 = buffer1.nioByteBuffer();
int len = nio0.remaining();
assertEquals(nio0.remaining(), nio1.remaining());
for (int i = 0; i < len; i ++) {
assertEquals(nio0.get(), nio1.get());
}
}
}
| 9,880 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/OneForOneBlockFetcherSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.collect.Maps;
import io.netty.buffer.Unpooled;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.client.ChunkReceivedCallback;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.shuffle.protocol.BlockTransferMessage;
import org.apache.spark.network.shuffle.protocol.OpenBlocks;
import org.apache.spark.network.shuffle.protocol.StreamHandle;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
public class OneForOneBlockFetcherSuite {
private static final TransportConf conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
@Test
public void testFetchOne() {
LinkedHashMap<String, ManagedBuffer> blocks = Maps.newLinkedHashMap();
blocks.put("shuffle_0_0_0", new NioManagedBuffer(ByteBuffer.wrap(new byte[0])));
BlockFetchingListener listener = fetchBlocks(blocks);
verify(listener).onBlockFetchSuccess("shuffle_0_0_0", blocks.get("shuffle_0_0_0"));
}
@Test
public void testFetchThree() {
LinkedHashMap<String, ManagedBuffer> blocks = Maps.newLinkedHashMap();
blocks.put("b0", new NioManagedBuffer(ByteBuffer.wrap(new byte[12])));
blocks.put("b1", new NioManagedBuffer(ByteBuffer.wrap(new byte[23])));
blocks.put("b2", new NettyManagedBuffer(Unpooled.wrappedBuffer(new byte[23])));
BlockFetchingListener listener = fetchBlocks(blocks);
for (int i = 0; i < 3; i ++) {
verify(listener, times(1)).onBlockFetchSuccess("b" + i, blocks.get("b" + i));
}
}
@Test
public void testFailure() {
LinkedHashMap<String, ManagedBuffer> blocks = Maps.newLinkedHashMap();
blocks.put("b0", new NioManagedBuffer(ByteBuffer.wrap(new byte[12])));
blocks.put("b1", null);
blocks.put("b2", null);
BlockFetchingListener listener = fetchBlocks(blocks);
// Each failure will cause a failure to be invoked in all remaining block fetches.
verify(listener, times(1)).onBlockFetchSuccess("b0", blocks.get("b0"));
verify(listener, times(1)).onBlockFetchFailure(eq("b1"), any());
verify(listener, times(2)).onBlockFetchFailure(eq("b2"), any());
}
@Test
public void testFailureAndSuccess() {
LinkedHashMap<String, ManagedBuffer> blocks = Maps.newLinkedHashMap();
blocks.put("b0", new NioManagedBuffer(ByteBuffer.wrap(new byte[12])));
blocks.put("b1", null);
blocks.put("b2", new NioManagedBuffer(ByteBuffer.wrap(new byte[21])));
BlockFetchingListener listener = fetchBlocks(blocks);
// We may call both success and failure for the same block.
verify(listener, times(1)).onBlockFetchSuccess("b0", blocks.get("b0"));
verify(listener, times(1)).onBlockFetchFailure(eq("b1"), any());
verify(listener, times(1)).onBlockFetchSuccess("b2", blocks.get("b2"));
verify(listener, times(1)).onBlockFetchFailure(eq("b2"), any());
}
@Test
public void testEmptyBlockFetch() {
try {
fetchBlocks(Maps.newLinkedHashMap());
fail();
} catch (IllegalArgumentException e) {
assertEquals("Zero-sized blockIds array", e.getMessage());
}
}
/**
* Begins a fetch on the given set of blocks by mocking out the server side of the RPC which
* simply returns the given (BlockId, Block) pairs.
* As "blocks" is a LinkedHashMap, the blocks are guaranteed to be returned in the same order
* that they were inserted in.
*
* If a block's buffer is "null", an exception will be thrown instead.
*/
private static BlockFetchingListener fetchBlocks(LinkedHashMap<String, ManagedBuffer> blocks) {
TransportClient client = mock(TransportClient.class);
BlockFetchingListener listener = mock(BlockFetchingListener.class);
String[] blockIds = blocks.keySet().toArray(new String[blocks.size()]);
OneForOneBlockFetcher fetcher =
new OneForOneBlockFetcher(client, "app-id", "exec-id", blockIds, listener, conf);
// Respond to the "OpenBlocks" message with an appropriate ShuffleStreamHandle with streamId 123
doAnswer(invocationOnMock -> {
BlockTransferMessage message = BlockTransferMessage.Decoder.fromByteBuffer(
(ByteBuffer) invocationOnMock.getArguments()[0]);
RpcResponseCallback callback = (RpcResponseCallback) invocationOnMock.getArguments()[1];
callback.onSuccess(new StreamHandle(123, blocks.size()).toByteBuffer());
assertEquals(new OpenBlocks("app-id", "exec-id", blockIds), message);
return null;
}).when(client).sendRpc(any(ByteBuffer.class), any(RpcResponseCallback.class));
// Respond to each chunk request with a single buffer from our blocks array.
AtomicInteger expectedChunkIndex = new AtomicInteger(0);
Iterator<ManagedBuffer> blockIterator = blocks.values().iterator();
doAnswer(invocation -> {
try {
long streamId = (Long) invocation.getArguments()[0];
int myChunkIndex = (Integer) invocation.getArguments()[1];
assertEquals(123, streamId);
assertEquals(expectedChunkIndex.getAndIncrement(), myChunkIndex);
ChunkReceivedCallback callback = (ChunkReceivedCallback) invocation.getArguments()[2];
ManagedBuffer result = blockIterator.next();
if (result != null) {
callback.onSuccess(myChunkIndex, result);
} else {
callback.onFailure(myChunkIndex, new RuntimeException("Failed " + myChunkIndex));
}
} catch (Exception e) {
e.printStackTrace();
fail("Unexpected failure");
}
return null;
}).when(client).fetchChunk(anyLong(), anyInt(), any());
fetcher.start();
return listener;
}
}
| 9,881 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/RetryingBlockFetcherSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import org.junit.Test;
import org.mockito.stubbing.Answer;
import org.mockito.stubbing.Stubber;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
import static org.apache.spark.network.shuffle.RetryingBlockFetcher.BlockFetchStarter;
/**
* Tests retry logic by throwing IOExceptions and ensuring that subsequent attempts are made to
* fetch the lost blocks.
*/
public class RetryingBlockFetcherSuite {
ManagedBuffer block0 = new NioManagedBuffer(ByteBuffer.wrap(new byte[13]));
ManagedBuffer block1 = new NioManagedBuffer(ByteBuffer.wrap(new byte[7]));
ManagedBuffer block2 = new NioManagedBuffer(ByteBuffer.wrap(new byte[19]));
@Test
public void testNoFailures() throws IOException, InterruptedException {
BlockFetchingListener listener = mock(BlockFetchingListener.class);
List<? extends Map<String, Object>> interactions = Arrays.asList(
// Immediately return both blocks successfully.
ImmutableMap.<String, Object>builder()
.put("b0", block0)
.put("b1", block1)
.build()
);
performInteractions(interactions, listener);
verify(listener).onBlockFetchSuccess("b0", block0);
verify(listener).onBlockFetchSuccess("b1", block1);
verifyNoMoreInteractions(listener);
}
@Test
public void testUnrecoverableFailure() throws IOException, InterruptedException {
BlockFetchingListener listener = mock(BlockFetchingListener.class);
List<? extends Map<String, Object>> interactions = Arrays.asList(
// b0 throws a non-IOException error, so it will be failed without retry.
ImmutableMap.<String, Object>builder()
.put("b0", new RuntimeException("Ouch!"))
.put("b1", block1)
.build()
);
performInteractions(interactions, listener);
verify(listener).onBlockFetchFailure(eq("b0"), any());
verify(listener).onBlockFetchSuccess("b1", block1);
verifyNoMoreInteractions(listener);
}
@Test
public void testSingleIOExceptionOnFirst() throws IOException, InterruptedException {
BlockFetchingListener listener = mock(BlockFetchingListener.class);
List<? extends Map<String, Object>> interactions = Arrays.asList(
// IOException will cause a retry. Since b0 fails, we will retry both.
ImmutableMap.<String, Object>builder()
.put("b0", new IOException("Connection failed or something"))
.put("b1", block1)
.build(),
ImmutableMap.<String, Object>builder()
.put("b0", block0)
.put("b1", block1)
.build()
);
performInteractions(interactions, listener);
verify(listener, timeout(5000)).onBlockFetchSuccess("b0", block0);
verify(listener, timeout(5000)).onBlockFetchSuccess("b1", block1);
verifyNoMoreInteractions(listener);
}
@Test
public void testSingleIOExceptionOnSecond() throws IOException, InterruptedException {
BlockFetchingListener listener = mock(BlockFetchingListener.class);
List<? extends Map<String, Object>> interactions = Arrays.asList(
// IOException will cause a retry. Since b1 fails, we will not retry b0.
ImmutableMap.<String, Object>builder()
.put("b0", block0)
.put("b1", new IOException("Connection failed or something"))
.build(),
ImmutableMap.<String, Object>builder()
.put("b1", block1)
.build()
);
performInteractions(interactions, listener);
verify(listener, timeout(5000)).onBlockFetchSuccess("b0", block0);
verify(listener, timeout(5000)).onBlockFetchSuccess("b1", block1);
verifyNoMoreInteractions(listener);
}
@Test
public void testTwoIOExceptions() throws IOException, InterruptedException {
BlockFetchingListener listener = mock(BlockFetchingListener.class);
List<? extends Map<String, Object>> interactions = Arrays.asList(
// b0's IOException will trigger retry, b1's will be ignored.
ImmutableMap.<String, Object>builder()
.put("b0", new IOException())
.put("b1", new IOException())
.build(),
// Next, b0 is successful and b1 errors again, so we just request that one.
ImmutableMap.<String, Object>builder()
.put("b0", block0)
.put("b1", new IOException())
.build(),
// b1 returns successfully within 2 retries.
ImmutableMap.<String, Object>builder()
.put("b1", block1)
.build()
);
performInteractions(interactions, listener);
verify(listener, timeout(5000)).onBlockFetchSuccess("b0", block0);
verify(listener, timeout(5000)).onBlockFetchSuccess("b1", block1);
verifyNoMoreInteractions(listener);
}
@Test
public void testThreeIOExceptions() throws IOException, InterruptedException {
BlockFetchingListener listener = mock(BlockFetchingListener.class);
List<? extends Map<String, Object>> interactions = Arrays.asList(
// b0's IOException will trigger retry, b1's will be ignored.
ImmutableMap.<String, Object>builder()
.put("b0", new IOException())
.put("b1", new IOException())
.build(),
// Next, b0 is successful and b1 errors again, so we just request that one.
ImmutableMap.<String, Object>builder()
.put("b0", block0)
.put("b1", new IOException())
.build(),
// b1 errors again, but this was the last retry
ImmutableMap.<String, Object>builder()
.put("b1", new IOException())
.build(),
// This is not reached -- b1 has failed.
ImmutableMap.<String, Object>builder()
.put("b1", block1)
.build()
);
performInteractions(interactions, listener);
verify(listener, timeout(5000)).onBlockFetchSuccess("b0", block0);
verify(listener, timeout(5000)).onBlockFetchFailure(eq("b1"), any());
verifyNoMoreInteractions(listener);
}
@Test
public void testRetryAndUnrecoverable() throws IOException, InterruptedException {
BlockFetchingListener listener = mock(BlockFetchingListener.class);
List<? extends Map<String, Object>> interactions = Arrays.asList(
// b0's IOException will trigger retry, subsequent messages will be ignored.
ImmutableMap.<String, Object>builder()
.put("b0", new IOException())
.put("b1", new RuntimeException())
.put("b2", block2)
.build(),
// Next, b0 is successful, b1 errors unrecoverably, and b2 triggers a retry.
ImmutableMap.<String, Object>builder()
.put("b0", block0)
.put("b1", new RuntimeException())
.put("b2", new IOException())
.build(),
// b2 succeeds in its last retry.
ImmutableMap.<String, Object>builder()
.put("b2", block2)
.build()
);
performInteractions(interactions, listener);
verify(listener, timeout(5000)).onBlockFetchSuccess("b0", block0);
verify(listener, timeout(5000)).onBlockFetchFailure(eq("b1"), any());
verify(listener, timeout(5000)).onBlockFetchSuccess("b2", block2);
verifyNoMoreInteractions(listener);
}
/**
* Performs a set of interactions in response to block requests from a RetryingBlockFetcher.
* Each interaction is a Map from BlockId to either ManagedBuffer or Exception. This interaction
* means "respond to the next block fetch request with these Successful buffers and these Failure
* exceptions". We verify that the expected block ids are exactly the ones requested.
*
* If multiple interactions are supplied, they will be used in order. This is useful for encoding
* retries -- the first interaction may include an IOException, which causes a retry of some
* subset of the original blocks in a second interaction.
*/
@SuppressWarnings("unchecked")
private static void performInteractions(List<? extends Map<String, Object>> interactions,
BlockFetchingListener listener)
throws IOException, InterruptedException {
MapConfigProvider provider = new MapConfigProvider(ImmutableMap.of(
"spark.shuffle.io.maxRetries", "2",
"spark.shuffle.io.retryWait", "0"));
TransportConf conf = new TransportConf("shuffle", provider);
BlockFetchStarter fetchStarter = mock(BlockFetchStarter.class);
Stubber stub = null;
// Contains all blockIds that are referenced across all interactions.
LinkedHashSet<String> blockIds = Sets.newLinkedHashSet();
for (Map<String, Object> interaction : interactions) {
blockIds.addAll(interaction.keySet());
Answer<Void> answer = invocationOnMock -> {
try {
// Verify that the RetryingBlockFetcher requested the expected blocks.
String[] requestedBlockIds = (String[]) invocationOnMock.getArguments()[0];
String[] desiredBlockIds = interaction.keySet().toArray(new String[interaction.size()]);
assertArrayEquals(desiredBlockIds, requestedBlockIds);
// Now actually invoke the success/failure callbacks on each block.
BlockFetchingListener retryListener =
(BlockFetchingListener) invocationOnMock.getArguments()[1];
for (Map.Entry<String, Object> block : interaction.entrySet()) {
String blockId = block.getKey();
Object blockValue = block.getValue();
if (blockValue instanceof ManagedBuffer) {
retryListener.onBlockFetchSuccess(blockId, (ManagedBuffer) blockValue);
} else if (blockValue instanceof Exception) {
retryListener.onBlockFetchFailure(blockId, (Exception) blockValue);
} else {
fail("Can only handle ManagedBuffers and Exceptions, got " + blockValue);
}
}
return null;
} catch (Throwable e) {
e.printStackTrace();
throw e;
}
};
// This is either the first stub, or should be chained behind the prior ones.
if (stub == null) {
stub = doAnswer(answer);
} else {
stub.doAnswer(answer);
}
}
assertNotNull(stub);
stub.when(fetchStarter).createAndStart(any(), anyObject());
String[] blockIdArray = blockIds.toArray(new String[blockIds.size()]);
new RetryingBlockFetcher(conf, fetchStarter, blockIdArray, listener).start();
}
}
| 9,882 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleCleanupSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.util.concurrent.MoreExecutors;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
public class ExternalShuffleCleanupSuite {
// Same-thread Executor used to ensure cleanup happens synchronously in test thread.
private Executor sameThreadExecutor = MoreExecutors.sameThreadExecutor();
private TransportConf conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
private static final String SORT_MANAGER = "org.apache.spark.shuffle.sort.SortShuffleManager";
@Test
public void noCleanupAndCleanup() throws IOException {
TestShuffleDataContext dataContext = createSomeData();
ExternalShuffleBlockResolver resolver =
new ExternalShuffleBlockResolver(conf, null, sameThreadExecutor);
resolver.registerExecutor("app", "exec0", dataContext.createExecutorInfo(SORT_MANAGER));
resolver.applicationRemoved("app", false /* cleanup */);
assertStillThere(dataContext);
resolver.registerExecutor("app", "exec1", dataContext.createExecutorInfo(SORT_MANAGER));
resolver.applicationRemoved("app", true /* cleanup */);
assertCleanedUp(dataContext);
}
@Test
public void cleanupUsesExecutor() throws IOException {
TestShuffleDataContext dataContext = createSomeData();
AtomicBoolean cleanupCalled = new AtomicBoolean(false);
// Executor which does nothing to ensure we're actually using it.
Executor noThreadExecutor = runnable -> cleanupCalled.set(true);
ExternalShuffleBlockResolver manager =
new ExternalShuffleBlockResolver(conf, null, noThreadExecutor);
manager.registerExecutor("app", "exec0", dataContext.createExecutorInfo(SORT_MANAGER));
manager.applicationRemoved("app", true);
assertTrue(cleanupCalled.get());
assertStillThere(dataContext);
dataContext.cleanup();
assertCleanedUp(dataContext);
}
@Test
public void cleanupMultipleExecutors() throws IOException {
TestShuffleDataContext dataContext0 = createSomeData();
TestShuffleDataContext dataContext1 = createSomeData();
ExternalShuffleBlockResolver resolver =
new ExternalShuffleBlockResolver(conf, null, sameThreadExecutor);
resolver.registerExecutor("app", "exec0", dataContext0.createExecutorInfo(SORT_MANAGER));
resolver.registerExecutor("app", "exec1", dataContext1.createExecutorInfo(SORT_MANAGER));
resolver.applicationRemoved("app", true);
assertCleanedUp(dataContext0);
assertCleanedUp(dataContext1);
}
@Test
public void cleanupOnlyRemovedApp() throws IOException {
TestShuffleDataContext dataContext0 = createSomeData();
TestShuffleDataContext dataContext1 = createSomeData();
ExternalShuffleBlockResolver resolver =
new ExternalShuffleBlockResolver(conf, null, sameThreadExecutor);
resolver.registerExecutor("app-0", "exec0", dataContext0.createExecutorInfo(SORT_MANAGER));
resolver.registerExecutor("app-1", "exec0", dataContext1.createExecutorInfo(SORT_MANAGER));
resolver.applicationRemoved("app-nonexistent", true);
assertStillThere(dataContext0);
assertStillThere(dataContext1);
resolver.applicationRemoved("app-0", true);
assertCleanedUp(dataContext0);
assertStillThere(dataContext1);
resolver.applicationRemoved("app-1", true);
assertCleanedUp(dataContext0);
assertCleanedUp(dataContext1);
// Make sure it's not an error to cleanup multiple times
resolver.applicationRemoved("app-1", true);
assertCleanedUp(dataContext0);
assertCleanedUp(dataContext1);
}
private static void assertStillThere(TestShuffleDataContext dataContext) {
for (String localDir : dataContext.localDirs) {
assertTrue(localDir + " was cleaned up prematurely", new File(localDir).exists());
}
}
private static void assertCleanedUp(TestShuffleDataContext dataContext) {
for (String localDir : dataContext.localDirs) {
assertFalse(localDir + " wasn't cleaned up", new File(localDir).exists());
}
}
private static TestShuffleDataContext createSomeData() throws IOException {
Random rand = new Random(123);
TestShuffleDataContext dataContext = new TestShuffleDataContext(10, 5);
dataContext.create();
dataContext.insertSortShuffleData(rand.nextInt(1000), rand.nextInt(1000), new byte[][] {
"ABC".getBytes(StandardCharsets.UTF_8),
"DEF".getBytes(StandardCharsets.UTF_8)});
return dataContext;
}
}
| 9,883 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolverSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.io.CharStreams;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.AppExecId;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Tests for ExternalShuffleBlockResolver: rejection of bad requests, reading
 * sort-based shuffle blocks written by TestShuffleDataContext, JSON round-trips
 * of the registration metadata types, and pathname normalization/interning.
 */
public class ExternalShuffleBlockResolverSuite {
  // Contents of the two sort-shuffle partitions written in beforeAll().
  private static final String sortBlock0 = "Hello!";
  private static final String sortBlock1 = "World!";
  private static final String SORT_MANAGER = "org.apache.spark.shuffle.sort.SortShuffleManager";
  // Shared across all tests; created in beforeAll(), deleted in afterAll().
  private static TestShuffleDataContext dataContext;
  private static final TransportConf conf =
      new TransportConf("shuffle", MapConfigProvider.EMPTY);
  /** Creates shuffle dirs and writes one sort-shuffle output with two partitions. */
  @BeforeClass
  public static void beforeAll() throws IOException {
    dataContext = new TestShuffleDataContext(2, 5);
    dataContext.create();
    // Write some sort data.
    dataContext.insertSortShuffleData(0, 0, new byte[][] {
        sortBlock0.getBytes(StandardCharsets.UTF_8),
        sortBlock1.getBytes(StandardCharsets.UTF_8)});
  }
  @AfterClass
  public static void afterAll() {
    dataContext.cleanup();
  }
  /** Unknown executors, unsupported shuffle managers, and missing blocks must all fail. */
  @Test
  public void testBadRequests() throws IOException {
    ExternalShuffleBlockResolver resolver = new ExternalShuffleBlockResolver(conf, null);
    // Unregistered executor
    try {
      resolver.getBlockData("app0", "exec1", 1, 1, 0);
      fail("Should have failed");
    } catch (RuntimeException e) {
      assertTrue("Bad error message: " + e, e.getMessage().contains("not registered"));
    }
    // Invalid shuffle manager
    try {
      resolver.registerExecutor("app0", "exec2", dataContext.createExecutorInfo("foobar"));
      resolver.getBlockData("app0", "exec2", 1, 1, 0);
      fail("Should have failed");
    } catch (UnsupportedOperationException e) {
      // pass
    }
    // Nonexistent shuffle block: executor is registered, but shuffle 1 was never written.
    resolver.registerExecutor("app0", "exec3",
      dataContext.createExecutorInfo(SORT_MANAGER));
    try {
      resolver.getBlockData("app0", "exec3", 1, 1, 0);
      fail("Should have failed");
    } catch (Exception e) {
      // pass
    }
  }
  /** Reads back both partitions written in beforeAll() and checks their contents. */
  @Test
  public void testSortShuffleBlocks() throws IOException {
    ExternalShuffleBlockResolver resolver = new ExternalShuffleBlockResolver(conf, null);
    resolver.registerExecutor("app0", "exec0",
      dataContext.createExecutorInfo(SORT_MANAGER));
    InputStream block0Stream =
      resolver.getBlockData("app0", "exec0", 0, 0, 0).createInputStream();
    String block0 = CharStreams.toString(
      new InputStreamReader(block0Stream, StandardCharsets.UTF_8));
    block0Stream.close();
    assertEquals(sortBlock0, block0);
    InputStream block1Stream =
      resolver.getBlockData("app0", "exec0", 0, 0, 1).createInputStream();
    String block1 = CharStreams.toString(
      new InputStreamReader(block1Stream, StandardCharsets.UTF_8));
    block1Stream.close();
    assertEquals(sortBlock1, block1);
  }
  /** AppExecId and ExecutorShuffleInfo must survive a Jackson JSON round-trip. */
  @Test
  public void jsonSerializationOfExecutorRegistration() throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    AppExecId appId = new AppExecId("foo", "bar");
    String appIdJson = mapper.writeValueAsString(appId);
    AppExecId parsedAppId = mapper.readValue(appIdJson, AppExecId.class);
    assertEquals(parsedAppId, appId);
    ExecutorShuffleInfo shuffleInfo =
      new ExecutorShuffleInfo(new String[]{"/bippy", "/flippy"}, 7, SORT_MANAGER);
    String shuffleJson = mapper.writeValueAsString(shuffleInfo);
    ExecutorShuffleInfo parsedShuffleInfo =
      mapper.readValue(shuffleJson, ExecutorShuffleInfo.class);
    assertEquals(parsedShuffleInfo, shuffleInfo);
    // Intentionally keep these hard-coded strings in here, to check backwards-compatibility.
    // It's not legacy yet, but keeping this here in case anybody changes the field names.
    String legacyAppIdJson = "{\"appId\":\"foo\", \"execId\":\"bar\"}";
    assertEquals(appId, mapper.readValue(legacyAppIdJson, AppExecId.class));
    String legacyShuffleJson = "{\"localDirs\": [\"/bippy\", \"/flippy\"], " +
      "\"subDirsPerLocalDir\": 7, \"shuffleManager\": " + "\"" + SORT_MANAGER + "\"}";
    assertEquals(shuffleInfo, mapper.readValue(legacyShuffleJson, ExecutorShuffleInfo.class));
  }
  /** Redundant slashes must collapse; trailing slashes must be dropped (except bare "/"). */
  @Test
  public void testNormalizeAndInternPathname() {
    assertPathsMatch("/foo", "bar", "baz", "/foo/bar/baz");
    assertPathsMatch("//foo/", "bar/", "//baz", "/foo/bar/baz");
    assertPathsMatch("foo", "bar", "baz///", "foo/bar/baz");
    assertPathsMatch("/foo/", "/bar//", "/baz", "/foo/bar/baz");
    assertPathsMatch("/", "", "", "/");
    assertPathsMatch("/", "/", "/", "/");
  }
  // Checks both the normalized value and that the result survives a File round-trip
  // as the very same object.
  private void assertPathsMatch(String p1, String p2, String p3, String expectedPathname) {
    String normPathname =
      ExternalShuffleBlockResolver.createNormalizedInternedPathname(p1, p2, p3);
    assertEquals(expectedPathname, normPathname);
    File file = new File(normPathname);
    String returnedPath = file.getPath();
    // Reference equality (==) is intentional here: the pathname is expected to be
    // already normalized, so File.getPath() should hand back the same (interned)
    // String instance rather than a copy.
    assertTrue(normPathname == returnedPath);
  }
}
| 9,884 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/NonShuffleFilesCleanupSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.util.concurrent.MoreExecutors;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
/**
 * Verifies that ExternalShuffleBlockResolver.executorRemoved() deletes an
 * executor's non-shuffle files (e.g. spill files) while leaving shuffle data
 * and index files in place, and that the deletion runs on the supplied Executor.
 */
public class NonShuffleFilesCleanupSuite {
  // Same-thread Executor used to ensure cleanup happens synchronously in test thread.
  // Runnable::run executes inline on the caller; this replaces the deprecated
  // Guava MoreExecutors.sameThreadExecutor() with a dependency-free equivalent.
  private Executor sameThreadExecutor = Runnable::run;
  private TransportConf conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
  private static final String SORT_MANAGER = "org.apache.spark.shuffle.sort.SortShuffleManager";

  @Test
  public void cleanupOnRemovedExecutorWithShuffleFiles() throws IOException {
    cleanupOnRemovedExecutor(true);
  }

  @Test
  public void cleanupOnRemovedExecutorWithoutShuffleFiles() throws IOException {
    cleanupOnRemovedExecutor(false);
  }

  // Removing a registered executor must delete its non-shuffle files.
  private void cleanupOnRemovedExecutor(boolean withShuffleFiles) throws IOException {
    TestShuffleDataContext dataContext = initDataContext(withShuffleFiles);
    ExternalShuffleBlockResolver resolver =
      new ExternalShuffleBlockResolver(conf, null, sameThreadExecutor);
    resolver.registerExecutor("app", "exec0", dataContext.createExecutorInfo(SORT_MANAGER));
    resolver.executorRemoved("exec0", "app");
    assertCleanedUp(dataContext);
  }

  @Test
  public void cleanupUsesExecutorWithShuffleFiles() throws IOException {
    cleanupUsesExecutor(true);
  }

  @Test
  public void cleanupUsesExecutorWithoutShuffleFiles() throws IOException {
    cleanupUsesExecutor(false);
  }

  // The resolver must submit deletion work to the provided Executor rather than
  // deleting inline; with a no-op executor nothing may be removed.
  private void cleanupUsesExecutor(boolean withShuffleFiles) throws IOException {
    TestShuffleDataContext dataContext = initDataContext(withShuffleFiles);
    AtomicBoolean cleanupCalled = new AtomicBoolean(false);
    // Executor which does nothing to ensure we're actually using it.
    Executor noThreadExecutor = runnable -> cleanupCalled.set(true);
    ExternalShuffleBlockResolver manager =
      new ExternalShuffleBlockResolver(conf, null, noThreadExecutor);
    manager.registerExecutor("app", "exec0", dataContext.createExecutorInfo(SORT_MANAGER));
    manager.executorRemoved("exec0", "app");
    assertTrue(cleanupCalled.get());
    assertStillThere(dataContext);
  }

  @Test
  public void cleanupOnlyRemovedExecutorWithShuffleFiles() throws IOException {
    cleanupOnlyRemovedExecutor(true);
  }

  @Test
  public void cleanupOnlyRemovedExecutorWithoutShuffleFiles() throws IOException {
    cleanupOnlyRemovedExecutor(false);
  }

  // Removing one executor must not touch another executor's directories, and
  // removing the same executor twice must not be an error.
  private void cleanupOnlyRemovedExecutor(boolean withShuffleFiles) throws IOException {
    TestShuffleDataContext dataContext0 = initDataContext(withShuffleFiles);
    TestShuffleDataContext dataContext1 = initDataContext(withShuffleFiles);
    ExternalShuffleBlockResolver resolver =
      new ExternalShuffleBlockResolver(conf, null, sameThreadExecutor);
    resolver.registerExecutor("app", "exec0", dataContext0.createExecutorInfo(SORT_MANAGER));
    resolver.registerExecutor("app", "exec1", dataContext1.createExecutorInfo(SORT_MANAGER));
    resolver.executorRemoved("exec-nonexistent", "app");
    assertStillThere(dataContext0);
    assertStillThere(dataContext1);
    resolver.executorRemoved("exec0", "app");
    assertCleanedUp(dataContext0);
    assertStillThere(dataContext1);
    resolver.executorRemoved("exec1", "app");
    assertCleanedUp(dataContext0);
    assertCleanedUp(dataContext1);
    // Make sure it's not an error to cleanup multiple times
    resolver.executorRemoved("exec1", "app");
    assertCleanedUp(dataContext0);
    assertCleanedUp(dataContext1);
  }

  @Test
  public void cleanupOnlyRegisteredExecutorWithShuffleFiles() throws IOException {
    cleanupOnlyRegisteredExecutor(true);
  }

  @Test
  public void cleanupOnlyRegisteredExecutorWithoutShuffleFiles() throws IOException {
    cleanupOnlyRegisteredExecutor(false);
  }

  // Removing an unknown executor id is a no-op; only registered ids are cleaned.
  private void cleanupOnlyRegisteredExecutor(boolean withShuffleFiles) throws IOException {
    TestShuffleDataContext dataContext = initDataContext(withShuffleFiles);
    ExternalShuffleBlockResolver resolver =
      new ExternalShuffleBlockResolver(conf, null, sameThreadExecutor);
    resolver.registerExecutor("app", "exec0", dataContext.createExecutorInfo(SORT_MANAGER));
    resolver.executorRemoved("exec1", "app");
    assertStillThere(dataContext);
    resolver.executorRemoved("exec0", "app");
    assertCleanedUp(dataContext);
  }

  // Asserts every local dir of the context still exists on disk.
  private static void assertStillThere(TestShuffleDataContext dataContext) {
    for (String localDir : dataContext.localDirs) {
      assertTrue(localDir + " was cleaned up prematurely", new File(localDir).exists());
    }
  }

  // Matches everything EXCEPT shuffle data and index files, i.e. the files that
  // cleanup is supposed to delete.
  private static final FilenameFilter filter =
    (dir, name) -> !name.endsWith(".index") && !name.endsWith(".data");

  /**
   * Recursively asserts that the given entries contain only shuffle data/index
   * files (or nothing at all). Regular files are checked directly instead of
   * calling listFiles() on them, which would return null and NPE (BUG FIX:
   * previously a leftover non-shuffle file caused a NullPointerException rather
   * than a clean assertion failure).
   */
  private static boolean assertOnlyShuffleDataInDir(File[] dirs) {
    for (File dir : dirs) {
      if (!dir.exists()) {
        continue; // already deleted: trivially clean
      }
      if (dir.isDirectory()) {
        assertTrue(dir.getName() + " wasn't cleaned up",
          dir.listFiles(filter).length == 0 || assertOnlyShuffleDataInDir(dir.listFiles()));
      } else {
        // A plain file must be shuffle data or an index file; anything else survived cleanup.
        assertTrue(dir.getName() + " wasn't cleaned up",
          !filter.accept(dir.getParentFile(), dir.getName()));
      }
    }
    return true;
  }

  private static void assertCleanedUp(TestShuffleDataContext dataContext) {
    for (String localDir : dataContext.localDirs) {
      File[] dirs = new File[] {new File(localDir)};
      assertOnlyShuffleDataInDir(dirs);
    }
  }

  // Builds a context containing non-shuffle (spill) files, optionally plus shuffle files.
  private static TestShuffleDataContext initDataContext(boolean withShuffleFiles)
      throws IOException {
    if (withShuffleFiles) {
      return initDataContextWithShuffleFiles();
    } else {
      return initDataContextWithoutShuffleFiles();
    }
  }

  private static TestShuffleDataContext initDataContextWithShuffleFiles() throws IOException {
    TestShuffleDataContext dataContext = createDataContext();
    createShuffleFiles(dataContext);
    createNonShuffleFiles(dataContext);
    return dataContext;
  }

  private static TestShuffleDataContext initDataContextWithoutShuffleFiles() throws IOException {
    TestShuffleDataContext dataContext = createDataContext();
    createNonShuffleFiles(dataContext);
    return dataContext;
  }

  private static TestShuffleDataContext createDataContext() {
    TestShuffleDataContext dataContext = new TestShuffleDataContext(10, 5);
    dataContext.create();
    return dataContext;
  }

  private static void createShuffleFiles(TestShuffleDataContext dataContext) throws IOException {
    Random rand = new Random(123);
    dataContext.insertSortShuffleData(rand.nextInt(1000), rand.nextInt(1000), new byte[][] {
        "ABC".getBytes(StandardCharsets.UTF_8),
        "DEF".getBytes(StandardCharsets.UTF_8)});
  }

  private static void createNonShuffleFiles(TestShuffleDataContext dataContext) throws IOException {
    // Create spill file(s)
    dataContext.insertSpillData();
  }
}
| 9,885 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandlerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.nio.ByteBuffer;
import java.util.Iterator;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import static org.junit.Assert.*;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.server.OneForOneStreamManager;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.shuffle.protocol.BlockTransferMessage;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.shuffle.protocol.OpenBlocks;
import org.apache.spark.network.shuffle.protocol.RegisterExecutor;
import org.apache.spark.network.shuffle.protocol.StreamHandle;
import org.apache.spark.network.shuffle.protocol.UploadBlock;
/**
 * Tests for ExternalShuffleBlockHandler's RPC dispatch: executor registration,
 * opening shuffle blocks as a managed-buffer stream, and rejection of malformed
 * or unsupported messages. The stream manager and block resolver are mocked, so
 * only the handler's routing, stream registration, and metrics are exercised.
 */
public class ExternalShuffleBlockHandlerSuite {
  TransportClient client = mock(TransportClient.class);
  OneForOneStreamManager streamManager;
  ExternalShuffleBlockResolver blockResolver;
  RpcHandler handler;
  /** Fresh mocks and a fresh handler per test so interactions/metrics don't leak. */
  @Before
  public void beforeEach() {
    streamManager = mock(OneForOneStreamManager.class);
    blockResolver = mock(ExternalShuffleBlockResolver.class);
    handler = new ExternalShuffleBlockHandler(streamManager, blockResolver);
  }
  /** A RegisterExecutor message must be forwarded to the resolver and acknowledged. */
  @Test
  public void testRegisterExecutor() {
    RpcResponseCallback callback = mock(RpcResponseCallback.class);
    ExecutorShuffleInfo config = new ExecutorShuffleInfo(new String[] {"/a", "/b"}, 16, "sort");
    ByteBuffer registerMessage = new RegisterExecutor("app0", "exec1", config).toByteBuffer();
    handler.receive(client, registerMessage, callback);
    verify(blockResolver, times(1)).registerExecutor("app0", "exec1", config);
    // Exactly one success, never a failure.
    verify(callback, times(1)).onSuccess(any(ByteBuffer.class));
    verify(callback, never()).onFailure(any(Throwable.class));
    // Verify register executor request latency metrics
    Timer registerExecutorRequestLatencyMillis = (Timer) ((ExternalShuffleBlockHandler) handler)
      .getAllMetrics()
      .getMetrics()
      .get("registerExecutorRequestLatencyMillis");
    assertEquals(1, registerExecutorRequestLatencyMillis.getCount());
  }
  /**
   * An OpenBlocks request must register a stream whose chunks are the resolver's
   * buffers in request order, and must update the latency and transfer metrics.
   */
  @SuppressWarnings("unchecked")
  @Test
  public void testOpenShuffleBlocks() {
    RpcResponseCallback callback = mock(RpcResponseCallback.class);
    // Distinct marker buffers (3 and 7 bytes) identify each block and give a
    // predictable total of 10 bytes for the transfer-rate metric below.
    ManagedBuffer block0Marker = new NioManagedBuffer(ByteBuffer.wrap(new byte[3]));
    ManagedBuffer block1Marker = new NioManagedBuffer(ByteBuffer.wrap(new byte[7]));
    when(blockResolver.getBlockData("app0", "exec1", 0, 0, 0)).thenReturn(block0Marker);
    when(blockResolver.getBlockData("app0", "exec1", 0, 0, 1)).thenReturn(block1Marker);
    ByteBuffer openBlocks = new OpenBlocks("app0", "exec1",
      new String[] { "shuffle_0_0_0", "shuffle_0_0_1" })
      .toByteBuffer();
    handler.receive(client, openBlocks, callback);
    // The response must be a StreamHandle describing both chunks.
    ArgumentCaptor<ByteBuffer> response = ArgumentCaptor.forClass(ByteBuffer.class);
    verify(callback, times(1)).onSuccess(response.capture());
    verify(callback, never()).onFailure(any());
    StreamHandle handle =
      (StreamHandle) BlockTransferMessage.Decoder.fromByteBuffer(response.getValue());
    assertEquals(2, handle.numChunks);
    // Capture the iterator handed to the stream manager and check it yields the
    // resolver's buffers in request order.
    @SuppressWarnings("unchecked")
    ArgumentCaptor<Iterator<ManagedBuffer>> stream = (ArgumentCaptor<Iterator<ManagedBuffer>>)
      (ArgumentCaptor<?>) ArgumentCaptor.forClass(Iterator.class);
    verify(streamManager, times(1)).registerStream(anyString(), stream.capture(),
      any());
    Iterator<ManagedBuffer> buffers = stream.getValue();
    assertEquals(block0Marker, buffers.next());
    assertEquals(block1Marker, buffers.next());
    assertFalse(buffers.hasNext());
    verify(blockResolver, times(1)).getBlockData("app0", "exec1", 0, 0, 0);
    verify(blockResolver, times(1)).getBlockData("app0", "exec1", 0, 0, 1);
    // Verify open block request latency metrics
    Timer openBlockRequestLatencyMillis = (Timer) ((ExternalShuffleBlockHandler) handler)
      .getAllMetrics()
      .getMetrics()
      .get("openBlockRequestLatencyMillis");
    assertEquals(1, openBlockRequestLatencyMillis.getCount());
    // Verify block transfer metrics: 3 + 7 bytes from the two marker buffers.
    Meter blockTransferRateBytes = (Meter) ((ExternalShuffleBlockHandler) handler)
      .getAllMetrics()
      .getMetrics()
      .get("blockTransferRateBytes");
    assertEquals(10, blockTransferRateBytes.getCount());
  }
  /** Undecodable bytes must throw; a decodable but unsupported message must too. */
  @Test
  public void testBadMessages() {
    RpcResponseCallback callback = mock(RpcResponseCallback.class);
    ByteBuffer unserializableMsg = ByteBuffer.wrap(new byte[] { 0x12, 0x34, 0x56 });
    try {
      handler.receive(client, unserializableMsg, callback);
      fail("Should have thrown");
    } catch (Exception e) {
      // pass
    }
    // UploadBlock is a valid message type, but one this handler does not serve.
    ByteBuffer unexpectedMsg = new UploadBlock("a", "e", "b", new byte[1],
      new byte[2]).toByteBuffer();
    try {
      handler.receive(client, unexpectedMsg, callback);
      fail("Should have thrown");
    } catch (UnsupportedOperationException e) {
      // pass
    }
    // In both failure modes the callback must never be invoked.
    verify(callback, never()).onSuccess(any(ByteBuffer.class));
    verify(callback, never()).onFailure(any(Throwable.class));
  }
}
| 9,886 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleSecuritySuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.IOException;
import java.util.Arrays;
import com.google.common.collect.ImmutableMap;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.spark.network.TestUtils;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.sasl.SaslServerBootstrap;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
/**
 * End-to-end SASL tests for the external shuffle service: a client can only
 * register with the server when its app id and secret match the server's,
 * both with and without SASL encryption.
 */
public class ExternalShuffleSecuritySuite {

  TransportConf conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
  TransportServer server;

  /** Starts a SASL-protected shuffle server expecting appId "my-app-id" / secret "secret". */
  @Before
  public void beforeEach() throws IOException {
    TransportContext context =
      new TransportContext(conf, new ExternalShuffleBlockHandler(conf, null));
    TransportServerBootstrap bootstrap = new SaslServerBootstrap(conf,
        new TestSecretKeyHolder("my-app-id", "secret"));
    this.server = context.createServer(Arrays.asList(bootstrap));
  }

  @After
  public void afterEach() {
    if (server != null) {
      server.close();
      server = null;
    }
  }

  @Test
  public void testValid() throws IOException, InterruptedException {
    validate("my-app-id", "secret", false);
  }

  @Test
  public void testBadAppId() {
    try {
      validate("wrong-app-id", "secret", false);
      // BUG FIX: without this the test silently passed when validation
      // unexpectedly succeeded. fail() throws AssertionError (an Error),
      // so the catch below does not swallow it.
      fail("Should have failed");
    } catch (Exception e) {
      assertTrue(e.getMessage(), e.getMessage().contains("Wrong appId!"));
    }
  }

  @Test
  public void testBadSecret() {
    try {
      validate("my-app-id", "bad-secret", false);
      // BUG FIX: same silent-pass issue as testBadAppId.
      fail("Should have failed");
    } catch (Exception e) {
      assertTrue(e.getMessage(), e.getMessage().contains("Mismatched response"));
    }
  }

  @Test
  public void testEncryption() throws IOException, InterruptedException {
    validate("my-app-id", "secret", true);
  }

  /** Creates an ExternalShuffleClient and attempts to register with the server. */
  private void validate(String appId, String secretKey, boolean encrypt)
      throws IOException, InterruptedException {
    TransportConf testConf = conf;
    if (encrypt) {
      testConf = new TransportConf("shuffle", new MapConfigProvider(
        ImmutableMap.of("spark.authenticate.enableSaslEncryption", "true")));
    }
    ExternalShuffleClient client =
      new ExternalShuffleClient(testConf, new TestSecretKeyHolder(appId, secretKey), true, 5000);
    // try/finally so the client is closed even when registration throws
    // (the bad-credential tests rely on that exception propagating).
    try {
      client.init(appId);
      // Registration either succeeds or throws an exception.
      client.registerWithShuffleServer(TestUtils.getLocalHost(), server.getPort(), "exec0",
        new ExecutorShuffleInfo(new String[0], 0,
          "org.apache.spark.shuffle.sort.SortShuffleManager"));
    } finally {
      client.close();
    }
  }

  /** Provides a secret key holder which always returns the given secret key, for a single appId. */
  static class TestSecretKeyHolder implements SecretKeyHolder {
    private final String appId;
    private final String secretKey;

    TestSecretKeyHolder(String appId, String secretKey) {
      this.appId = appId;
      this.secretKey = secretKey;
    }

    @Override
    public String getSaslUser(String appId) {
      return "user";
    }

    @Override
    public String getSecretKey(String appId) {
      if (!appId.equals(this.appId)) {
        throw new IllegalArgumentException("Wrong appId!");
      }
      return secretKey;
    }
  }
}
| 9,887 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/sasl/ShuffleSecretManagerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.junit.Test;

import static org.junit.Assert.*;
/**
 * Tests ShuffleSecretManager registration semantics: both the String and the
 * ByteBuffer overloads of registerApp, password replacement on re-registration,
 * and per-app unregistration.
 */
public class ShuffleSecretManagerSuite {
  // Two apps, each with an initial and an updated password.
  static final String app1 = "app1";
  static final String app2 = "app2";
  static final String pw1 = "password1";
  static final String pw2 = "password2";
  static final String pw1update = "password1update";
  static final String pw2update = "password2update";

  @Test
  public void testMultipleRegisters() {
    ShuffleSecretManager secretManager = new ShuffleSecretManager();

    // Register via the String overload and via the ByteBuffer overload.
    // BUG FIX: use an explicit charset instead of the bare getBytes(), which
    // depends on the platform default charset.
    secretManager.registerApp(app1, pw1);
    assertEquals(pw1, secretManager.getSecretKey(app1));
    secretManager.registerApp(app2, ByteBuffer.wrap(pw2.getBytes(StandardCharsets.UTF_8)));
    assertEquals(pw2, secretManager.getSecretKey(app2));

    // now update the password for the apps and make sure it takes affect
    secretManager.registerApp(app1, pw1update);
    assertEquals(pw1update, secretManager.getSecretKey(app1));
    secretManager.registerApp(app2, ByteBuffer.wrap(pw2update.getBytes(StandardCharsets.UTF_8)));
    assertEquals(pw2update, secretManager.getSecretKey(app2));

    // Unregistering one app must not affect the other.
    secretManager.unregisterApp(app1);
    assertNull(secretManager.getSecretKey(app1));
    assertEquals(pw2update, secretManager.getSecretKey(app2));

    secretManager.unregisterApp(app2);
    assertNull(secretManager.getSecretKey(app2));
    assertNull(secretManager.getSecretKey(app1));
  }
}
| 9,888 |
0 | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/test/java/org/apache/spark/network/sasl/SaslIntegrationSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.spark.network.TestUtils;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.client.ChunkReceivedCallback;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientFactory;
import org.apache.spark.network.server.OneForOneStreamManager;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.shuffle.BlockFetchingListener;
import org.apache.spark.network.shuffle.ExternalShuffleBlockHandler;
import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver;
import org.apache.spark.network.shuffle.OneForOneBlockFetcher;
import org.apache.spark.network.shuffle.protocol.BlockTransferMessage;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.shuffle.protocol.OpenBlocks;
import org.apache.spark.network.shuffle.protocol.RegisterExecutor;
import org.apache.spark.network.shuffle.protocol.StreamHandle;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
public class SaslIntegrationSuite {
// Use a long timeout to account for slow / overloaded build machines. In the normal case,
// tests should finish way before the timeout expires.
private static final long TIMEOUT_MS = 10_000;
static TransportServer server;
static TransportConf conf;
static TransportContext context;
static SecretKeyHolder secretKeyHolder;
TransportClientFactory clientFactory;
  /**
   * Starts a shared SASL-protected server backed by a TestRpcHandler, with a
   * mocked SecretKeyHolder providing fixed credentials for "app-1"/"app-2" and
   * generic credentials for any other app id.
   *
   * NOTE(review): the anyString() stubs are registered AFTER the eq("app-1") /
   * eq("app-2") stubs; if Mockito resolves overlapping stubbings by recency,
   * the generic stubs may shadow the specific ones — confirm the intended
   * stubbing order.
   */
  @BeforeClass
  public static void beforeAll() throws IOException {
    conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
    context = new TransportContext(conf, new TestRpcHandler());
    secretKeyHolder = mock(SecretKeyHolder.class);
    when(secretKeyHolder.getSaslUser(eq("app-1"))).thenReturn("app-1");
    when(secretKeyHolder.getSecretKey(eq("app-1"))).thenReturn("app-1");
    when(secretKeyHolder.getSaslUser(eq("app-2"))).thenReturn("app-2");
    when(secretKeyHolder.getSecretKey(eq("app-2"))).thenReturn("app-2");
    when(secretKeyHolder.getSaslUser(anyString())).thenReturn("other-app");
    when(secretKeyHolder.getSecretKey(anyString())).thenReturn("correct-password");
    TransportServerBootstrap bootstrap = new SaslServerBootstrap(conf, secretKeyHolder);
    server = context.createServer(Arrays.asList(bootstrap));
  }
  /** Shuts down the server shared by all tests in this suite. */
  @AfterClass
  public static void afterAll() {
    server.close();
  }
  /** Closes the per-test client factory, if the test created one. */
  @After
  public void afterEach() {
    if (clientFactory != null) {
      clientFactory.close();
      clientFactory = null;
    }
  }
@Test
public void testGoodClient() throws IOException, InterruptedException {
clientFactory = context.createClientFactory(
Arrays.asList(new SaslClientBootstrap(conf, "app-1", secretKeyHolder)));
TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
String msg = "Hello, World!";
ByteBuffer resp = client.sendRpcSync(JavaUtils.stringToBytes(msg), TIMEOUT_MS);
assertEquals(msg, JavaUtils.bytesToString(resp));
}
  /**
   * A client whose key holder yields the wrong password must be rejected during
   * the SASL handshake, i.e. while the client factory runs the bootstrap.
   */
  @Test
  public void testBadClient() {
    SecretKeyHolder badKeyHolder = mock(SecretKeyHolder.class);
    when(badKeyHolder.getSaslUser(anyString())).thenReturn("other-app");
    when(badKeyHolder.getSecretKey(anyString())).thenReturn("wrong-password");
    clientFactory = context.createClientFactory(
      Arrays.asList(new SaslClientBootstrap(conf, "unknown-app", badKeyHolder)));
    try {
      // Bootstrap should fail on startup.
      clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
      fail("Connection should have failed.");
    } catch (Exception e) {
      assertTrue(e.getMessage(), e.getMessage().contains("Mismatched response"));
    }
  }
  /**
   * A client with no SASL bootstrap must be rejected by the SASL server: both a
   * plain RPC and a single guessed tag byte fail before reaching the handler.
   */
  @Test
  public void testNoSaslClient() throws IOException, InterruptedException {
    clientFactory = context.createClientFactory(new ArrayList<>());
    TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    try {
      client.sendRpcSync(ByteBuffer.allocate(13), TIMEOUT_MS);
      fail("Should have failed");
    } catch (Exception e) {
      assertTrue(e.getMessage(), e.getMessage().contains("Expected SaslMessage"));
    }
    try {
      // Guessing the right tag byte doesn't magically get you in...
      client.sendRpcSync(ByteBuffer.wrap(new byte[] { (byte) 0xEA }), TIMEOUT_MS);
      fail("Should have failed");
    } catch (Exception e) {
      assertTrue(e.getMessage(), e.getMessage().contains("java.lang.IndexOutOfBoundsException"));
    }
  }
@Test
public void testNoSaslServer() {
RpcHandler handler = new TestRpcHandler();
TransportContext context = new TransportContext(conf, handler);
clientFactory = context.createClientFactory(
Arrays.asList(new SaslClientBootstrap(conf, "app-1", secretKeyHolder)));
try (TransportServer server = context.createServer()) {
clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
} catch (Exception e) {
assertTrue(e.getMessage(), e.getMessage().contains("Digest-challenge format violation"));
}
}
  /**
   * This test is not actually testing SASL behavior, but testing that the shuffle service
   * performs correct authorization checks based on the SASL authentication data.
   */
  @Test
  public void testAppIsolation() throws Exception {
    // Start a new server with the correct RPC handler to serve block data.
    ExternalShuffleBlockResolver blockResolver = mock(ExternalShuffleBlockResolver.class);
    ExternalShuffleBlockHandler blockHandler = new ExternalShuffleBlockHandler(
      new OneForOneStreamManager(), blockResolver);
    TransportServerBootstrap bootstrap = new SaslServerBootstrap(conf, secretKeyHolder);
    TransportContext blockServerContext = new TransportContext(conf, blockHandler);
    TransportServer blockServer = blockServerContext.createServer(Arrays.asList(bootstrap));
    TransportClient client1 = null;
    TransportClient client2 = null;
    TransportClientFactory clientFactory2 = null;
    try {
      // Create a client, and make a request to fetch blocks from a different app.
      // Authenticated as "app-1" but requesting "app-2"'s blocks: must be rejected.
      clientFactory = blockServerContext.createClientFactory(
        Arrays.asList(new SaslClientBootstrap(conf, "app-1", secretKeyHolder)));
      client1 = clientFactory.createClient(TestUtils.getLocalHost(),
        blockServer.getPort());
      AtomicReference<Throwable> exception = new AtomicReference<>();
      // Latch lets the test thread wait for the asynchronous fetch callbacks below.
      CountDownLatch blockFetchLatch = new CountDownLatch(1);
      BlockFetchingListener listener = new BlockFetchingListener() {
        @Override
        public void onBlockFetchSuccess(String blockId, ManagedBuffer data) {
          blockFetchLatch.countDown();
        }
        @Override
        public void onBlockFetchFailure(String blockId, Throwable t) {
          exception.set(t);
          blockFetchLatch.countDown();
        }
      };
      String[] blockIds = { "shuffle_0_1_2", "shuffle_0_3_4" };
      OneForOneBlockFetcher fetcher =
        new OneForOneBlockFetcher(client1, "app-2", "0", blockIds, listener, conf);
      fetcher.start();
      blockFetchLatch.await();
      checkSecurityException(exception.get());
      // Register an executor so that the next steps work.
      ExecutorShuffleInfo executorInfo = new ExecutorShuffleInfo(
        new String[] { System.getProperty("java.io.tmpdir") }, 1,
        "org.apache.spark.shuffle.sort.SortShuffleManager");
      RegisterExecutor regmsg = new RegisterExecutor("app-1", "0", executorInfo);
      client1.sendRpcSync(regmsg.toByteBuffer(), TIMEOUT_MS);
      // Make a successful request to fetch blocks, which creates a new stream. But do not actually
      // fetch any blocks, to keep the stream open.
      OpenBlocks openMessage = new OpenBlocks("app-1", "0", blockIds);
      ByteBuffer response = client1.sendRpcSync(openMessage.toByteBuffer(), TIMEOUT_MS);
      StreamHandle stream = (StreamHandle) BlockTransferMessage.Decoder.fromByteBuffer(response);
      long streamId = stream.streamId;
      // Create a second client, authenticated with a different app ID, and try to read from
      // the stream created for the previous app.
      clientFactory2 = blockServerContext.createClientFactory(
        Arrays.asList(new SaslClientBootstrap(conf, "app-2", secretKeyHolder)));
      client2 = clientFactory2.createClient(TestUtils.getLocalHost(),
        blockServer.getPort());
      CountDownLatch chunkReceivedLatch = new CountDownLatch(1);
      ChunkReceivedCallback callback = new ChunkReceivedCallback() {
        @Override
        public void onSuccess(int chunkIndex, ManagedBuffer buffer) {
          chunkReceivedLatch.countDown();
        }
        @Override
        public void onFailure(int chunkIndex, Throwable t) {
          exception.set(t);
          chunkReceivedLatch.countDown();
        }
      };
      // Reset the earlier (expected) failure so the next assertion checks this fetch only.
      exception.set(null);
      client2.fetchChunk(streamId, 0, callback);
      chunkReceivedLatch.await();
      // "app-2" must not be able to read a stream that was opened by "app-1".
      checkSecurityException(exception.get());
    } finally {
      if (client1 != null) {
        client1.close();
      }
      if (client2 != null) {
        client2.close();
      }
      if (clientFactory2 != null) {
        clientFactory2.close();
      }
      blockServer.close();
    }
  }
/** RPC handler which simply responds with the message it received. */
public static class TestRpcHandler extends RpcHandler {
@Override
public void receive(TransportClient client, ByteBuffer message, RpcResponseCallback callback) {
callback.onSuccess(message);
}
@Override
public StreamManager getStreamManager() {
return new OneForOneStreamManager();
}
}
private static void checkSecurityException(Throwable t) {
assertNotNull("No exception was caught.", t);
assertTrue("Expected SecurityException.",
t.getMessage().contains(SecurityException.class.getName()));
}
}
| 9,889 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/SimpleDownloadFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;

import org.apache.spark.network.buffer.FileSegmentManagedBuffer;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.util.TransportConf;
/**
 * A DownloadFile that does not take any encryption settings into account for reading and
 * writing data.
 *
 * This does *not* mean the data in the file is un-encrypted -- it could be that the data is
 * already encrypted when its written, and subsequent layer is responsible for decrypting.
 */
public class SimpleDownloadFile implements DownloadFile {

  /** Backing file on local disk that holds the downloaded data. */
  private final File file;
  /** Forwarded to the FileSegmentManagedBuffer returned by closeAndRead(). */
  private final TransportConf transportConf;

  public SimpleDownloadFile(File file, TransportConf transportConf) {
    this.file = file;
    this.transportConf = transportConf;
  }

  @Override
  public boolean delete() {
    return file.delete();
  }

  @Override
  public DownloadFileWritableChannel openForWriting() throws IOException {
    return new SimpleDownloadWritableChannel();
  }

  @Override
  public String path() {
    return file.getAbsolutePath();
  }

  /** Plain pass-through channel that writes directly to {@link #file}. */
  private class SimpleDownloadWritableChannel implements DownloadFileWritableChannel {

    private final WritableByteChannel channel;

    SimpleDownloadWritableChannel() throws FileNotFoundException {
      channel = Channels.newChannel(new FileOutputStream(file));
    }

    @Override
    public ManagedBuffer closeAndRead() {
      // Close the write channel before handing the file out for reading. Previously the
      // underlying FileOutputStream was left open here, leaking one file descriptor per
      // successfully downloaded block (only the failure path closed the channel).
      try {
        channel.close();
      } catch (IOException e) {
        throw new UncheckedIOException("Failed to close " + file, e);
      }
      return new FileSegmentManagedBuffer(transportConf, file, 0, file.length());
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
      return channel.write(src);
    }

    @Override
    public boolean isOpen() {
      return channel.isOpen();
    }

    @Override
    public void close() throws IOException {
      channel.close();
    }
  }
}
| 9,890 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/OneForOneBlockFetcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.client.ChunkReceivedCallback;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.StreamCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.server.OneForOneStreamManager;
import org.apache.spark.network.shuffle.protocol.BlockTransferMessage;
import org.apache.spark.network.shuffle.protocol.OpenBlocks;
import org.apache.spark.network.shuffle.protocol.StreamHandle;
import org.apache.spark.network.util.TransportConf;
/**
 * Simple wrapper on top of a TransportClient which interprets each chunk as a whole block, and
 * invokes the BlockFetchingListener appropriately. This class is agnostic to the actual RPC
 * handler, as long as there is a single "open blocks" message which returns a ShuffleStreamHandle,
 * and Java serialization is used.
 *
 * Note that this typically corresponds to a
 * {@link org.apache.spark.network.server.OneForOneStreamManager} on the server side.
 */
public class OneForOneBlockFetcher {
  private static final Logger logger = LoggerFactory.getLogger(OneForOneBlockFetcher.class);
  // Client used both for the initial OpenBlocks RPC and the subsequent chunk/stream fetches.
  private final TransportClient client;
  // The "open blocks" request; sending it creates the server-side stream of chunks.
  private final OpenBlocks openMessage;
  // Block ids in request order; chunk index i corresponds to blockIds[i].
  private final String[] blockIds;
  private final BlockFetchingListener listener;
  private final ChunkReceivedCallback chunkCallback;
  private final TransportConf transportConf;
  // If non-null, blocks are streamed into temp files instead of being buffered in memory.
  private final DownloadFileManager downloadFileManager;
  // Populated from the OpenBlocks response; null until that RPC succeeds.
  private StreamHandle streamHandle = null;
  public OneForOneBlockFetcher(
      TransportClient client,
      String appId,
      String execId,
      String[] blockIds,
      BlockFetchingListener listener,
      TransportConf transportConf) {
    this(client, appId, execId, blockIds, listener, transportConf, null);
  }
  /**
   * @param downloadFileManager if non-null, fetched blocks are streamed into temp files created
   *                            by this manager rather than held in memory.
   */
  public OneForOneBlockFetcher(
      TransportClient client,
      String appId,
      String execId,
      String[] blockIds,
      BlockFetchingListener listener,
      TransportConf transportConf,
      DownloadFileManager downloadFileManager) {
    this.client = client;
    this.openMessage = new OpenBlocks(appId, execId, blockIds);
    this.blockIds = blockIds;
    this.listener = listener;
    this.chunkCallback = new ChunkCallback();
    this.transportConf = transportConf;
    this.downloadFileManager = downloadFileManager;
  }
  /** Callback invoked on receipt of each chunk. We equate a single chunk to a single block. */
  private class ChunkCallback implements ChunkReceivedCallback {
    @Override
    public void onSuccess(int chunkIndex, ManagedBuffer buffer) {
      // On receipt of a chunk, pass it upwards as a block.
      listener.onBlockFetchSuccess(blockIds[chunkIndex], buffer);
    }
    @Override
    public void onFailure(int chunkIndex, Throwable e) {
      // On receipt of a failure, fail every block from chunkIndex onwards.
      String[] remainingBlockIds = Arrays.copyOfRange(blockIds, chunkIndex, blockIds.length);
      failRemainingBlocks(remainingBlockIds, e);
    }
  }
  /**
   * Begins the fetching process, calling the listener with every block fetched.
   * The given message will be serialized with the Java serializer, and the RPC must return a
   * {@link StreamHandle}. We will send all fetch requests immediately, without throttling.
   */
  public void start() {
    if (blockIds.length == 0) {
      throw new IllegalArgumentException("Zero-sized blockIds array");
    }
    client.sendRpc(openMessage.toByteBuffer(), new RpcResponseCallback() {
      @Override
      public void onSuccess(ByteBuffer response) {
        try {
          streamHandle = (StreamHandle) BlockTransferMessage.Decoder.fromByteBuffer(response);
          logger.trace("Successfully opened blocks {}, preparing to fetch chunks.", streamHandle);
          // Immediately request all chunks -- we expect that the total size of the request is
          // reasonable due to higher level chunking in [[ShuffleBlockFetcherIterator]].
          for (int i = 0; i < streamHandle.numChunks; i++) {
            if (downloadFileManager != null) {
              // Stream-to-disk path: each chunk goes through a DownloadCallback below.
              client.stream(OneForOneStreamManager.genStreamChunkId(streamHandle.streamId, i),
                new DownloadCallback(i));
            } else {
              // In-memory path: each chunk is delivered whole via chunkCallback.
              client.fetchChunk(streamHandle.streamId, i, chunkCallback);
            }
          }
        } catch (Exception e) {
          logger.error("Failed while starting block fetches after success", e);
          failRemainingBlocks(blockIds, e);
        }
      }
      @Override
      public void onFailure(Throwable e) {
        logger.error("Failed while starting block fetches", e);
        failRemainingBlocks(blockIds, e);
      }
    });
  }
  /** Invokes the "onBlockFetchFailure" callback for every listed block id. */
  private void failRemainingBlocks(String[] failedBlockIds, Throwable e) {
    for (String blockId : failedBlockIds) {
      try {
        listener.onBlockFetchFailure(blockId, e);
      } catch (Exception e2) {
        // A throwing listener must not prevent the remaining blocks from being failed.
        logger.error("Error in block fetch failure callback", e2);
      }
    }
  }
  /** Streams one chunk into a temp file obtained from downloadFileManager. */
  private class DownloadCallback implements StreamCallback {
    private DownloadFileWritableChannel channel = null;
    private DownloadFile targetFile = null;
    private int chunkIndex;
    DownloadCallback(int chunkIndex) throws IOException {
      this.targetFile = downloadFileManager.createTempFile(transportConf);
      this.channel = targetFile.openForWriting();
      this.chunkIndex = chunkIndex;
    }
    @Override
    public void onData(String streamId, ByteBuffer buf) throws IOException {
      // write() may not consume the whole buffer in a single call; loop until drained.
      while (buf.hasRemaining()) {
        channel.write(buf);
      }
    }
    @Override
    public void onComplete(String streamId) throws IOException {
      listener.onBlockFetchSuccess(blockIds[chunkIndex], channel.closeAndRead());
      // If the manager declines to track the file for later cleanup, delete it now to
      // avoid leaking the temp file.
      if (!downloadFileManager.registerTempFileToClean(targetFile)) {
        targetFile.delete();
      }
    }
    @Override
    public void onFailure(String streamId, Throwable cause) throws IOException {
      channel.close();
      // On receipt of a failure, fail every block from chunkIndex onwards.
      String[] remainingBlockIds = Arrays.copyOfRange(blockIds, chunkIndex, blockIds.length);
      failRemainingBlocks(remainingBlockIds, cause);
      targetFile.delete();
    }
  }
}
| 9,891 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/BlockFetchingListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.util.EventListener;
import org.apache.spark.network.buffer.ManagedBuffer;
/**
 * Listener notified of the outcome of individual block fetches. Implementations should be fast,
 * as callbacks may fire on network event-loop threads.
 */
public interface BlockFetchingListener extends EventListener {
  /**
   * Called once per successfully fetched block. After this call returns, data will be released
   * automatically. If the data will be passed to another thread, the receiver should retain()
   * and release() the buffer on their own, or copy the data to a new buffer.
   */
  void onBlockFetchSuccess(String blockId, ManagedBuffer data);
  /**
   * Called at least once per block upon failures.
   */
  void onBlockFetchFailure(String blockId, Throwable exception);
}
| 9,892 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import com.codahale.metrics.MetricSet;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientBootstrap;
import org.apache.spark.network.client.TransportClientFactory;
import org.apache.spark.network.crypto.AuthClientBootstrap;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.server.NoOpRpcHandler;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.shuffle.protocol.RegisterExecutor;
import org.apache.spark.network.util.TransportConf;
/**
 * Client for reading shuffle blocks which points to an external (outside of executor) server.
 * This is instead of reading shuffle blocks directly from other executors (via
 * BlockTransferService), which has the downside of losing the shuffle data if we lose the
 * executors.
 */
public class ExternalShuffleClient extends ShuffleClient {
  private static final Logger logger = LoggerFactory.getLogger(ExternalShuffleClient.class);
  private final TransportConf conf;
  private final boolean authEnabled;
  // May be null when authEnabled is false; only used to build the auth bootstrap in init().
  private final SecretKeyHolder secretKeyHolder;
  private final long registrationTimeoutMs;
  // Created in init(); methods below assert via checkInit() that init() ran first.
  protected TransportClientFactory clientFactory;
  protected String appId;
  /**
   * Creates an external shuffle client, with SASL optionally enabled. If SASL is not enabled,
   * then secretKeyHolder may be null.
   */
  public ExternalShuffleClient(
      TransportConf conf,
      SecretKeyHolder secretKeyHolder,
      boolean authEnabled,
      long registrationTimeoutMs) {
    this.conf = conf;
    this.secretKeyHolder = secretKeyHolder;
    this.authEnabled = authEnabled;
    this.registrationTimeoutMs = registrationTimeoutMs;
  }
  /** Asserts that init() has been called before any other method. */
  protected void checkInit() {
    assert appId != null : "Called before init()";
  }
  @Override
  public void init(String appId) {
    this.appId = appId;
    TransportContext context = new TransportContext(conf, new NoOpRpcHandler(), true);
    List<TransportClientBootstrap> bootstraps = Lists.newArrayList();
    if (authEnabled) {
      bootstraps.add(new AuthClientBootstrap(conf, appId, secretKeyHolder));
    }
    clientFactory = context.createClientFactory(bootstraps);
  }
  @Override
  public void fetchBlocks(
      String host,
      int port,
      String execId,
      String[] blockIds,
      BlockFetchingListener listener,
      DownloadFileManager downloadFileManager) {
    checkInit();
    logger.debug("External shuffle fetch from {}:{} (executor id {})", host, port, execId);
    try {
      // The starter obtains a client from the factory on every (re)try, so a retry can
      // recover from a broken connection with a fresh client.
      RetryingBlockFetcher.BlockFetchStarter blockFetchStarter =
        (blockIds1, listener1) -> {
          TransportClient client = clientFactory.createClient(host, port);
          new OneForOneBlockFetcher(client, appId, execId,
            blockIds1, listener1, conf, downloadFileManager).start();
        };
      int maxRetries = conf.maxIORetries();
      if (maxRetries > 0) {
        // Note this Fetcher will correctly handle maxRetries == 0; we avoid it just in case there's
        // a bug in this code. We should remove the if statement once we're sure of the stability.
        new RetryingBlockFetcher(conf, blockFetchStarter, blockIds, listener).start();
      } else {
        blockFetchStarter.createAndStart(blockIds, listener);
      }
    } catch (Exception e) {
      // Synchronous setup failure: every requested block is reported as failed.
      logger.error("Exception while beginning fetchBlocks", e);
      for (String blockId : blockIds) {
        listener.onBlockFetchFailure(blockId, e);
      }
    }
  }
  @Override
  public MetricSet shuffleMetrics() {
    checkInit();
    return clientFactory.getAllMetrics();
  }
  /**
   * Registers this executor with an external shuffle server. This registration is required to
   * inform the shuffle server about where and how we store our shuffle files.
   *
   * @param host Host of shuffle server.
   * @param port Port of shuffle server.
   * @param execId This Executor's id.
   * @param executorInfo Contains all info necessary for the service to find our shuffle files.
   */
  public void registerWithShuffleServer(
      String host,
      int port,
      String execId,
      ExecutorShuffleInfo executorInfo) throws IOException, InterruptedException {
    checkInit();
    // The unmanaged client is owned by this caller, hence the try-with-resources close.
    try (TransportClient client = clientFactory.createUnmanagedClient(host, port)) {
      ByteBuffer registerMessage = new RegisterExecutor(appId, execId, executorInfo).toByteBuffer();
      client.sendRpcSync(registerMessage, registrationTimeoutMs);
    }
  }
  @Override
  public void close() {
    checkInit();
    if (clientFactory != null) {
      clientFactory.close();
      clientFactory = null;
    }
  }
}
| 9,893 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ShuffleIndexRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
/**
 * Immutable (offset, length) pair locating one shuffle block's bytes within its data file.
 */
public class ShuffleIndexRecord {

  /** Byte offset of the block within the shuffle data file. */
  private final long blockOffset;
  /** Number of bytes occupied by the block. */
  private final long blockLength;

  public ShuffleIndexRecord(long offset, long length) {
    this.blockOffset = offset;
    this.blockLength = length;
  }

  public long getLength() {
    return blockLength;
  }

  public long getOffset() {
    return blockOffset;
  }
}
| 9,894 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/RetryingBlockFetcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.util.NettyUtils;
import org.apache.spark.network.util.TransportConf;
/**
 * Wraps another BlockFetcher with the ability to automatically retry fetches which fail due to
 * IOExceptions, which we hope are due to transient network conditions.
 *
 * This fetcher provides stronger guarantees regarding the parent BlockFetchingListener. In
 * particular, the listener will be invoked exactly once per blockId, with a success or failure.
 */
public class RetryingBlockFetcher {
  /**
   * Used to initiate the first fetch for all blocks, and subsequently for retrying the fetch on any
   * remaining blocks.
   */
  public interface BlockFetchStarter {
    /**
     * Creates a new BlockFetcher to fetch the given block ids which may do some synchronous
     * bootstrapping followed by fully asynchronous block fetching.
     * The BlockFetcher must eventually invoke the Listener on every input blockId, or else this
     * method must throw an exception.
     *
     * This method should always attempt to get a new TransportClient from the
     * {@link org.apache.spark.network.client.TransportClientFactory} in order to fix connection
     * issues.
     */
    void createAndStart(String[] blockIds, BlockFetchingListener listener)
      throws IOException, InterruptedException;
  }
  /** Shared executor service used for waiting and retrying. */
  private static final ExecutorService executorService = Executors.newCachedThreadPool(
    NettyUtils.createThreadFactory("Block Fetch Retry"));
  private static final Logger logger = LoggerFactory.getLogger(RetryingBlockFetcher.class);
  /** Used to initiate new Block Fetches on our remaining blocks. */
  private final BlockFetchStarter fetchStarter;
  /** Parent listener which we delegate all successful or permanently failed block fetches to. */
  private final BlockFetchingListener listener;
  /** Max number of times we are allowed to retry. */
  private final int maxRetries;
  /** Milliseconds to wait before each retry. */
  private final int retryWaitTime;
  // NOTE:
  // All of our non-final fields are synchronized under 'this' and should only be accessed/mutated
  // while inside a synchronized block.
  /** Number of times we've attempted to retry so far. */
  private int retryCount = 0;
  /**
   * Set of all block ids which have not been fetched successfully or with a non-IO Exception.
   * A retry involves requesting every outstanding block. Note that since this is a LinkedHashSet,
   * input ordering is preserved, so we always request blocks in the same order the user provided.
   */
  private final LinkedHashSet<String> outstandingBlocksIds;
  /**
   * The BlockFetchingListener that is active with our current BlockFetcher.
   * When we start a retry, we immediately replace this with a new Listener, which causes all any
   * old Listeners to ignore all further responses.
   */
  private RetryingBlockFetchListener currentListener;
  public RetryingBlockFetcher(
      TransportConf conf,
      BlockFetchStarter fetchStarter,
      String[] blockIds,
      BlockFetchingListener listener) {
    this.fetchStarter = fetchStarter;
    this.listener = listener;
    this.maxRetries = conf.maxIORetries();
    this.retryWaitTime = conf.ioRetryWaitTimeMs();
    this.outstandingBlocksIds = Sets.newLinkedHashSet();
    Collections.addAll(outstandingBlocksIds, blockIds);
    this.currentListener = new RetryingBlockFetchListener();
  }
  /**
   * Initiates the fetch of all blocks provided in the constructor, with possible retries in the
   * event of transient IOExceptions.
   */
  public void start() {
    fetchAllOutstanding();
  }
  /**
   * Fires off a request to fetch all blocks that have not been fetched successfully or permanently
   * failed (i.e., by a non-IOException).
   */
  private void fetchAllOutstanding() {
    // Start by retrieving our shared state within a synchronized block.
    String[] blockIdsToFetch;
    int numRetries;
    RetryingBlockFetchListener myListener;
    synchronized (this) {
      blockIdsToFetch = outstandingBlocksIds.toArray(new String[outstandingBlocksIds.size()]);
      numRetries = retryCount;
      myListener = currentListener;
    }
    // Now initiate the fetch on all outstanding blocks, possibly initiating a retry if that fails.
    // createAndStart is deliberately invoked outside the lock: it may block (e.g. connection
    // bootstrapping) and must not be called while holding 'this'.
    try {
      fetchStarter.createAndStart(blockIdsToFetch, myListener);
    } catch (Exception e) {
      logger.error(String.format("Exception while beginning fetch of %s outstanding blocks %s",
        blockIdsToFetch.length, numRetries > 0 ? "(after " + numRetries + " retries)" : ""), e);
      if (shouldRetry(e)) {
        initiateRetry();
      } else {
        // Out of retries (or non-IO failure): permanently fail every remaining block.
        for (String bid : blockIdsToFetch) {
          listener.onBlockFetchFailure(bid, e);
        }
      }
    }
  }
  /**
   * Lightweight method which initiates a retry in a different thread. The retry will involve
   * calling fetchAllOutstanding() after a configured wait time.
   */
  private synchronized void initiateRetry() {
    retryCount += 1;
    // Swapping in a new listener makes the previous attempt's in-flight responses be ignored.
    currentListener = new RetryingBlockFetchListener();
    logger.info("Retrying fetch ({}/{}) for {} outstanding blocks after {} ms",
      retryCount, maxRetries, outstandingBlocksIds.size(), retryWaitTime);
    // Sleep on the shared executor so the calling (network) thread is never blocked.
    executorService.submit(() -> {
      Uninterruptibles.sleepUninterruptibly(retryWaitTime, TimeUnit.MILLISECONDS);
      fetchAllOutstanding();
    });
  }
  /**
   * Returns true if we should retry due a block fetch failure. We will retry if and only if
   * the exception was an IOException and we haven't retried 'maxRetries' times already.
   */
  private synchronized boolean shouldRetry(Throwable e) {
    boolean isIOException = e instanceof IOException
      || (e.getCause() != null && e.getCause() instanceof IOException);
    boolean hasRemainingRetries = retryCount < maxRetries;
    return isIOException && hasRemainingRetries;
  }
  /**
   * Our RetryListener intercepts block fetch responses and forwards them to our parent listener.
   * Note that in the event of a retry, we will immediately replace the 'currentListener' field,
   * indicating that any responses from non-current Listeners should be ignored.
   */
  private class RetryingBlockFetchListener implements BlockFetchingListener {
    @Override
    public void onBlockFetchSuccess(String blockId, ManagedBuffer data) {
      // We will only forward this success message to our parent listener if this block request is
      // outstanding and we are still the active listener.
      boolean shouldForwardSuccess = false;
      synchronized (RetryingBlockFetcher.this) {
        if (this == currentListener && outstandingBlocksIds.contains(blockId)) {
          outstandingBlocksIds.remove(blockId);
          shouldForwardSuccess = true;
        }
      }
      // Now actually invoke the parent listener, outside of the synchronized block.
      if (shouldForwardSuccess) {
        listener.onBlockFetchSuccess(blockId, data);
      }
    }
    @Override
    public void onBlockFetchFailure(String blockId, Throwable exception) {
      // We will only forward this failure to our parent listener if this block request is
      // outstanding, we are still the active listener, AND we cannot retry the fetch.
      boolean shouldForwardFailure = false;
      synchronized (RetryingBlockFetcher.this) {
        if (this == currentListener && outstandingBlocksIds.contains(blockId)) {
          if (shouldRetry(exception)) {
            initiateRetry();
          } else {
            logger.error(String.format("Failed to fetch block %s, and will not retry (%s retries)",
              blockId, retryCount), exception);
            outstandingBlocksIds.remove(blockId);
            shouldForwardFailure = true;
          }
        }
      }
      // Now actually invoke the parent listener, outside of the synchronized block.
      if (shouldForwardFailure) {
        listener.onBlockFetchFailure(blockId, exception);
      }
    }
  }
}
| 9,895 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ShuffleClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.Closeable;
import java.util.Collections;
import com.codahale.metrics.MetricSet;
/** Provides an interface for reading shuffle files, either from an Executor or external service. */
public abstract class ShuffleClient implements Closeable {

  /**
   * Initializes the ShuffleClient, specifying this Executor's appId.
   * Must be called before any other method on the ShuffleClient.
   */
  public void init(String appId) { }

  /**
   * Fetch a sequence of blocks from a remote node asynchronously.
   *
   * Note that this API takes a sequence so the implementation can batch requests, and does not
   * return a future so the underlying implementation can invoke onBlockFetchSuccess as soon as
   * the data of a block is fetched, rather than waiting for all blocks to be fetched.
   *
   * @param host the host of the remote node.
   * @param port the port of the remote node.
   * @param execId the executor id.
   * @param blockIds block ids to fetch.
   * @param listener the listener to receive block fetching status.
   * @param downloadFileManager DownloadFileManager to create and clean temp files.
   *                            If it's not <code>null</code>, the remote blocks will be streamed
   *                            into temp shuffle files to reduce the memory usage, otherwise,
   *                            they will be kept in memory.
   */
  public abstract void fetchBlocks(
      String host,
      int port,
      String execId,
      String[] blockIds,
      BlockFetchingListener listener,
      DownloadFileManager downloadFileManager);

  /**
   * Get the shuffle MetricsSet from ShuffleClient, this will be used in MetricsSystem to
   * get the Shuffle related metrics.
   */
  public MetricSet shuffleMetrics() {
    // Subclasses that track metrics override this; the base implementation exposes none.
    return Collections::emptyMap;
  }
}
| 9,896 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricSet;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.server.OneForOneStreamManager;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.AppExecId;
import org.apache.spark.network.shuffle.protocol.*;
import static org.apache.spark.network.util.NettyUtils.getRemoteAddress;
import org.apache.spark.network.util.TransportConf;
/**
* RPC Handler for a server which can serve shuffle blocks from outside of an Executor process.
*
* Handles registering executors and opening shuffle blocks from them. Shuffle blocks are registered
* with the "one-for-one" strategy, meaning each Transport-layer Chunk is equivalent to one Spark-
* level shuffle block.
*/
public class ExternalShuffleBlockHandler extends RpcHandler {
  private static final Logger logger = LoggerFactory.getLogger(ExternalShuffleBlockHandler.class);

  @VisibleForTesting
  final ExternalShuffleBlockResolver blockManager;
  private final OneForOneStreamManager streamManager;
  private final ShuffleMetrics metrics;

  public ExternalShuffleBlockHandler(TransportConf conf, File registeredExecutorFile)
    throws IOException {
    this(new OneForOneStreamManager(),
      new ExternalShuffleBlockResolver(conf, registeredExecutorFile));
  }

  /** Enables mocking out the StreamManager and BlockManager. */
  @VisibleForTesting
  public ExternalShuffleBlockHandler(
      OneForOneStreamManager streamManager,
      ExternalShuffleBlockResolver blockManager) {
    this.metrics = new ShuffleMetrics();
    this.streamManager = streamManager;
    this.blockManager = blockManager;
  }

  @Override
  public void receive(TransportClient client, ByteBuffer message, RpcResponseCallback callback) {
    BlockTransferMessage msgObj = BlockTransferMessage.Decoder.fromByteBuffer(message);
    handleMessage(msgObj, client, callback);
  }

  /**
   * Dispatches a decoded control message. OpenBlocks registers a stream of shuffle blocks with
   * the stream manager; RegisterExecutor records an executor's shuffle file layout. Any other
   * message type is rejected.
   */
  protected void handleMessage(
      BlockTransferMessage msgObj,
      TransportClient client,
      RpcResponseCallback callback) {
    if (msgObj instanceof OpenBlocks) {
      final Timer.Context responseDelayContext = metrics.openBlockRequestLatencyMillis.time();
      try {
        OpenBlocks msg = (OpenBlocks) msgObj;
        checkAuth(client, msg.appId);
        long streamId = streamManager.registerStream(client.getClientId(),
          new ManagedBufferIterator(msg.appId, msg.execId, msg.blockIds), client.getChannel());
        if (logger.isTraceEnabled()) {
          logger.trace("Registered streamId {} with {} buffers for client {} from host {}",
            streamId,
            msg.blockIds.length,
            client.getClientId(),
            getRemoteAddress(client.getChannel()));
        }
        callback.onSuccess(new StreamHandle(streamId, msg.blockIds.length).toByteBuffer());
      } finally {
        responseDelayContext.stop();
      }

    } else if (msgObj instanceof RegisterExecutor) {
      final Timer.Context responseDelayContext =
        metrics.registerExecutorRequestLatencyMillis.time();
      try {
        RegisterExecutor msg = (RegisterExecutor) msgObj;
        checkAuth(client, msg.appId);
        blockManager.registerExecutor(msg.appId, msg.execId, msg.executorInfo);
        // Empty payload acts as an ack to the registering executor.
        callback.onSuccess(ByteBuffer.wrap(new byte[0]));
      } finally {
        responseDelayContext.stop();
      }

    } else {
      throw new UnsupportedOperationException("Unexpected message: " + msgObj);
    }
  }

  public MetricSet getAllMetrics() {
    return metrics;
  }

  @Override
  public StreamManager getStreamManager() {
    return streamManager;
  }

  /**
   * Removes an application (once it has been terminated), and optionally will clean up any
   * local directories associated with the executors of that application in a separate thread.
   */
  public void applicationRemoved(String appId, boolean cleanupLocalDirs) {
    blockManager.applicationRemoved(appId, cleanupLocalDirs);
  }

  /**
   * Clean up any non-shuffle files in any local directories associated with an finished executor.
   */
  public void executorRemoved(String executorId, String appId) {
    blockManager.executorRemoved(executorId, appId);
  }

  /**
   * Register an (application, executor) with the given shuffle info.
   *
   * The "re-" is meant to highlight the intended use of this method -- when this service is
   * restarted, this is used to restore the state of executors from before the restart.  Normal
   * registration will happen via a message handled in receive()
   *
   * @param appExecId the combined application and executor id
   * @param executorInfo the executor's shuffle file layout (local dirs, shuffle manager)
   */
  public void reregisterExecutor(AppExecId appExecId, ExecutorShuffleInfo executorInfo) {
    blockManager.registerExecutor(appExecId.appId, appExecId.execId, executorInfo);
  }

  public void close() {
    blockManager.close();
  }

  /** Rejects requests whose authenticated client id does not match the claimed appId. */
  private void checkAuth(TransportClient client, String appId) {
    if (client.getClientId() != null && !client.getClientId().equals(appId)) {
      throw new SecurityException(String.format(
        "Client for %s not authorized for application %s.", client.getClientId(), appId));
    }
  }

  /**
   * A simple class to wrap all shuffle service wrapper metrics
   */
  private class ShuffleMetrics implements MetricSet {
    private final Map<String, Metric> allMetrics;
    // Time latency for open block request in ms
    private final Timer openBlockRequestLatencyMillis = new Timer();
    // Time latency for executor registration latency in ms
    private final Timer registerExecutorRequestLatencyMillis = new Timer();
    // Block transfer rate in byte per second
    private final Meter blockTransferRateBytes = new Meter();

    private ShuffleMetrics() {
      allMetrics = new HashMap<>();
      allMetrics.put("openBlockRequestLatencyMillis", openBlockRequestLatencyMillis);
      allMetrics.put("registerExecutorRequestLatencyMillis", registerExecutorRequestLatencyMillis);
      allMetrics.put("blockTransferRateBytes", blockTransferRateBytes);
      allMetrics.put("registeredExecutorsSize",
                     (Gauge<Integer>) () -> blockManager.getRegisteredExecutorsSize());
    }

    @Override
    public Map<String, Metric> getMetrics() {
      return allMetrics;
    }
  }

  /**
   * Lazily resolves each requested shuffle block to a ManagedBuffer. Parses and validates the
   * "shuffle_<shuffleId>_<mapId>_<reduceId>" block ids eagerly in the constructor so malformed
   * requests fail fast, before a stream is registered.
   */
  private class ManagedBufferIterator implements Iterator<ManagedBuffer> {

    private int index = 0;
    private final String appId;
    private final String execId;
    private final int shuffleId;
    // An array containing mapId and reduceId pairs.
    private final int[] mapIdAndReduceIds;

    ManagedBufferIterator(String appId, String execId, String[] blockIds) {
      this.appId = appId;
      this.execId = execId;
      String[] blockId0Parts = blockIds[0].split("_");
      if (blockId0Parts.length != 4 || !blockId0Parts[0].equals("shuffle")) {
        throw new IllegalArgumentException("Unexpected shuffle block id format: " + blockIds[0]);
      }
      this.shuffleId = Integer.parseInt(blockId0Parts[1]);
      mapIdAndReduceIds = new int[2 * blockIds.length];
      for (int i = 0; i < blockIds.length; i++) {
        String[] blockIdParts = blockIds[i].split("_");
        if (blockIdParts.length != 4 || !blockIdParts[0].equals("shuffle")) {
          throw new IllegalArgumentException("Unexpected shuffle block id format: " + blockIds[i]);
        }
        // All blocks in one request must belong to the same shuffle.
        if (Integer.parseInt(blockIdParts[1]) != shuffleId) {
          throw new IllegalArgumentException("Expected shuffleId=" + shuffleId +
            ", got:" + blockIds[i]);
        }
        mapIdAndReduceIds[2 * i] = Integer.parseInt(blockIdParts[2]);
        mapIdAndReduceIds[2 * i + 1] = Integer.parseInt(blockIdParts[3]);
      }
    }

    @Override
    public boolean hasNext() {
      return index < mapIdAndReduceIds.length;
    }

    @Override
    public ManagedBuffer next() {
      // Honor the Iterator contract: signal exhaustion explicitly rather than letting the
      // array access below throw ArrayIndexOutOfBoundsException.
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      final ManagedBuffer block = blockManager.getBlockData(appId, execId, shuffleId,
        mapIdAndReduceIds[index], mapIdAndReduceIds[index + 1]);
      index += 2;
      metrics.blockTransferRateBytes.mark(block != null ? block.size() : 0);
      return block;
    }
  }
}
| 9,897 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/DownloadFileManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import org.apache.spark.network.util.TransportConf;
/**
* A manager to create temp block files used when fetching remote data to reduce the memory usage.
* It will clean files when they won't be used any more.
*/
/**
 * A manager to create temp block files used when fetching remote data to reduce the memory usage.
 * It will clean files when they won't be used any more.
 */
public interface DownloadFileManager {

  /**
   * Register a temp file to clean up when it won't be used any more. Return whether the
   * file is registered successfully. If `false`, the caller should clean up the file by itself.
   */
  boolean registerTempFileToClean(DownloadFile file);

  /** Create a temp block file. */
  DownloadFile createTempFile(TransportConf transportConf);
}
| 9,898 |
0 | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.shuffle;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.Weigher;
import com.google.common.collect.Maps;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.buffer.FileSegmentManagedBuffer;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.util.LevelDBProvider;
import org.apache.spark.network.util.LevelDBProvider.StoreVersion;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.NettyUtils;
import org.apache.spark.network.util.TransportConf;
/**
* Manages converting shuffle BlockIds into physical segments of local files, from a process outside
* of Executors. Each Executor must register its own configuration about where it stores its files
* (local dirs) and how (shuffle manager). The logic for retrieval of individual files is replicated
* from Spark's IndexShuffleBlockResolver.
*/
public class ExternalShuffleBlockResolver {
  private static final Logger logger = LoggerFactory.getLogger(ExternalShuffleBlockResolver.class);

  private static final ObjectMapper mapper = new ObjectMapper();

  /**
   * This a common prefix to the key for each app registration we stick in leveldb, so they
   * are easy to find, since leveldb lets you search based on prefix.
   */
  private static final String APP_KEY_PREFIX = "AppExecShuffleInfo";
  private static final StoreVersion CURRENT_VERSION = new StoreVersion(1, 0);

  private static final Pattern MULTIPLE_SEPARATORS = Pattern.compile(File.separator + "{2,}");

  // Map containing all registered executors' metadata.
  @VisibleForTesting
  final ConcurrentMap<AppExecId, ExecutorShuffleInfo> executors;

  /**
   * Caches index file information so that we can avoid open/close the index files
   * for each block fetch.
   */
  private final LoadingCache<File, ShuffleIndexInformation> shuffleIndexCache;

  // Single-threaded Java executor used to perform expensive recursive directory deletion.
  private final Executor directoryCleaner;

  private final TransportConf conf;

  @VisibleForTesting
  final File registeredExecutorFile;
  @VisibleForTesting
  final DB db;

  private final List<String> knownManagers = Arrays.asList(
    "org.apache.spark.shuffle.sort.SortShuffleManager",
    "org.apache.spark.shuffle.unsafe.UnsafeShuffleManager");

  public ExternalShuffleBlockResolver(TransportConf conf, File registeredExecutorFile)
      throws IOException {
    this(conf, registeredExecutorFile, Executors.newSingleThreadExecutor(
        // Add `spark` prefix because it will run in NM in Yarn mode.
        NettyUtils.createThreadFactory("spark-shuffle-directory-cleaner")));
  }

  // Allows tests to have more control over when directories are cleaned up.
  @VisibleForTesting
  ExternalShuffleBlockResolver(
      TransportConf conf,
      File registeredExecutorFile,
      Executor directoryCleaner) throws IOException {
    this.conf = conf;
    this.registeredExecutorFile = registeredExecutorFile;
    String indexCacheSize = conf.get("spark.shuffle.service.index.cache.size", "100m");
    CacheLoader<File, ShuffleIndexInformation> indexCacheLoader =
        new CacheLoader<File, ShuffleIndexInformation>() {
          public ShuffleIndexInformation load(File file) throws IOException {
            return new ShuffleIndexInformation(file);
          }
        };
    // Weigh cache entries by their in-memory index size so the cache bound tracks actual usage.
    Weigher<File, ShuffleIndexInformation> indexWeigher =
        (file, indexInfo) -> indexInfo.getSize();
    shuffleIndexCache = CacheBuilder.newBuilder()
      .maximumWeight(JavaUtils.byteStringAsBytes(indexCacheSize))
      .weigher(indexWeigher)
      .build(indexCacheLoader);
    db = LevelDBProvider.initLevelDB(this.registeredExecutorFile, CURRENT_VERSION, mapper);
    if (db != null) {
      executors = reloadRegisteredExecutors(db);
    } else {
      executors = Maps.newConcurrentMap();
    }
    this.directoryCleaner = directoryCleaner;
  }

  public int getRegisteredExecutorsSize() {
    return executors.size();
  }

  /** Registers a new Executor with all the configuration we need to find its shuffle files. */
  public void registerExecutor(
      String appId,
      String execId,
      ExecutorShuffleInfo executorInfo) {
    AppExecId fullId = new AppExecId(appId, execId);
    logger.info("Registered executor {} with {}", fullId, executorInfo);
    if (!knownManagers.contains(executorInfo.shuffleManager)) {
      throw new UnsupportedOperationException(
        "Unsupported shuffle manager of executor: " + executorInfo);
    }
    try {
      if (db != null) {
        byte[] key = dbAppExecKey(fullId);
        byte[] value = mapper.writeValueAsString(executorInfo).getBytes(StandardCharsets.UTF_8);
        db.put(key, value);
      }
    } catch (Exception e) {
      // Persisting to leveldb is best-effort; the in-memory map below stays authoritative.
      logger.error("Error saving registered executors", e);
    }
    executors.put(fullId, executorInfo);
  }

  /**
   * Obtains a FileSegmentManagedBuffer from (shuffleId, mapId, reduceId). We make assumptions
   * about how the hash and sort based shuffles store their data.
   */
  public ManagedBuffer getBlockData(
      String appId,
      String execId,
      int shuffleId,
      int mapId,
      int reduceId) {
    ExecutorShuffleInfo executor = executors.get(new AppExecId(appId, execId));
    if (executor == null) {
      throw new RuntimeException(
        String.format("Executor is not registered (appId=%s, execId=%s)", appId, execId));
    }
    return getSortBasedShuffleBlockData(executor, shuffleId, mapId, reduceId);
  }

  /**
   * Removes our metadata of all executors registered for the given application, and optionally
   * also deletes the local directories associated with the executors of that application in a
   * separate thread.
   *
   * It is not valid to call registerExecutor() for an executor with this appId after invoking
   * this method.
   */
  public void applicationRemoved(String appId, boolean cleanupLocalDirs) {
    logger.info("Application {} removed, cleanupLocalDirs = {}", appId, cleanupLocalDirs);
    Iterator<Map.Entry<AppExecId, ExecutorShuffleInfo>> it = executors.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<AppExecId, ExecutorShuffleInfo> entry = it.next();
      AppExecId fullId = entry.getKey();
      final ExecutorShuffleInfo executor = entry.getValue();

      // Only touch executors associated with the appId that was removed.
      if (appId.equals(fullId.appId)) {
        it.remove();
        if (db != null) {
          try {
            db.delete(dbAppExecKey(fullId));
          } catch (IOException e) {
            logger.error("Error deleting {} from executor state db", appId, e);
          }
        }

        if (cleanupLocalDirs) {
          logger.info("Cleaning up executor {}'s {} local dirs", fullId, executor.localDirs.length);

          // Execute the actual deletion in a different thread, as it may take some time.
          directoryCleaner.execute(() -> deleteExecutorDirs(executor.localDirs));
        }
      }
    }
  }

  /**
   * Removes all the non-shuffle files in any local directories associated with the finished
   * executor.
   */
  public void executorRemoved(String executorId, String appId) {
    logger.info("Clean up non-shuffle files associated with the finished executor {}", executorId);
    AppExecId fullId = new AppExecId(appId, executorId);
    final ExecutorShuffleInfo executor = executors.get(fullId);
    if (executor == null) {
      // Executor not registered, skip clean up of the local directories.
      logger.info("Executor is not registered (appId={}, execId={})", appId, executorId);
    } else {
      logger.info("Cleaning up non-shuffle files in executor {}'s {} local dirs", fullId,
        executor.localDirs.length);

      // Execute the actual deletion in a different thread, as it may take some time.
      directoryCleaner.execute(() -> deleteNonShuffleFiles(executor.localDirs));
    }
  }

  /**
   * Synchronously deletes each directory one at a time.
   * Should be executed in its own thread, as this may take a long time.
   */
  private void deleteExecutorDirs(String[] dirs) {
    for (String localDir : dirs) {
      try {
        JavaUtils.deleteRecursively(new File(localDir));
        logger.debug("Successfully cleaned up directory: {}", localDir);
      } catch (Exception e) {
        logger.error("Failed to delete directory: " + localDir, e);
      }
    }
  }

  /**
   * Synchronously deletes non-shuffle files in each directory recursively.
   * Should be executed in its own thread, as this may take a long time.
   */
  private void deleteNonShuffleFiles(String[] dirs) {
    // Don't delete shuffle data or shuffle index files.
    FilenameFilter filter = (dir, name) ->
      !name.endsWith(".index") && !name.endsWith(".data");

    for (String localDir : dirs) {
      try {
        JavaUtils.deleteRecursively(new File(localDir), filter);
        logger.debug("Successfully cleaned up non-shuffle files in directory: {}", localDir);
      } catch (Exception e) {
        logger.error("Failed to delete non-shuffle files in directory: " + localDir, e);
      }
    }
  }

  /**
   * Sort-based shuffle data uses an index called "shuffle_ShuffleId_MapId_0.index" into a data file
   * called "shuffle_ShuffleId_MapId_0.data". This logic is from IndexShuffleBlockResolver,
   * and the block id format is from ShuffleDataBlockId and ShuffleIndexBlockId.
   */
  private ManagedBuffer getSortBasedShuffleBlockData(
    ExecutorShuffleInfo executor, int shuffleId, int mapId, int reduceId) {
    File indexFile = getFile(executor.localDirs, executor.subDirsPerLocalDir,
      "shuffle_" + shuffleId + "_" + mapId + "_0.index");

    try {
      ShuffleIndexInformation shuffleIndexInformation = shuffleIndexCache.get(indexFile);
      ShuffleIndexRecord shuffleIndexRecord = shuffleIndexInformation.getIndex(reduceId);
      return new FileSegmentManagedBuffer(
        conf,
        getFile(executor.localDirs, executor.subDirsPerLocalDir,
          "shuffle_" + shuffleId + "_" + mapId + "_0.data"),
        shuffleIndexRecord.getOffset(),
        shuffleIndexRecord.getLength());
    } catch (ExecutionException e) {
      throw new RuntimeException("Failed to open file: " + indexFile, e);
    }
  }

  /**
   * Hashes a filename into the corresponding local directory, in a manner consistent with
   * Spark's DiskBlockManager.getFile().
   */
  @VisibleForTesting
  static File getFile(String[] localDirs, int subDirsPerLocalDir, String filename) {
    int hash = JavaUtils.nonNegativeHash(filename);
    String localDir = localDirs[hash % localDirs.length];
    int subDirId = (hash / localDirs.length) % subDirsPerLocalDir;
    return new File(createNormalizedInternedPathname(
        localDir, String.format("%02x", subDirId), filename));
  }

  void close() {
    if (db != null) {
      try {
        db.close();
      } catch (IOException e) {
        logger.error("Exception closing leveldb with registered executors", e);
      }
    }
  }

  /**
   * This method is needed to avoid the situation when multiple File instances for the
   * same pathname "foo/bar" are created, each with a separate copy of the "foo/bar" String.
   * According to measurements, in some scenarios such duplicate strings may waste a lot
   * of memory (~ 10% of the heap). To avoid that, we intern the pathname, and before that
   * we make sure that it's in a normalized form (contains no "//", "///" etc.) Otherwise,
   * the internal code in java.io.File would normalize it later, creating a new "foo/bar"
   * String copy. Unfortunately, we cannot just reuse the normalization code that java.io.File
   * uses, since it is in the package-private class java.io.FileSystem.
   */
  @VisibleForTesting
  static String createNormalizedInternedPathname(String dir1, String dir2, String fname) {
    String pathname = dir1 + File.separator + dir2 + File.separator + fname;
    Matcher m = MULTIPLE_SEPARATORS.matcher(pathname);
    pathname = m.replaceAll("/");
    // A single trailing slash needs to be taken care of separately
    if (pathname.length() > 1 && pathname.endsWith("/")) {
      pathname = pathname.substring(0, pathname.length() - 1);
    }
    return pathname.intern();
  }

  /** Simply encodes an executor's full ID, which is appId + execId. */
  public static class AppExecId {
    public final String appId;
    public final String execId;

    @JsonCreator
    public AppExecId(@JsonProperty("appId") String appId, @JsonProperty("execId") String execId) {
      this.appId = appId;
      this.execId = execId;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;

      AppExecId appExecId = (AppExecId) o;
      return Objects.equal(appId, appExecId.appId) && Objects.equal(execId, appExecId.execId);
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(appId, execId);
    }

    @Override
    public String toString() {
      return Objects.toStringHelper(this)
        .add("appId", appId)
        .add("execId", execId)
        .toString();
    }
  }

  private static byte[] dbAppExecKey(AppExecId appExecId) throws IOException {
    // we stick a common prefix on all the keys so we can find them in the DB
    String appExecJson = mapper.writeValueAsString(appExecId);
    String key = (APP_KEY_PREFIX + ";" + appExecJson);
    return key.getBytes(StandardCharsets.UTF_8);
  }

  private static AppExecId parseDbAppExecKey(String s) throws IOException {
    if (!s.startsWith(APP_KEY_PREFIX)) {
      throw new IllegalArgumentException("expected a string starting with " + APP_KEY_PREFIX);
    }
    String json = s.substring(APP_KEY_PREFIX.length() + 1);
    AppExecId parsed = mapper.readValue(json, AppExecId.class);
    return parsed;
  }

  @VisibleForTesting
  static ConcurrentMap<AppExecId, ExecutorShuffleInfo> reloadRegisteredExecutors(DB db)
      throws IOException {
    ConcurrentMap<AppExecId, ExecutorShuffleInfo> registeredExecutors = Maps.newConcurrentMap();
    if (db != null) {
      // DBIterator holds native leveldb resources; close it via try-with-resources to avoid a
      // resource leak (the original code never closed it).
      try (DBIterator itr = db.iterator()) {
        itr.seek(APP_KEY_PREFIX.getBytes(StandardCharsets.UTF_8));
        while (itr.hasNext()) {
          Map.Entry<byte[], byte[]> e = itr.next();
          String key = new String(e.getKey(), StandardCharsets.UTF_8);
          // leveldb iterates in key order; once past our prefix there are no more app entries.
          if (!key.startsWith(APP_KEY_PREFIX)) {
            break;
          }
          AppExecId id = parseDbAppExecKey(key);
          logger.info("Reloading registered executors: {}", id);
          ExecutorShuffleInfo shuffleInfo =
            mapper.readValue(e.getValue(), ExecutorShuffleInfo.class);
          registeredExecutors.put(id, shuffleInfo);
        }
      }
    }
    return registeredExecutors;
  }
}
| 9,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.