index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/PbUnpack.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.rpc.model.UnPack;
import java.io.ByteArrayInputStream;
import java.io.IOException;
/**
 * {@link UnPack} implementation that decodes a raw byte payload into a
 * protobuf message of the configured class via {@link SingleProtobufUtils}.
 *
 * @param <T> the protobuf message type produced by {@link #unpack(byte[])}
 */
public class PbUnpack<T> implements UnPack {

    /** Target protobuf message class used to look up the parser. */
    private final Class<T> clz;

    public PbUnpack(Class<T> clz) {
        this.clz = clz;
    }

    /**
     * Deserializes {@code data} into an instance of {@code clz}.
     *
     * @throws IOException if the bytes are not a valid message of that type
     */
    @Override
    public Object unpack(byte[] data) throws IOException {
        return SingleProtobufUtils.deserialize(new ByteArrayInputStream(data), clz);
    }
}
| 6,100 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/TripleCustomerProtocolWapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.utils.Assert;
import org.apache.dubbo.common.utils.CollectionUtils;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
/**
 * Hand-rolled protobuf wire-format codec for the triple protocol's request and
 * response wrapper messages. Encoding always uses UTF-8 for string fields and
 * wire type 2 (length-delimited) for every field.
 */
public class TripleCustomerProtocolWapper {

    /**
     * Builds a protobuf field tag: {@code fieldNumber << 3 | wireType}.
     */
    static int makeTag(int fieldNumber, int wireType) {
        return fieldNumber << 3 | wireType;
    }

    /**
     * Encodes a non-negative int as a protobuf varint: 7 payload bits per byte,
     * least-significant group first, continuation bit (0x80) set on every byte
     * except the last.
     */
    public static byte[] varIntEncode(int val) {
        byte[] data = new byte[varIntComputeLength(val)];
        for (int i = 0; i < data.length - 1; i++) {
            data[i] = (byte) ((val & 0x7F) | 0x80);
            val = val >>> 7;
        }
        data[data.length - 1] = (byte) (val);
        return data;
    }

    /**
     * Returns the number of bytes required to varint-encode {@code val}
     * (1 for zero, otherwise one byte per started group of 7 significant bits).
     */
    public static int varIntComputeLength(int val) {
        if (val == 0) {
            return 1;
        }
        int length = 0;
        while (val != 0) {
            val = val >>> 7;
            length++;
        }
        return length;
    }

    /**
     * Reads a single varint-encoded int from {@code byteBuffer}, advancing its
     * position just past the consumed bytes.
     */
    public static int readRawVarint32(ByteBuffer byteBuffer) {
        int val = 0;
        int currentPosition = byteBuffer.position();
        int varIntLength = 1;
        byte currentByte = byteBuffer.get();
        // The continuation bit (0x80) is set on every byte except the last one.
        // (Original wrote this as "(b & 0xF0) >> 7 == 1", which tests the same bit.)
        while ((currentByte & 0x80) != 0) {
            varIntLength++;
            currentByte = byteBuffer.get();
        }
        // Varints are little-endian by 7-bit group: accumulate from the last
        // (most significant) byte backwards.
        for (int index = currentPosition + varIntLength - 1; index >= currentPosition; index--) {
            val = val << 7;
            val = val | (byteBuffer.get(index) & 0x7F);
        }
        byteBuffer.position(currentPosition + varIntLength);
        return val;
    }

    /** Extracts the field number from a tag (upper bits above the 3-bit wire type). */
    public static int extractFieldNumFromTag(int tag) {
        return tag >> 3;
    }

    /** Extracts the 3-bit wire type from a tag. */
    public static int extractWireTypeFromTag(int tag) {
        return tag & 0X07;
    }

    /**
     * Wire wrapper for a triple response: field 1 = serializeType, field 2 =
     * payload bytes (optional), field 3 = declared type name.
     */
    public static final class TripleResponseWrapper {

        private String serializeType;
        private byte[] data;
        private String type;

        public String getSerializeType() {
            return serializeType;
        }

        public byte[] getData() {
            return data;
        }

        public String getType() {
            return type;
        }

        /**
         * Parses a wrapper from its wire form. Every field must be
         * length-delimited (wire type 2) with field number 1, 2 or 3.
         *
         * @throws RuntimeException on an unexpected wire type or field number
         */
        public static TripleResponseWrapper parseFrom(byte[] data) {
            TripleResponseWrapper tripleResponseWrapper = new TripleResponseWrapper();
            ByteBuffer byteBuffer = ByteBuffer.wrap(data);
            while (byteBuffer.position() < byteBuffer.limit()) {
                int tag = readRawVarint32(byteBuffer);
                int fieldNum = extractFieldNumFromTag(tag);
                int wireType = extractWireTypeFromTag(tag);
                if (wireType != 2) {
                    throw new RuntimeException(String.format("unexpect wireType, expect %d realType %d", 2, wireType));
                }
                if (fieldNum == 1) {
                    int serializeTypeLength = readRawVarint32(byteBuffer);
                    byte[] serializeTypeBytes = new byte[serializeTypeLength];
                    byteBuffer.get(serializeTypeBytes, 0, serializeTypeLength);
                    // Decode with UTF-8 explicitly: toByteArray() encodes with
                    // UTF-8, so relying on the platform charset would break the
                    // round trip on non-UTF-8 platforms.
                    tripleResponseWrapper.serializeType = new String(serializeTypeBytes, StandardCharsets.UTF_8);
                } else if (fieldNum == 2) {
                    int dataLength = readRawVarint32(byteBuffer);
                    byte[] dataBytes = new byte[dataLength];
                    byteBuffer.get(dataBytes, 0, dataLength);
                    tripleResponseWrapper.data = dataBytes;
                } else if (fieldNum == 3) {
                    int typeLength = readRawVarint32(byteBuffer);
                    byte[] typeBytes = new byte[typeLength];
                    byteBuffer.get(typeBytes, 0, typeLength);
                    tripleResponseWrapper.type = new String(typeBytes, StandardCharsets.UTF_8);
                } else {
                    throw new RuntimeException("fieldNum should in (1,2,3)");
                }
            }
            return tripleResponseWrapper;
        }

        /**
         * Serializes this wrapper to protobuf wire form. {@code serializeType}
         * and {@code type} must be non-null; {@code data} is omitted when null.
         */
        public byte[] toByteArray() {
            int totalSize = 0;
            int serializeTypeTag = makeTag(1, 2);
            byte[] serializeTypeTagBytes = varIntEncode(serializeTypeTag);
            byte[] serializeTypeBytes = serializeType.getBytes(StandardCharsets.UTF_8);
            byte[] serializeTypeLengthVarIntEncodeBytes = varIntEncode(serializeTypeBytes.length);
            totalSize += serializeTypeTagBytes.length
                    + serializeTypeLengthVarIntEncodeBytes.length
                    + serializeTypeBytes.length;
            int dataTag = makeTag(2, 2);
            if (data != null) {
                totalSize += varIntComputeLength(dataTag) + varIntComputeLength(data.length) + data.length;
            }
            int typeTag = makeTag(3, 2);
            byte[] typeTagBytes = varIntEncode(typeTag);
            byte[] typeBytes = type.getBytes(StandardCharsets.UTF_8);
            byte[] typeLengthVarIntEncodeBytes = varIntEncode(typeBytes.length);
            totalSize += typeTagBytes.length + typeLengthVarIntEncodeBytes.length + typeBytes.length;
            ByteBuffer byteBuffer = ByteBuffer.allocate(totalSize);
            byteBuffer
                    .put(serializeTypeTagBytes)
                    .put(serializeTypeLengthVarIntEncodeBytes)
                    .put(serializeTypeBytes);
            if (data != null) {
                byteBuffer
                        .put(varIntEncode(dataTag))
                        .put(varIntEncode(data.length))
                        .put(data);
            }
            byteBuffer.put(typeTagBytes).put(typeLengthVarIntEncodeBytes).put(typeBytes);
            return byteBuffer.array();
        }

        /** Fluent builder; {@code serializeType} and {@code type} are required. */
        public static final class Builder {

            private String serializeType;
            private byte[] data;
            private String type;

            public Builder setSerializeType(String serializeType) {
                this.serializeType = serializeType;
                return this;
            }

            public Builder setData(byte[] data) {
                this.data = data;
                return this;
            }

            public Builder setType(String type) {
                this.type = type;
                return this;
            }

            public static Builder newBuilder() {
                return new Builder();
            }

            public TripleResponseWrapper build() {
                Assert.notNull(serializeType, "serializeType can not be null");
                Assert.notNull(type, "type can not be null");
                TripleResponseWrapper tripleResponseWrapper = new TripleResponseWrapper();
                tripleResponseWrapper.data = this.data;
                tripleResponseWrapper.serializeType = this.serializeType;
                tripleResponseWrapper.type = this.type;
                return tripleResponseWrapper;
            }
        }
    }

    /**
     * Wire wrapper for a triple request: field 1 = serializeType, field 2 =
     * repeated argument payloads, field 3 = repeated argument type names.
     */
    public static final class TripleRequestWrapper {

        private String serializeType;
        private List<byte[]> args;
        private List<String> argTypes;

        public String getSerializeType() {
            return serializeType;
        }

        public List<byte[]> getArgs() {
            return args;
        }

        public List<String> getArgTypes() {
            return argTypes;
        }

        public TripleRequestWrapper() {}

        /**
         * Parses a request wrapper from its wire form. Every field must be
         * length-delimited (wire type 2) with field number 1, 2 or 3; fields 2
         * and 3 may repeat and are accumulated in order.
         *
         * @throws RuntimeException on an unexpected wire type or field number
         */
        public static TripleRequestWrapper parseFrom(byte[] data) {
            TripleRequestWrapper tripleRequestWrapper = new TripleRequestWrapper();
            ByteBuffer byteBuffer = ByteBuffer.wrap(data);
            tripleRequestWrapper.args = new ArrayList<>();
            tripleRequestWrapper.argTypes = new ArrayList<>();
            while (byteBuffer.position() < byteBuffer.limit()) {
                int tag = readRawVarint32(byteBuffer);
                int fieldNum = extractFieldNumFromTag(tag);
                int wireType = extractWireTypeFromTag(tag);
                if (wireType != 2) {
                    throw new RuntimeException(String.format("unexpect wireType, expect %d realType %d", 2, wireType));
                }
                if (fieldNum == 1) {
                    int serializeTypeLength = readRawVarint32(byteBuffer);
                    byte[] serializeTypeBytes = new byte[serializeTypeLength];
                    byteBuffer.get(serializeTypeBytes, 0, serializeTypeLength);
                    // UTF-8 to match the encoding used by toByteArray().
                    tripleRequestWrapper.serializeType = new String(serializeTypeBytes, StandardCharsets.UTF_8);
                } else if (fieldNum == 2) {
                    int argLength = readRawVarint32(byteBuffer);
                    byte[] argBytes = new byte[argLength];
                    byteBuffer.get(argBytes, 0, argLength);
                    tripleRequestWrapper.args.add(argBytes);
                } else if (fieldNum == 3) {
                    int argTypeLength = readRawVarint32(byteBuffer);
                    byte[] argTypeBytes = new byte[argTypeLength];
                    byteBuffer.get(argTypeBytes, 0, argTypeLength);
                    tripleRequestWrapper.argTypes.add(new String(argTypeBytes, StandardCharsets.UTF_8));
                } else {
                    throw new RuntimeException("fieldNum should in (1,2,3)");
                }
            }
            return tripleRequestWrapper;
        }

        /**
         * Serializes this wrapper to protobuf wire form. {@code serializeType}
         * must be non-null; empty arg/argType lists are omitted entirely.
         */
        public byte[] toByteArray() {
            int totalSize = 0;
            int serializeTypeTag = makeTag(1, 2);
            byte[] serializeTypeTagBytes = varIntEncode(serializeTypeTag);
            byte[] serializeTypeBytes = serializeType.getBytes(StandardCharsets.UTF_8);
            byte[] serializeTypeLengthVarIntEncodeBytes = varIntEncode(serializeTypeBytes.length);
            totalSize += serializeTypeTagBytes.length
                    + serializeTypeLengthVarIntEncodeBytes.length
                    + serializeTypeBytes.length;
            int argTypeTag = makeTag(3, 2);
            if (CollectionUtils.isNotEmpty(argTypes)) {
                totalSize += varIntComputeLength(argTypeTag) * argTypes.size();
                for (String argType : argTypes) {
                    byte[] argTypeBytes = argType.getBytes(StandardCharsets.UTF_8);
                    totalSize += argTypeBytes.length + varIntComputeLength(argTypeBytes.length);
                }
            }
            int argTag = makeTag(2, 2);
            if (CollectionUtils.isNotEmpty(args)) {
                totalSize += varIntComputeLength(argTag) * args.size();
                for (byte[] arg : args) {
                    totalSize += arg.length + varIntComputeLength(arg.length);
                }
            }
            ByteBuffer byteBuffer = ByteBuffer.allocate(totalSize);
            byteBuffer
                    .put(serializeTypeTagBytes)
                    .put(serializeTypeLengthVarIntEncodeBytes)
                    .put(serializeTypeBytes);
            if (CollectionUtils.isNotEmpty(args)) {
                byte[] argTagBytes = varIntEncode(argTag);
                for (byte[] arg : args) {
                    byteBuffer.put(argTagBytes).put(varIntEncode(arg.length)).put(arg);
                }
            }
            if (CollectionUtils.isNotEmpty(argTypes)) {
                byte[] argTypeTagBytes = varIntEncode(argTypeTag);
                for (String argType : argTypes) {
                    byte[] argTypeBytes = argType.getBytes(StandardCharsets.UTF_8);
                    byteBuffer
                            .put(argTypeTagBytes)
                            .put(varIntEncode(argTypeBytes.length))
                            .put(argTypeBytes);
                }
            }
            return byteBuffer.array();
        }

        /** Fluent builder; {@code serializeType} is required. */
        public static final class Builder {

            private String serializeType;
            private final List<byte[]> args = new ArrayList<>();
            private final List<String> argTypes = new ArrayList<>();

            public Builder setSerializeType(String serializeType) {
                this.serializeType = serializeType;
                return this;
            }

            public Builder addArgTypes(String argsType) {
                Assert.notEmptyString(argsType, "argsType cannot be empty.");
                argTypes.add(argsType);
                return this;
            }

            public Builder addArgs(byte[] arg) {
                args.add(arg);
                return this;
            }

            public static Builder newBuilder() {
                return new Builder();
            }

            public TripleRequestWrapper build() {
                Assert.notNull(serializeType, "serializeType can not be null");
                TripleRequestWrapper tripleRequestWrapper = new TripleRequestWrapper();
                tripleRequestWrapper.args = this.args;
                tripleRequestWrapper.argTypes = this.argTypes;
                tripleRequestWrapper.serializeType = this.serializeType;
                return tripleRequestWrapper;
            }
        }

        /**
         * Compares the arg payload lists element-wise by content. The previous
         * implementation used List.equals, which compares byte[] elements by
         * identity and therefore reported content-equal wrappers as unequal.
         */
        private static boolean argsContentEquals(List<byte[]> a, List<byte[]> b) {
            if (a == b) {
                return true;
            }
            if (a == null || b == null || a.size() != b.size()) {
                return false;
            }
            for (int i = 0; i < a.size(); i++) {
                if (!Arrays.equals(a.get(i), b.get(i))) {
                    return false;
                }
            }
            return true;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (!(o instanceof TripleRequestWrapper)) {
                return false;
            }
            TripleRequestWrapper that = (TripleRequestWrapper) o;
            return Objects.equals(serializeType, that.serializeType)
                    && argsContentEquals(args, that.args)
                    && Objects.equals(argTypes, that.argTypes);
        }

        @Override
        public int hashCode() {
            // Hash byte[] elements by content to stay consistent with equals().
            int result = Objects.hash(serializeType, argTypes);
            if (args != null) {
                for (byte[] arg : args) {
                    result = 31 * result + Arrays.hashCode(arg);
                }
            }
            return result;
        }
    }
}
| 6,101 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/ClientStreamObserver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.protocol.tri.observer.CallStreamObserver;
/**
 * Client-side call stream observer. Adds a gRPC-style alias for switching the
 * inbound stream to manual flow control.
 *
 * @param <T> the type of messages observed on this stream
 */
public interface ClientStreamObserver<T> extends CallStreamObserver<T> {
    /**
     * Swaps to manual flow control where no message will be delivered to {@link
     * StreamObserver#onNext(Object)} unless it is {@link #request request()}ed. Since {@code
     * request()} may not be called before the call is started, a number of initial requests may be
     * specified.
     */
    default void disableAutoRequest() {
        // Alias delegating to the shared CallStreamObserver operation.
        disableAutoFlowControl();
    }
}
| 6,102 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/GrpcHttp2Protocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.extension.Activate;
/**
 * Marker activation of the triple HTTP/2 protocol under the gRPC name.
 * Inherits all behavior from {@link TripleHttp2Protocol}; no overrides.
 */
@Activate
public class GrpcHttp2Protocol extends TripleHttp2Protocol {}
| 6,103 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/ServerStreamObserver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.rpc.protocol.tri.observer.CallStreamObserver;
/**
 * Server-side call stream observer. Adds a gRPC-style alias for switching the
 * inbound stream to manual flow control.
 *
 * @param <T> the type of messages observed on this stream
 */
public interface ServerStreamObserver<T> extends CallStreamObserver<T> {
    /** Alias delegating to the shared CallStreamObserver operation. */
    default void disableAutoInboundFlowControl() {
        disableAutoFlowControl();
    }
}
| 6,104 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/SingleProtobufUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.utils.ConcurrentHashMapUtils;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import com.google.protobuf.BoolValue;
import com.google.protobuf.BytesValue;
import com.google.protobuf.DoubleValue;
import com.google.protobuf.Empty;
import com.google.protobuf.EnumValue;
import com.google.protobuf.ExtensionRegistryLite;
import com.google.protobuf.FloatValue;
import com.google.protobuf.Int32Value;
import com.google.protobuf.Int64Value;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.ListValue;
import com.google.protobuf.Message;
import com.google.protobuf.MessageLite;
import com.google.protobuf.Parser;
import com.google.protobuf.StringValue;
/**
 * Helpers for (de)serializing single protobuf messages: caches default
 * instances, parsers and per-class marshallers, with well-known wrapper types
 * pre-registered.
 */
public class SingleProtobufUtils {

    /** Default message instance per message class, filled lazily via reflection. */
    private static final ConcurrentHashMap<Class<?>, Message> INST_CACHE = new ConcurrentHashMap<>();

    /** Shared empty extension registry used for all parsing. */
    private static final ExtensionRegistryLite GLOBAL_REGISTRY = ExtensionRegistryLite.getEmptyRegistry();

    /** Marshaller per message class, filled lazily or via {@link #marshaller}. */
    private static final ConcurrentMap<Class<?>, SingleMessageMarshaller<?>> MARSHALLER_CACHE =
            new ConcurrentHashMap<>();

    static {
        // Built-in types need to be registered in advance
        marshaller(Empty.getDefaultInstance());
        marshaller(BoolValue.getDefaultInstance());
        marshaller(Int32Value.getDefaultInstance());
        marshaller(Int64Value.getDefaultInstance());
        marshaller(FloatValue.getDefaultInstance());
        marshaller(DoubleValue.getDefaultInstance());
        marshaller(BytesValue.getDefaultInstance());
        marshaller(StringValue.getDefaultInstance());
        marshaller(EnumValue.getDefaultInstance());
        marshaller(ListValue.getDefaultInstance());
    }

    /** Returns true when {@code clazz} is a protobuf message type (null-safe). */
    static boolean isSupported(Class<?> clazz) {
        if (clazz == null) {
            return false;
        }
        return MessageLite.class.isAssignableFrom(clazz);
    }

    /** Registers a marshaller for {@code defaultInstance}'s concrete class. */
    public static <T extends MessageLite> void marshaller(T defaultInstance) {
        MARSHALLER_CACHE.put(defaultInstance.getClass(), new SingleMessageMarshaller<>(defaultInstance));
    }

    /**
     * Returns the cached default instance for {@code clz}, creating it via the
     * static {@code getDefaultInstance()} method on first use.
     *
     * @throws RuntimeException if reflective creation fails
     */
    @SuppressWarnings("all")
    public static Message defaultInst(Class<?> clz) {
        Message defaultInst = INST_CACHE.get(clz);
        if (defaultInst != null) {
            return defaultInst;
        }
        try {
            defaultInst = (Message) clz.getMethod("getDefaultInstance").invoke(null);
        } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
            throw new RuntimeException("Create default protobuf instance failed ", e);
        }
        // putIfAbsent (rather than put) so concurrent callers all observe the
        // same cached instance even when they race on first creation.
        Message prev = INST_CACHE.putIfAbsent(clz, defaultInst);
        return prev != null ? prev : defaultInst;
    }

    /** Returns the protobuf parser for {@code clz} via its default instance. */
    @SuppressWarnings("all")
    public static <T> Parser<T> getParser(Class<T> clz) {
        Message defaultInst = defaultInst(clz);
        return (Parser<T>) defaultInst.getParserForType();
    }

    /**
     * Parses a message of type {@code clz} from {@code in}.
     *
     * @throws IllegalArgumentException if {@code clz} is not a protobuf message type
     * @throws IOException wrapping any protocol-buffer parse failure
     */
    @SuppressWarnings("unchecked")
    public static <T> T deserialize(InputStream in, Class<T> clz) throws IOException {
        if (!isSupported(clz)) {
            throw new IllegalArgumentException("This serialization only support google protobuf messages, but the "
                    + "actual input type is :" + clz.getName());
        }
        try {
            return (T) getMarshaller(clz).parse(in);
        } catch (InvalidProtocolBufferException e) {
            throw new IOException(e);
        }
    }

    /** Writes {@code obj} (which must be a protobuf message) to {@code os}. */
    public static void serialize(Object obj, OutputStream os) throws IOException {
        final MessageLite msg = (MessageLite) obj;
        msg.writeTo(os);
    }

    // Raw-type construction is unavoidable here: the wildcard Class<?> cannot
    // be captured for SingleMessageMarshaller<T extends MessageLite>.
    @SuppressWarnings({"rawtypes", "unchecked"})
    private static SingleMessageMarshaller<?> getMarshaller(Class<?> clz) {
        return ConcurrentHashMapUtils.computeIfAbsent(MARSHALLER_CACHE, clz, k -> new SingleMessageMarshaller(k));
    }

    /**
     * Pairs a message's default instance with its parser; parses streams
     * against the shared empty extension registry.
     */
    public static final class SingleMessageMarshaller<T extends MessageLite> {

        private final Parser<T> parser;
        private final T defaultInstance;

        @SuppressWarnings("unchecked")
        SingleMessageMarshaller(Class<T> clz) {
            this.defaultInstance = (T) defaultInst(clz);
            this.parser = (Parser<T>) defaultInstance.getParserForType();
        }

        @SuppressWarnings("unchecked")
        SingleMessageMarshaller(T defaultInstance) {
            this.defaultInstance = defaultInstance;
            this.parser = (Parser<T>) defaultInstance.getParserForType();
        }

        /** Parses one message of type {@code T} from the stream. */
        public T parse(InputStream stream) throws InvalidProtocolBufferException {
            return parser.parseFrom(stream, GLOBAL_REGISTRY);
        }
    }
}
| 6,105 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/RequestMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.rpc.CancellationContext;
import org.apache.dubbo.rpc.model.MethodDescriptor;
import org.apache.dubbo.rpc.model.PackableMethod;
import org.apache.dubbo.rpc.protocol.tri.compressor.Compressor;
import org.apache.dubbo.rpc.protocol.tri.compressor.Identity;
import org.apache.dubbo.rpc.protocol.tri.stream.StreamUtils;
import java.util.Map;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
import io.netty.util.AsciiString;
/**
 * Mutable holder for the metadata of one outgoing triple call, plus the logic
 * to render it as HTTP/2 request headers.
 */
public class RequestMetadata {
    // URI scheme for the call (e.g. http/https as an AsciiString).
    public AsciiString scheme;
    // Consumer application name, sent as the consumer-app-name header.
    public String application;
    // Target service name; used to build the request :path.
    public String service;
    // Service version; omitted when ignoreDefaultVersion is set and version is "1.0.0".
    public String version;
    // Service group header value.
    public String group;
    // Remote address, used as the :authority pseudo-header.
    public String address;
    // Comma-separated encodings this client can decompress (grpc-accept-encoding).
    public String acceptEncoding;
    // Call timeout header value; skipped when null.
    public String timeout;
    // Compressor for outgoing messages; identity suppresses the grpc-encoding header.
    public Compressor compressor;
    // Context used to propagate call cancellation.
    public CancellationContext cancellationContext;
    // Descriptor of the invoked method; supplies the method name for :path.
    public MethodDescriptor method;
    // Packing strategy for request/response payloads.
    public PackableMethod packableMethod;
    // Invocation attachments converted into custom headers.
    public Map<String, Object> attachments;
    // When true, attachment keys are not forced to lower case during conversion.
    public boolean convertNoLowerHeader;
    // When true, the default "1.0.0" version is not sent as a header.
    public boolean ignoreDefaultVersion;

    /**
     * Renders this metadata as HTTP/2 headers for a POST request to
     * "/{service}/{method}", including content-type, te:trailers and all
     * optional triple headers that have values.
     */
    public DefaultHttp2Headers toHeaders() {
        DefaultHttp2Headers header = new DefaultHttp2Headers(false);
        header.scheme(scheme)
                .authority(address)
                .method(HttpMethod.POST.asciiName())
                .path("/" + service + "/" + method.getMethodName())
                .set(TripleHeaderEnum.CONTENT_TYPE_KEY.getHeader(), TripleConstant.CONTENT_PROTO)
                .set(HttpHeaderNames.TE, HttpHeaderValues.TRAILERS);
        setIfNotNull(header, TripleHeaderEnum.TIMEOUT.getHeader(), timeout);
        // Skip the version header only for the default "1.0.0" when configured to.
        if (!ignoreDefaultVersion || !"1.0.0".equals(version)) {
            setIfNotNull(header, TripleHeaderEnum.SERVICE_VERSION.getHeader(), version);
        }
        setIfNotNull(header, TripleHeaderEnum.SERVICE_GROUP.getHeader(), group);
        setIfNotNull(header, TripleHeaderEnum.CONSUMER_APP_NAME_KEY.getHeader(), application);
        setIfNotNull(header, TripleHeaderEnum.GRPC_ACCEPT_ENCODING.getHeader(), acceptEncoding);
        // Identity encoding is the default and is therefore never advertised.
        if (!Identity.MESSAGE_ENCODING.equals(compressor.getMessageEncoding())) {
            setIfNotNull(header, TripleHeaderEnum.GRPC_ENCODING.getHeader(), compressor.getMessageEncoding());
        }
        StreamUtils.convertAttachment(header, attachments, convertNoLowerHeader);
        return header;
    }

    /** Sets {@code key} on {@code headers} only when {@code value} is non-null. */
    private void setIfNotNull(DefaultHttp2Headers headers, CharSequence key, CharSequence value) {
        if (value == null) {
            return;
        }
        headers.set(key, value);
    }
}
| 6,106 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/TripleProtocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.config.ConfigurationUtils;
import org.apache.dubbo.common.logger.Logger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.threadpool.manager.ExecutorRepository;
import org.apache.dubbo.common.utils.ExecutorUtil;
import org.apache.dubbo.remoting.api.connection.AbstractConnectionClient;
import org.apache.dubbo.remoting.api.pu.DefaultPuHandler;
import org.apache.dubbo.remoting.exchange.PortUnificationExchanger;
import org.apache.dubbo.rpc.Exporter;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.PathResolver;
import org.apache.dubbo.rpc.RpcException;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.protocol.AbstractExporter;
import org.apache.dubbo.rpc.protocol.AbstractProtocol;
import org.apache.dubbo.rpc.protocol.tri.compressor.DeCompressor;
import org.apache.dubbo.rpc.protocol.tri.service.TriBuiltinService;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import io.grpc.health.v1.HealthCheckResponse;
import io.grpc.health.v1.HealthCheckResponse.ServingStatus;
import static org.apache.dubbo.common.constants.CommonConstants.DEFAULT_CLIENT_THREADPOOL;
import static org.apache.dubbo.common.constants.CommonConstants.THREADPOOL_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.THREAD_NAME_KEY;
import static org.apache.dubbo.config.Constants.CLIENT_THREAD_POOL_NAME;
import static org.apache.dubbo.config.Constants.SERVER_THREAD_POOL_NAME;
import static org.apache.dubbo.rpc.Constants.H2_IGNORE_1_0_0_KEY;
import static org.apache.dubbo.rpc.Constants.H2_RESOLVE_FALLBACK_TO_DEFAULT_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SUPPORT_NO_LOWER_HEADER_KEY;
public class TripleProtocol extends AbstractProtocol {
private static final Logger logger = LoggerFactory.getLogger(TripleProtocol.class);
private final PathResolver pathResolver;
private final TriBuiltinService triBuiltinService;
private final String acceptEncodings;
/**
* There is only one
*/
public static boolean CONVERT_NO_LOWER_HEADER = false;
public static boolean IGNORE_1_0_0_VERSION = false;
public static boolean RESOLVE_FALLBACK_TO_DEFAULT = true;
public TripleProtocol(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
this.triBuiltinService = new TriBuiltinService(frameworkModel);
this.pathResolver =
frameworkModel.getExtensionLoader(PathResolver.class).getDefaultExtension();
CONVERT_NO_LOWER_HEADER = ConfigurationUtils.getEnvConfiguration(ApplicationModel.defaultModel())
.getBoolean(H2_SUPPORT_NO_LOWER_HEADER_KEY, true);
IGNORE_1_0_0_VERSION = ConfigurationUtils.getEnvConfiguration(ApplicationModel.defaultModel())
.getBoolean(H2_IGNORE_1_0_0_KEY, false);
RESOLVE_FALLBACK_TO_DEFAULT = ConfigurationUtils.getEnvConfiguration(ApplicationModel.defaultModel())
.getBoolean(H2_RESOLVE_FALLBACK_TO_DEFAULT_KEY, true);
Set<String> supported =
frameworkModel.getExtensionLoader(DeCompressor.class).getSupportedExtensions();
this.acceptEncodings = String.join(",", supported);
}
@Override
public int getDefaultPort() {
return 50051;
}
@Override
public <T> Exporter<T> export(Invoker<T> invoker) throws RpcException {
URL url = invoker.getUrl();
String key = serviceKey(url);
final AbstractExporter<T> exporter = new AbstractExporter<T>(invoker) {
@Override
public void afterUnExport() {
pathResolver.remove(url.getServiceKey());
pathResolver.remove(url.getServiceModel().getServiceModel().getInterfaceName());
// set service status
if (triBuiltinService.enable()) {
triBuiltinService
.getHealthStatusManager()
.setStatus(url.getServiceKey(), ServingStatus.NOT_SERVING);
triBuiltinService
.getHealthStatusManager()
.setStatus(url.getServiceInterface(), ServingStatus.NOT_SERVING);
}
exporterMap.remove(key);
}
};
exporterMap.put(key, exporter);
invokers.add(invoker);
Invoker<?> previous = pathResolver.add(url.getServiceKey(), invoker);
if (previous != null) {
if (url.getServiceKey()
.equals(url.getServiceModel().getServiceModel().getInterfaceName())) {
logger.info("Already exists an invoker[" + previous.getUrl() + "] on path[" + url.getServiceKey()
+ "], dubbo will override with invoker[" + url + "]");
} else {
throw new IllegalStateException(
"Already exists an invoker[" + previous.getUrl() + "] on path[" + url.getServiceKey()
+ "], failed to add invoker[" + url + "] , please use unique serviceKey.");
}
}
if (RESOLVE_FALLBACK_TO_DEFAULT) {
previous = pathResolver.addIfAbsent(
url.getServiceModel().getServiceModel().getInterfaceName(), invoker);
if (previous != null) {
logger.info("Already exists an invoker[" + previous.getUrl() + "] on path["
+ url.getServiceModel().getServiceModel().getInterfaceName()
+ "], dubbo will skip override with invoker["
+ url + "]");
} else {
logger.info("Add fallback triple invoker[" + url + "] to path["
+ url.getServiceModel().getServiceModel().getInterfaceName() + "] with invoker[" + url + "]");
}
}
// set service status
if (triBuiltinService.enable()) {
triBuiltinService
.getHealthStatusManager()
.setStatus(url.getServiceKey(), HealthCheckResponse.ServingStatus.SERVING);
triBuiltinService
.getHealthStatusManager()
.setStatus(url.getServiceInterface(), HealthCheckResponse.ServingStatus.SERVING);
}
// init
ExecutorRepository.getInstance(url.getOrDefaultApplicationModel())
.createExecutorIfAbsent(ExecutorUtil.setThreadName(url, SERVER_THREAD_POOL_NAME));
PortUnificationExchanger.bind(url, new DefaultPuHandler());
optimizeSerialization(url);
return exporter;
}
@Override
public <T> Invoker<T> refer(Class<T> type, URL url) throws RpcException {
optimizeSerialization(url);
ExecutorService streamExecutor = getOrCreateStreamExecutor(url.getOrDefaultApplicationModel(), url);
AbstractConnectionClient connectionClient = PortUnificationExchanger.connect(url, new DefaultPuHandler());
TripleInvoker<T> invoker =
new TripleInvoker<>(type, url, acceptEncodings, connectionClient, invokers, streamExecutor);
invokers.add(invoker);
return invoker;
}
private ExecutorService getOrCreateStreamExecutor(ApplicationModel applicationModel, URL url) {
url = url.addParameter(THREAD_NAME_KEY, CLIENT_THREAD_POOL_NAME)
.addParameterIfAbsent(THREADPOOL_KEY, DEFAULT_CLIENT_THREADPOOL);
ExecutorService executor =
ExecutorRepository.getInstance(applicationModel).createExecutorIfAbsent(url);
Objects.requireNonNull(executor, String.format("No available executor found in %s", url));
return executor;
}
    /**
     * Unused template hook from the parent protocol: this class overrides
     * {@link #refer(Class, URL)} directly (see above), so this method is not expected
     * to be reached and deliberately returns {@code null}.
     */
    @Override
    protected <T> Invoker<T> protocolBindingRefer(Class<T> type, URL url) throws RpcException {
        return null;
    }
    /**
     * Shuts down the triple protocol: closes the port-unification servers first so no
     * new requests are accepted, clears the path-to-invoker routing table, then delegates
     * to the parent to destroy the remaining exporters and invokers.
     */
    @Override
    public void destroy() {
        if (logger.isInfoEnabled()) {
            logger.info("Destroying protocol [" + this.getClass().getSimpleName() + "] ...");
        }
        // Network endpoints are closed before the path resolver so in-flight routing
        // cannot observe a half-torn-down state.
        PortUnificationExchanger.close();
        pathResolver.destroy();
        super.destroy();
    }
}
| 6,107 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/ClassLoadUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
/**
 * Helper for swapping the current thread's context class loader.
 */
public class ClassLoadUtil {

    /**
     * Installs {@code loader} as the current thread's context class loader, unless it is
     * {@code null} or already the active one. A {@link SecurityException} is swallowed on
     * purpose: under a security manager (e.g. ForkJoinPool workers on JDK 8) the swap may
     * be forbidden, and callers treat the switch as best-effort.
     */
    public static void switchContextLoader(ClassLoader loader) {
        if (loader == null) {
            return;
        }
        Thread current = Thread.currentThread();
        try {
            if (current.getContextClassLoader() != loader) {
                current.setContextClassLoader(loader);
            }
        } catch (SecurityException ignored) {
            // best-effort: not permitted to switch under a security manager
        }
    }
}
| 6,108 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/ExceptionUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.utils.CollectionUtils;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
/**
 * Utilities for rendering {@link Throwable} stack traces as strings and frame lists.
 */
public class ExceptionUtils {

    /**
     * Renders the full stack trace of {@code throwable}, exactly as
     * {@link Throwable#printStackTrace()} would print it.
     */
    public static String getStackTrace(final Throwable throwable) {
        final StringWriter sw = new StringWriter();
        final PrintWriter pw = new PrintWriter(sw, true);
        throwable.printStackTrace(pw);
        return sw.getBuffer().toString();
    }

    /**
     * Joins stack frame lines into a single string, one frame per line.
     * Returns an empty string for a {@code null} or empty list.
     */
    public static String getStackFrameString(List<String> stackFrameList) {
        if (stackFrameList == null || stackFrameList.isEmpty()) {
            return "";
        }
        // Every frame, including the last, is terminated by '\n' (historic behavior).
        return String.join("\n", stackFrameList) + "\n";
    }

    /**
     * Returns the rendered stack trace of {@code throwable} split into individual frames,
     * or an empty array when {@code throwable} is {@code null}.
     */
    public static String[] getStackFrames(final Throwable throwable) {
        if (throwable == null) {
            return new String[0];
        }
        return getStackFrames(getStackTrace(throwable));
    }

    /** Splits an already-rendered stack trace into frames (package-private for reuse/tests). */
    static String[] getStackFrames(final String stackTrace) {
        return splitFrames(stackTrace, Integer.MAX_VALUE).toArray(new String[0]);
    }

    /**
     * Returns at most {@code maxDepth} frames of {@code t}'s stack trace, topmost first.
     */
    public static List<String> getStackFrameList(final Throwable t, int maxDepth) {
        return splitFrames(getStackTrace(t), maxDepth);
    }

    /** Returns all frames of {@code t}'s stack trace, topmost first. */
    public static List<String> getStackFrameList(final Throwable t) {
        return getStackFrameList(t, Integer.MAX_VALUE);
    }

    /**
     * Splits a rendered stack trace on the platform line separator, keeping at most
     * {@code maxDepth} lines. Shared by the frame-array and frame-list accessors.
     */
    private static List<String> splitFrames(final String stackTrace, final int maxDepth) {
        final StringTokenizer frames = new StringTokenizer(stackTrace, System.lineSeparator());
        final List<String> list = new ArrayList<>();
        for (int i = 0; i < maxDepth && frames.hasMoreTokens(); i++) {
            list.add(frames.nextToken());
        }
        return list;
    }
}
| 6,109 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/TriHttp2RemoteFlowController.java | /*
* Copyright 2014 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.config.Configuration;
import org.apache.dubbo.common.config.ConfigurationUtils;
import org.apache.dubbo.rpc.model.ApplicationModel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http2.Http2Connection;
import io.netty.handler.codec.http2.Http2ConnectionAdapter;
import io.netty.handler.codec.http2.Http2Error;
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.codec.http2.Http2RemoteFlowController;
import io.netty.handler.codec.http2.Http2Stream;
import io.netty.handler.codec.http2.Http2StreamVisitor;
import io.netty.handler.codec.http2.StreamByteDistributor;
import io.netty.handler.codec.http2.WeightedFairQueueByteDistributor;
import io.netty.util.internal.UnstableApi;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.util.ArrayDeque;
import java.util.Deque;
import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_WINDOW_SIZE;
import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_WEIGHT;
import static io.netty.handler.codec.http2.Http2CodecUtil.MIN_WEIGHT;
import static io.netty.handler.codec.http2.Http2Error.FLOW_CONTROL_ERROR;
import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR;
import static io.netty.handler.codec.http2.Http2Error.STREAM_CLOSED;
import static io.netty.handler.codec.http2.Http2Exception.streamError;
import static io.netty.handler.codec.http2.Http2Stream.State.HALF_CLOSED_LOCAL;
import static io.netty.util.internal.ObjectUtil.checkNotNull;
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_INITIAL_WINDOW_SIZE_KEY;
/**
* This design is learning from {@see io.netty.handler.codec.http2.DefaultHttp2RemoteFlowController} which is in Netty.
*/
@UnstableApi
public class TriHttp2RemoteFlowController implements Http2RemoteFlowController {
    private static final InternalLogger logger =
            InternalLoggerFactory.getInstance(TriHttp2RemoteFlowController.class);
    // Minimum chunk considered worth distributing in one write pass; see minUsableChannelBytes().
    private static final int MIN_WRITABLE_CHUNK = 32 * 1024;
    private final Http2Connection connection;
    // Per-stream property key under which each stream's FlowState is stored.
    private final Http2Connection.PropertyKey stateKey;
    private final StreamByteDistributor streamByteDistributor;
    // Flow state of the connection stream (stream 0), i.e. the connection-level window.
    private final FlowState connectionState;
    // Dubbo global configuration; supplies the initial-window-size override below.
    private final Configuration config;
    private int initialWindowSize;
    private WritabilityMonitor monitor;
    private ChannelHandlerContext ctx;
    public TriHttp2RemoteFlowController(Http2Connection connection, ApplicationModel applicationModel) {
        this(connection, (Listener) null, applicationModel);
    }
    public TriHttp2RemoteFlowController(Http2Connection connection,
                                        StreamByteDistributor streamByteDistributor,
                                        ApplicationModel applicationModel) {
        this(connection, streamByteDistributor, null, applicationModel);
    }
    public TriHttp2RemoteFlowController(Http2Connection connection, final Listener listener, ApplicationModel applicationModel) {
        this(connection, new WeightedFairQueueByteDistributor(connection), listener, applicationModel);
    }
    /**
     * Main constructor. Unlike Netty's default controller, the initial window size is read
     * from Dubbo's global configuration (H2_SETTINGS_INITIAL_WINDOW_SIZE_KEY), falling back
     * to Netty's DEFAULT_WINDOW_SIZE.
     */
    public TriHttp2RemoteFlowController(Http2Connection connection,
                                        StreamByteDistributor streamByteDistributor,
                                        final Listener listener,
                                        ApplicationModel applicationModel) {
        this.connection = checkNotNull(connection, "connection");
        this.streamByteDistributor = checkNotNull(streamByteDistributor, "streamWriteDistributor");
        this.config = ConfigurationUtils.getGlobalConfiguration(applicationModel);
        this.initialWindowSize = config.getInt(H2_SETTINGS_INITIAL_WINDOW_SIZE_KEY, DEFAULT_WINDOW_SIZE);
        // Add a flow state for the connection.
        stateKey = connection.newKey();
        connectionState = new FlowState(connection.connectionStream());
        connection.connectionStream().setProperty(stateKey, connectionState);
        // Monitor may depend upon connectionState, and so initialize after connectionState
        listener(listener);
        monitor.windowSize(connectionState, initialWindowSize);
        // Register for notification of new streams.
        connection.addListener(new Http2ConnectionAdapter() {
            @Override
            public void onStreamAdded(Http2Stream stream) {
                // If the stream state is not open then the stream is not yet eligible for flow controlled frames and
                // only requires the ReducedFlowState. Otherwise the full amount of memory is required.
                stream.setProperty(stateKey, new FlowState(stream));
            }
            @Override
            public void onStreamActive(Http2Stream stream) {
                // If the object was previously created, but later activated then we have to ensure the proper
                // initialWindowSize is used.
                monitor.windowSize(state(stream), initialWindowSize);
            }
            @Override
            public void onStreamClosed(Http2Stream stream) {
                // Any pending frames can never be written, cancel and
                // write errors for any pending frames.
                state(stream).cancel(STREAM_CLOSED, null);
            }
            @Override
            public void onStreamHalfClosed(Http2Stream stream) {
                if (HALF_CLOSED_LOCAL == stream.state()) {
                    /*
                     * When this method is called there should not be any
                     * pending frames left if the API is used correctly. However,
                     * it is possible that a erroneous application can sneak
                     * in a frame even after having already written a frame with the
                     * END_STREAM flag set, as the stream state might not transition
                     * immediately to HALF_CLOSED_LOCAL / CLOSED due to flow control
                     * delaying the write.
                     *
                     * This is to cancel any such illegal writes.
                     */
                    state(stream).cancel(STREAM_CLOSED, null);
                }
            }
        });
    }
    /**
     * {@inheritDoc}
     * <p>
     * Any queued {@link FlowControlled} objects will be sent.
     */
    @Override
    public void channelHandlerContext(ChannelHandlerContext ctx) throws Http2Exception {
        this.ctx = checkNotNull(ctx, "ctx");
        // Writing the pending bytes will not check writability change and instead a writability change notification
        // to be provided by an explicit call.
        channelWritabilityChanged();
        // Don't worry about cleaning up queued frames here if ctx is null. It is expected that all streams will be
        // closed and the queue cleanup will occur when the stream state transitions occur.
        // If any frames have been queued up, we should send them now that we have a channel context.
        if (isChannelWritable()) {
            writePendingBytes();
        }
    }
    @Override
    public ChannelHandlerContext channelHandlerContext() {
        return ctx;
    }
    @Override
    public void initialWindowSize(int newWindowSize) throws Http2Exception {
        assert ctx == null || ctx.executor().inEventLoop();
        monitor.initialWindowSize(newWindowSize);
    }
    @Override
    public int initialWindowSize() {
        return initialWindowSize;
    }
    @Override
    public int windowSize(Http2Stream stream) {
        return state(stream).windowSize();
    }
    @Override
    public boolean isWritable(Http2Stream stream) {
        return monitor.isWritable(state(stream));
    }
    @Override
    public void channelWritabilityChanged() throws Http2Exception {
        monitor.channelWritabilityChange();
    }
    @Override
    public void updateDependencyTree(int childStreamId, int parentStreamId, short weight, boolean exclusive) {
        // It is assumed there are all validated at a higher level. For example in the Http2FrameReader.
        assert weight >= MIN_WEIGHT && weight <= MAX_WEIGHT : "Invalid weight";
        assert childStreamId != parentStreamId : "A stream cannot depend on itself";
        assert childStreamId > 0 && parentStreamId >= 0 : "childStreamId must be > 0. parentStreamId must be >= 0.";
        streamByteDistributor.updateDependencyTree(childStreamId, parentStreamId, weight, exclusive);
    }
    private boolean isChannelWritable() {
        return ctx != null && isChannelWritable0();
    }
    private boolean isChannelWritable0() {
        return ctx.channel().isWritable();
    }
    @Override
    public void listener(Listener listener) {
        monitor = listener == null ? new WritabilityMonitor() : new ListenerWritabilityMonitor(listener);
    }
    @Override
    public void incrementWindowSize(Http2Stream stream, int delta) throws Http2Exception {
        assert ctx == null || ctx.executor().inEventLoop();
        monitor.incrementWindowSize(state(stream), delta);
    }
    @Override
    public void addFlowControlled(Http2Stream stream, FlowControlled frame) {
        // The context can be null assuming the frame will be queued and send later when the context is set.
        assert ctx == null || ctx.executor().inEventLoop();
        checkNotNull(frame, "frame");
        try {
            monitor.enqueueFrame(state(stream), frame);
        } catch (Throwable t) {
            frame.error(ctx, t);
        }
    }
    @Override
    public boolean hasFlowControlled(Http2Stream stream) {
        return state(stream).hasFrame();
    }
    private FlowState state(Http2Stream stream) {
        return (FlowState) stream.getProperty(stateKey);
    }
    /**
     * Returns the flow control window for the entire connection.
     */
    private int connectionWindowSize() {
        return connectionState.windowSize();
    }
    private int minUsableChannelBytes() {
        // The current allocation algorithm values "fairness" and doesn't give any consideration to "goodput". It
        // is possible that 1 byte will be allocated to many streams. In an effort to try to make "goodput"
        // reasonable with the current allocation algorithm we have this "cheap" check up front to ensure there is
        // an "adequate" amount of connection window before allocation is attempted. This is not foolproof as if the
        // number of streams is >= this minimal number then we may still have the issue, but the idea is to narrow the
        // circumstances in which this can happen without rewriting the allocation algorithm.
        return max(ctx.channel().config().getWriteBufferLowWaterMark(), MIN_WRITABLE_CHUNK);
    }
    private int maxUsableChannelBytes() {
        // If the channel isWritable, allow at least minUsableChannelBytes.
        int channelWritableBytes = (int) min(Integer.MAX_VALUE, ctx.channel().bytesBeforeUnwritable());
        int usableBytes = channelWritableBytes > 0 ? max(channelWritableBytes, minUsableChannelBytes()) : 0;
        // Clip the usable bytes by the connection window.
        return min(connectionState.windowSize(), usableBytes);
    }
    /**
     * The amount of bytes that can be supported by underlying {@link io.netty.channel.Channel} without
     * queuing "too-much".
     */
    private int writableBytes() {
        return min(connectionWindowSize(), maxUsableChannelBytes());
    }
    @Override
    public void writePendingBytes() throws Http2Exception {
        monitor.writePendingBytes();
    }
    /**
     * The remote flow control state for a single stream.
     */
    private final class FlowState implements StreamByteDistributor.StreamState {
        private final Http2Stream stream;
        // Flow-controlled frames queued for this stream, written in FIFO order.
        private final Deque<FlowControlled> pendingWriteQueue;
        // Remaining stream-level flow control window, in bytes.
        private int window;
        // Total bytes queued in pendingWriteQueue (not yet written).
        private long pendingBytes;
        // Last writability value reported to the listener; used to detect transitions.
        private boolean markedWritable;
        /**
         * Set to true while a frame is being written, false otherwise.
         */
        private boolean writing;
        /**
         * Set to true if cancel() was called.
         */
        private boolean cancelled;
        FlowState(Http2Stream stream) {
            this.stream = stream;
            pendingWriteQueue = new ArrayDeque<FlowControlled>(2);
        }
        /**
         * Determine if the stream associated with this object is writable.
         * @return {@code true} if the stream associated with this object is writable.
         */
        boolean isWritable() {
            return windowSize() > pendingBytes() && !cancelled;
        }
        /**
         * The stream this state is associated with.
         */
        @Override
        public Http2Stream stream() {
            return stream;
        }
        /**
         * Returns the parameter from the last call to {@link #markedWritability(boolean)}.
         */
        boolean markedWritability() {
            return markedWritable;
        }
        /**
         * Save the state of writability.
         */
        void markedWritability(boolean isWritable) {
            this.markedWritable = isWritable;
        }
        @Override
        public int windowSize() {
            return window;
        }
        /**
         * Reset the window size for this stream.
         */
        void windowSize(int initialWindowSize) {
            window = initialWindowSize;
        }
        /**
         * Write the allocated bytes for this stream.
         * @return the number of bytes written for a stream or {@code -1} if no write occurred.
         */
        int writeAllocatedBytes(int allocated) {
            final int initialAllocated = allocated;
            int writtenBytes;
            // In case an exception is thrown we want to remember it and pass it to cancel(Throwable).
            Throwable cause = null;
            FlowControlled frame;
            try {
                assert !writing;
                writing = true;
                // Write the remainder of frames that we are allowed to
                boolean writeOccurred = false;
                while (!cancelled && (frame = peek()) != null) {
                    int maxBytes = min(allocated, writableWindow());
                    if (maxBytes <= 0 && frame.size() > 0) {
                        // The frame still has data, but the amount of allocated bytes has been exhausted.
                        // Don't write needless empty frames.
                        break;
                    }
                    writeOccurred = true;
                    int initialFrameSize = frame.size();
                    try {
                        frame.write(ctx, max(0, maxBytes));
                        if (frame.size() == 0) {
                            // This frame has been fully written, remove this frame and notify it.
                            // Since we remove this frame first, we're guaranteed that its error
                            // method will not be called when we call cancel.
                            pendingWriteQueue.remove();
                            frame.writeComplete();
                        }
                    } finally {
                        // Decrement allocated by how much was actually written.
                        allocated -= initialFrameSize - frame.size();
                    }
                }
                if (!writeOccurred) {
                    // Either there was no frame, or the amount of allocated bytes has been exhausted.
                    return -1;
                }
            } catch (Throwable t) {
                // Mark the state as cancelled, we'll clear the pending queue via cancel() below.
                cancelled = true;
                cause = t;
            } finally {
                writing = false;
                // Make sure we always decrement the flow control windows
                // by the bytes written.
                writtenBytes = initialAllocated - allocated;
                decrementPendingBytes(writtenBytes, false);
                decrementFlowControlWindow(writtenBytes);
                // If a cancellation occurred while writing, call cancel again to
                // clear and error all of the pending writes.
                if (cancelled) {
                    cancel(INTERNAL_ERROR, cause);
                }
                // Dubbo addition (not in Netty's DefaultHttp2RemoteFlowController): when the
                // connection-level window is fully exhausted, cancel this stream's remaining
                // pending writes with FLOW_CONTROL_ERROR. NOTE(review): presumably a fail-fast
                // guard against unbounded queueing — confirm intended semantics.
                if(monitor.isOverFlowControl()){
                    cause = new Throwable();
                    cancel(FLOW_CONTROL_ERROR,cause);
                }
            }
            return writtenBytes;
        }
        /**
         * Increments the flow control window for this stream by the given delta and returns the new value.
         */
        int incrementStreamWindow(int delta) throws Http2Exception {
            if (delta > 0 && Integer.MAX_VALUE - delta < window) {
                throw streamError(stream.id(), FLOW_CONTROL_ERROR,
                        "Window size overflow for stream: %d", stream.id());
            }
            window += delta;
            streamByteDistributor.updateStreamableBytes(this);
            return window;
        }
        /**
         * Returns the maximum writable window (minimum of the stream and connection windows).
         */
        private int writableWindow() {
            return min(window, connectionWindowSize());
        }
        @Override
        public long pendingBytes() {
            return pendingBytes;
        }
        /**
         * Adds the {@code frame} to the pending queue and increments the pending byte count.
         */
        void enqueueFrame(FlowControlled frame) {
            FlowControlled last = pendingWriteQueue.peekLast();
            if (last == null) {
                enqueueFrameWithoutMerge(frame);
                return;
            }
            int lastSize = last.size();
            if (last.merge(ctx, frame)) {
                // The new frame was folded into the previous one; only account for the growth.
                incrementPendingBytes(last.size() - lastSize, true);
                return;
            }
            enqueueFrameWithoutMerge(frame);
        }
        private void enqueueFrameWithoutMerge(FlowControlled frame) {
            pendingWriteQueue.offer(frame);
            // This must be called after adding to the queue in order so that hasFrame() is
            // updated before updating the stream state.
            incrementPendingBytes(frame.size(), true);
        }
        @Override
        public boolean hasFrame() {
            return !pendingWriteQueue.isEmpty();
        }
        /**
         * Returns the head of the pending queue, or {@code null} if empty.
         */
        private FlowControlled peek() {
            return pendingWriteQueue.peek();
        }
        /**
         * Clears the pending queue and writes errors for each remaining frame.
         * @param error the {@link Http2Error} to use.
         * @param cause the {@link Throwable} that caused this method to be invoked.
         */
        void cancel(Http2Error error, Throwable cause) {
            cancelled = true;
            // Ensure that the queue can't be modified while we are writing.
            if (writing) {
                return;
            }
            FlowControlled frame = pendingWriteQueue.poll();
            if (frame != null) {
                // Only create exception once and reuse to reduce overhead of filling in the stacktrace.
                final Http2Exception exception = streamError(stream.id(), error, cause,
                        "Stream closed before write could take place");
                do {
                    writeError(frame, exception);
                    frame = pendingWriteQueue.poll();
                } while (frame != null);
            }
            streamByteDistributor.updateStreamableBytes(this);
            monitor.stateCancelled(this);
        }
        /**
         * Increments the number of pending bytes for this node and optionally updates the
         * {@link StreamByteDistributor}.
         */
        private void incrementPendingBytes(int numBytes, boolean updateStreamableBytes) {
            pendingBytes += numBytes;
            monitor.incrementPendingBytes(numBytes);
            if (updateStreamableBytes) {
                streamByteDistributor.updateStreamableBytes(this);
            }
        }
        /**
         * If this frame is in the pending queue, decrements the number of pending bytes for the stream.
         */
        private void decrementPendingBytes(int bytes, boolean updateStreamableBytes) {
            incrementPendingBytes(-bytes, updateStreamableBytes);
        }
        /**
         * Decrement the per stream and connection flow control window by {@code bytes}.
         */
        private void decrementFlowControlWindow(int bytes) {
            try {
                int negativeBytes = -bytes;
                connectionState.incrementStreamWindow(negativeBytes);
                incrementStreamWindow(negativeBytes);
            } catch (Http2Exception e) {
                // Should never get here since we're decrementing.
                throw new IllegalStateException("Invalid window state when writing frame: " + e.getMessage(), e);
            }
        }
        /**
         * Discards this {@link FlowControlled}, writing an error. If this frame is in the pending queue,
         * the unwritten bytes are removed from this branch of the priority tree.
         */
        private void writeError(FlowControlled frame, Http2Exception cause) {
            assert ctx != null;
            decrementPendingBytes(frame.size(), true);
            frame.error(ctx, cause);
        }
    }
    /**
     * Abstract class which provides common functionality for writability monitor implementations.
     */
    private class WritabilityMonitor implements StreamByteDistributor.Writer {
        // Guards against re-entrant byte distribution; see writePendingBytes().
        private boolean inWritePendingBytes;
        // Sum of pendingBytes across all streams; used for connection-level writability.
        private long totalPendingBytes;
        @Override
        public final void write(Http2Stream stream, int numBytes) {
            state(stream).writeAllocatedBytes(numBytes);
        }
        /**
         * Called when the writability of the underlying channel changes.
         * @throws Http2Exception If a write occurs and an exception happens in the write operation.
         */
        void channelWritabilityChange() throws Http2Exception { }
        /**
         * Called when the state is cancelled.
         * @param state the state that was cancelled.
         */
        void stateCancelled(FlowState state) { }
        /**
         * Set the initial window size for {@code state}.
         * @param state the state to change the initial window size for.
         * @param initialWindowSize the size of the window in bytes.
         */
        void windowSize(FlowState state, int initialWindowSize) {
            state.windowSize(initialWindowSize);
        }
        /**
         * Increment the window size for a particular stream.
         * @param state the state associated with the stream whose window is being incremented.
         * @param delta The amount to increment by.
         * @throws Http2Exception If this operation overflows the window for {@code state}.
         */
        void incrementWindowSize(FlowState state, int delta) throws Http2Exception {
            state.incrementStreamWindow(delta);
        }
        /**
         * Add a frame to be sent via flow control.
         * @param state The state associated with the stream which the {@code frame} is associated with.
         * @param frame the frame to enqueue.
         * @throws Http2Exception If a writability error occurs.
         */
        void enqueueFrame(FlowState state, FlowControlled frame) throws Http2Exception {
            state.enqueueFrame(frame);
        }
        /**
         * Increment the total amount of pending bytes for all streams. When any stream's pending bytes changes
         * method should be called.
         * @param delta The amount to increment by.
         */
        final void incrementPendingBytes(int delta) {
            totalPendingBytes += delta;
            // Notification of writibilty change should be delayed until the end of the top level event.
            // This is to ensure the flow controller is more consistent state before calling external listener methods.
        }
        /**
         * Determine if the stream associated with {@code state} is writable.
         * @param state The state which is associated with the stream to test writability for.
         * @return {@code true} if {@link FlowState#stream()} is writable. {@code false} otherwise.
         */
        final boolean isWritable(FlowState state) {
            return isWritableConnection() && state.isWritable();
        }
        final void writePendingBytes() throws Http2Exception {
            // Reentry is not permitted during the byte distribution process. It may lead to undesirable distribution of
            // bytes and even infinite loops. We protect against reentry and make sure each call has an opportunity to
            // cause a distribution to occur. This may be useful for example if the channel's writability changes from
            // Writable -> Not Writable (because we are writing) -> Writable (because the user flushed to make more room
            // in the channel outbound buffer).
            if (inWritePendingBytes) {
                return;
            }
            inWritePendingBytes = true;
            try {
                int bytesToWrite = writableBytes();
                // Make sure we always write at least once, regardless if we have bytesToWrite or not.
                // This ensures that zero-length frames will always be written.
                for (;;) {
                    if (!streamByteDistributor.distribute(bytesToWrite, this) ||
                        (bytesToWrite = writableBytes()) <= 0 ||
                        !isChannelWritable0()) {
                        break;
                    }
                }
            } finally {
                inWritePendingBytes = false;
            }
        }
        void initialWindowSize(int newWindowSize) throws Http2Exception {
            checkPositiveOrZero(newWindowSize, "newWindowSize");
            final int delta = newWindowSize - initialWindowSize;
            initialWindowSize = newWindowSize;
            connection.forEachActiveStream(new Http2StreamVisitor() {
                @Override
                public boolean visit(Http2Stream stream) throws Http2Exception {
                    state(stream).incrementStreamWindow(delta);
                    return true;
                }
            });
            if (delta > 0 && isChannelWritable()) {
                // The window size increased, send any pending frames for all streams.
                writePendingBytes();
            }
        }
        final boolean isWritableConnection() {
            return connectionState.windowSize() - totalPendingBytes > 0 && isChannelWritable();
        }
        /**
         * Dubbo addition: reports whether the connection-level flow control window is
         * fully exhausted (exactly zero bytes left).
         */
        final boolean isOverFlowControl() {
            if(connectionState.windowSize() == 0){
                return true;
            }else {
                return false;
            }
        }
    }
    /**
     * Writability of a {@code stream} is calculated using the following:
     * <pre>
     * Connection Window - Total Queued Bytes > 0 &&
     * Stream Window - Bytes Queued for Stream > 0 &&
     * isChannelWritable()
     * </pre>
     */
    private final class ListenerWritabilityMonitor extends WritabilityMonitor implements Http2StreamVisitor {
        private final Listener listener;
        ListenerWritabilityMonitor(Listener listener) {
            this.listener = listener;
        }
        @Override
        public boolean visit(Http2Stream stream) throws Http2Exception {
            FlowState state = state(stream);
            if (isWritable(state) != state.markedWritability()) {
                notifyWritabilityChanged(state);
            }
            return true;
        }
        @Override
        void windowSize(FlowState state, int initialWindowSize) {
            super.windowSize(state, initialWindowSize);
            try {
                checkStateWritability(state);
            } catch (Http2Exception e) {
                throw new RuntimeException("Caught unexpected exception from window", e);
            }
        }
        @Override
        void incrementWindowSize(FlowState state, int delta) throws Http2Exception {
            super.incrementWindowSize(state, delta);
            checkStateWritability(state);
        }
        @Override
        void initialWindowSize(int newWindowSize) throws Http2Exception {
            super.initialWindowSize(newWindowSize);
            if (isWritableConnection()) {
                // If the write operation does not occur we still need to check all streams because they
                // may have transitioned from writable to not writable.
                checkAllWritabilityChanged();
            }
        }
        @Override
        void enqueueFrame(FlowState state, FlowControlled frame) throws Http2Exception {
            super.enqueueFrame(state, frame);
            checkConnectionThenStreamWritabilityChanged(state);
        }
        @Override
        void stateCancelled(FlowState state) {
            try {
                checkConnectionThenStreamWritabilityChanged(state);
            } catch (Http2Exception e) {
                throw new RuntimeException("Caught unexpected exception from checkAllWritabilityChanged", e);
            }
        }
        @Override
        void channelWritabilityChange() throws Http2Exception {
            if (connectionState.markedWritability() != isChannelWritable()) {
                checkAllWritabilityChanged();
            }
        }
        private void checkStateWritability(FlowState state) throws Http2Exception {
            if (isWritable(state) != state.markedWritability()) {
                if (state == connectionState) {
                    checkAllWritabilityChanged();
                } else {
                    notifyWritabilityChanged(state);
                }
            }
        }
        private void notifyWritabilityChanged(FlowState state) {
            state.markedWritability(!state.markedWritability());
            try {
                listener.writabilityChanged(state.stream);
            } catch (Throwable cause) {
                logger.error("Caught Throwable from listener.writabilityChanged", cause);
            }
        }
        private void checkConnectionThenStreamWritabilityChanged(FlowState state) throws Http2Exception {
            // It is possible that the connection window and/or the individual stream writability could change.
            if (isWritableConnection() != connectionState.markedWritability()) {
                checkAllWritabilityChanged();
            } else if (isWritable(state) != state.markedWritability()) {
                notifyWritabilityChanged(state);
            }else if(isOverFlowControl()){
                // Dubbo addition: surface an exhausted connection window as a stream-level
                // FLOW_CONTROL_ERROR instead of silently queueing. NOTE(review): Netty's default
                // controller keeps frames pending here — verify this fail-fast is intended on
                // every enqueue/cancel path that reaches this method.
                throw streamError(state.stream().id(), FLOW_CONTROL_ERROR,
                    "TotalPendingBytes size overflow for stream: %d", state.stream().id());
            }
        }
        private void checkAllWritabilityChanged() throws Http2Exception {
            // Make sure we mark that we have notified as a result of this change.
            connectionState.markedWritability(isWritableConnection());
            connection.forEachActiveStream(this);
        }
    }
}
| 6,110 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/TripleConstant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import io.netty.util.AsciiString;
/**
 * Shared constants for the triple (gRPC-compatible) protocol implementation.
 */
public class TripleConstant {
    /** gRPC content-type carrying protobuf payloads. */
    public static final String CONTENT_PROTO = "application/grpc+proto";
    /** Base gRPC content-type (no codec suffix). */
    public static final String APPLICATION_GRPC = "application/grpc";
    /** Plain-text content-type value (UTF-8). */
    public static final String TEXT_PLAIN_UTF8 = "text/plain; encoding=utf-8";
    /** Protocol version string advertised by triple. */
    public static final String TRI_VERSION = "3.0-TRI";
    /** URL/attachment key selecting the serialization scheme. */
    public static final String SERIALIZATION_KEY = "serialization";
    /** HTTP "te" header name. */
    public static final String TE_KEY = "te";
    /** Hessian serialization scheme identifiers. */
    public static final String HESSIAN4 = "hessian4";
    public static final String HESSIAN2 = "hessian2";
    /** Suffix marking binary (base64-encoded) header values. */
    public static final String HEADER_BIN_SUFFIX = "-bin";
    /** Cached scheme pseudo-header values. */
    public static final AsciiString HTTPS_SCHEME = AsciiString.of("https");
    public static final AsciiString HTTP_SCHEME = AsciiString.of("http");
}
| 6,111 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/DeadlineFuture.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.logger.Logger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.resource.GlobalResourceInitializer;
import org.apache.dubbo.common.timer.HashedWheelTimer;
import org.apache.dubbo.common.timer.Timeout;
import org.apache.dubbo.common.timer.Timer;
import org.apache.dubbo.common.timer.TimerTask;
import org.apache.dubbo.common.utils.NamedThreadFactory;
import org.apache.dubbo.rpc.AppResponse;
import org.apache.dubbo.rpc.TriRpcStatus;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * A {@link CompletableFuture} for Triple calls that enforces a client-side deadline.
 * <p>
 * A {@link TimeoutCheckTask} is scheduled on a shared {@link HashedWheelTimer} at
 * construction time; if a response arrives first, the task is cancelled, otherwise the
 * task completes this future with a DEADLINE_EXCEEDED {@link AppResponse}.
 */
public class DeadlineFuture extends CompletableFuture<AppResponse> {

    private static final Logger LOGGER = LoggerFactory.getLogger(DeadlineFuture.class);

    private final String serviceName;
    private final String methodName;
    // Remote address; currently only stored for diagnostics.
    private final String address;
    // Deadline in milliseconds, counted from construction time.
    private final int timeout;
    private final long start = System.currentTimeMillis();
    // Listeners fired (on the executor, if one is set) when the deadline expires.
    private final List<Runnable> timeoutListeners = new ArrayList<>();
    // Handle to the scheduled deadline check, used to cancel it on early completion.
    private final Timeout timeoutTask;
    private ExecutorService executor;

    // Shared wheel timer for all DeadlineFutures; lazily created, stopped via destroy().
    private static final GlobalResourceInitializer<Timer> TIME_OUT_TIMER = new GlobalResourceInitializer<>(
            () -> new HashedWheelTimer(new NamedThreadFactory("dubbo-future-timeout", true), 30, TimeUnit.MILLISECONDS),
            DeadlineFuture::destroy);

    private DeadlineFuture(String serviceName, String methodName, String address, int timeout) {
        this.serviceName = serviceName;
        this.methodName = methodName;
        this.address = address;
        this.timeout = timeout;
        TimeoutCheckTask timeoutCheckTask = new TimeoutCheckTask();
        this.timeoutTask = TIME_OUT_TIMER.get().newTimeout(timeoutCheckTask, timeout, TimeUnit.MILLISECONDS);
    }

    /** Stops the shared deadline timer; invoked when global resources are released. */
    public static void destroy() {
        TIME_OUT_TIMER.remove(Timer::stop);
    }

    /**
     * init a DeadlineFuture 1.init a DeadlineFuture 2.timeout check
     *
     * @param timeout timeout in Mills
     * @return a new DeadlineFuture
     */
    public static DeadlineFuture newFuture(
            String serviceName, String methodName, String address, int timeout, ExecutorService executor) {
        final DeadlineFuture future = new DeadlineFuture(serviceName, methodName, address, timeout);
        future.setExecutor(executor);
        return future;
    }

    /**
     * Completes this future with the given status/response. Cancels the pending deadline
     * task unless the status itself is DEADLINE_EXCEEDED (i.e. the task already fired).
     */
    public void received(TriRpcStatus status, AppResponse appResponse) {
        if (status.code != TriRpcStatus.Code.DEADLINE_EXCEEDED && !timeoutTask.isCancelled()) {
            timeoutTask.cancel();
        }
        // Deliver on the configured executor when present so completion callbacks run there.
        if (getExecutor() != null) {
            getExecutor().execute(() -> doReceived(status, appResponse));
        } else {
            doReceived(status, appResponse);
        }
    }

    /** Registers a callback to run when the deadline fires. Not fired on normal completion. */
    public void addTimeoutListener(Runnable runnable) {
        timeoutListeners.add(runnable);
    }

    public List<Runnable> getTimeoutListeners() {
        return timeoutListeners;
    }

    public ExecutorService getExecutor() {
        return executor;
    }

    public void setExecutor(ExecutorService executor) {
        this.executor = executor;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        timeoutTask.cancel();
        doReceived(TriRpcStatus.CANCELLED, new AppResponse(TriRpcStatus.CANCELLED.asException()));
        // NOTE(review): always reports true, even when the future was already completed
        // and doReceived was a no-op — kept for compatibility with existing callers.
        return true;
    }

    public void cancel() {
        this.cancel(true);
    }

    private void doReceived(TriRpcStatus status, AppResponse appResponse) {
        // isDone() already covers cancelled and exceptional completion; the extra checks
        // are redundant but kept for clarity.
        if (isDone() || isCancelled() || isCompletedExceptionally()) {
            return;
        }
        // Still needs to be discussed here, but for now, that's it
        // Remove the judgment of status is ok,
        // because the completelyExceptionally method will lead to the onError method in the filter,
        // but there are also exceptions in the onResponse in the filter,which is a bit confusing.
        // We recommend only handling onResponse in which onError is called for handling
        this.complete(appResponse);
    }

    /** Builds the human-readable timeout description used for DEADLINE_EXCEEDED statuses. */
    private String getTimeoutMessage() {
        long nowTimestamp = System.currentTimeMillis();
        // SimpleDateFormat is not thread-safe, so it cannot be a shared static field;
        // one instance per call (instead of two) is the cheapest safe option.
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
        return "Waiting server-side response timeout by scan timer. start time: "
                + dateFormat.format(new Date(start))
                + ", end time: " + dateFormat.format(new Date(nowTimestamp))
                + ", timeout: " + timeout + " ms, service: " + serviceName
                + ", method: " + methodName;
    }

    /** Timer task that fires when the deadline elapses without a response. */
    private class TimeoutCheckTask implements TimerTask {

        @Override
        public void run(Timeout timeout) {
            if (DeadlineFuture.this.isDone()) {
                return;
            }
            ExecutorService executor = getExecutor();
            if (executor != null && !executor.isShutdown()) {
                executor.execute(() -> {
                    notifyTimeout();
                    for (Runnable timeoutListener : getTimeoutListeners()) {
                        timeoutListener.run();
                    }
                });
            } else {
                // No usable executor: complete inline on the timer thread
                // (timeout listeners are not invoked in this path).
                notifyTimeout();
            }
        }

        private void notifyTimeout() {
            final TriRpcStatus status = TriRpcStatus.DEADLINE_EXCEEDED.withDescription(getTimeoutMessage());
            AppResponse timeoutResponse = new AppResponse();
            timeoutResponse.setException(status.asException());
            DeadlineFuture.this.doReceived(status, timeoutResponse);
        }
    }
}
| 6,112 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/DefaultPackableMethodFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.rpc.model.MethodDescriptor;
import org.apache.dubbo.rpc.model.PackableMethod;
import org.apache.dubbo.rpc.model.PackableMethodFactory;
/**
 * Default {@link PackableMethodFactory} that builds {@link PackableMethod}s via
 * {@link ReflectionPackableMethod}, i.e. by reflecting over the method signature.
 */
public class DefaultPackableMethodFactory implements PackableMethodFactory {

    @Override
    public PackableMethod create(MethodDescriptor methodDescriptor, URL url, String contentType) {
        // contentType is ignored here: ReflectionPackableMethod derives the packing
        // strategy purely from the method signature and URL parameters.
        return ReflectionPackableMethod.init(methodDescriptor, url);
    }
}
| 6,113 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/ReflectionPackableMethod.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.serialize.MultipleSerialization;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.config.Constants;
import org.apache.dubbo.remoting.transport.CodecSupport;
import org.apache.dubbo.remoting.utils.UrlUtils;
import org.apache.dubbo.rpc.model.MethodDescriptor;
import org.apache.dubbo.rpc.model.Pack;
import org.apache.dubbo.rpc.model.PackableMethod;
import org.apache.dubbo.rpc.model.UnPack;
import org.apache.dubbo.rpc.model.WrapperUnPack;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.lang.reflect.ParameterizedType;
import java.util.Collection;
import java.util.Iterator;
import java.util.stream.Stream;
import com.google.protobuf.Message;
import static org.apache.dubbo.common.constants.CommonConstants.$ECHO;
import static org.apache.dubbo.common.constants.CommonConstants.PROTOBUF_MESSAGE_CLASS_NAME;
/**
 * Reflection-based {@link PackableMethod} implementation for the Triple protocol.
 * <p>
 * At construction time it inspects the {@link MethodDescriptor} to decide whether the
 * request/response payloads are protobuf messages (packed directly as protobuf bytes) or
 * plain Java objects (wrapped in Triple's request/response wrapper with a pluggable
 * {@link MultipleSerialization}).
 */
public class ReflectionPackableMethod implements PackableMethod {

    private static final String GRPC_ASYNC_RETURN_CLASS = "com.google.common.util.concurrent.ListenableFuture";
    private static final String TRI_ASYNC_RETURN_CLASS = "java.util.concurrent.CompletableFuture";
    private static final String REACTOR_RETURN_CLASS = "reactor.core.publisher.Mono";
    private static final String RX_RETURN_CLASS = "io.reactivex.Single";
    private static final String GRPC_STREAM_CLASS = "io.grpc.stub.StreamObserver";
    // Packs a protobuf Message via its own binary encoding.
    private static final Pack PB_PACK = o -> ((Message) o).toByteArray();

    // Client side: packs outgoing request arguments.
    private final Pack requestPack;
    // Server side: packs the outgoing response value.
    private final Pack responsePack;
    // Server side: unpacks incoming request arguments.
    private final UnPack requestUnpack;
    // Client side: unpacks the incoming response value.
    private final UnPack responseUnpack;
    // true when payloads must be wrapped (non-protobuf method signatures).
    private final boolean needWrapper;
    // Serializations permitted by the URL; the unpackers hold their own reference too.
    private final Collection<String> allSerialize;

    @Override
    public boolean needWrapper() {
        return this.needWrapper;
    }

    /**
     * Resolves the actual request/response payload types from the method signature and
     * selects pack/unpack strategies accordingly.
     *
     * @param method        descriptor of the service method
     * @param url           service URL carrying serialization configuration
     * @param serializeName default serialization name for outgoing payloads
     * @param allSerialize  serializations accepted for incoming payloads
     */
    public ReflectionPackableMethod(
            MethodDescriptor method, URL url, String serializeName, Collection<String> allSerialize) {
        Class<?>[] actualRequestTypes;
        Class<?> actualResponseType;
        // For streaming calls the payload types are the generic type arguments of the
        // StreamObserver parameter / return type rather than the raw signature types.
        switch (method.getRpcType()) {
            case CLIENT_STREAM:
            case BI_STREAM:
                // signature: StreamObserver<Req> foo(StreamObserver<Resp>) — TODO confirm ordering
                actualRequestTypes = new Class<?>[] {
                    (Class<?>)
                            ((ParameterizedType) method.getMethod().getGenericReturnType()).getActualTypeArguments()[0]
                };
                actualResponseType =
                        (Class<?>) ((ParameterizedType) method.getMethod().getGenericParameterTypes()[0])
                                .getActualTypeArguments()[0];
                break;
            case SERVER_STREAM:
                // signature: void foo(Req, StreamObserver<Resp>)
                actualRequestTypes = method.getMethod().getParameterTypes();
                actualResponseType =
                        (Class<?>) ((ParameterizedType) method.getMethod().getGenericParameterTypes()[1])
                                .getActualTypeArguments()[0];
                break;
            case UNARY:
                actualRequestTypes = method.getParameterClasses();
                actualResponseType = (Class<?>) method.getReturnTypes()[0];
                break;
            default:
                throw new IllegalStateException("Can not reach here");
        }

        // Streaming calls carry exactly one argument per message.
        boolean singleArgument = method.getRpcType() != MethodDescriptor.RpcType.UNARY;
        this.needWrapper = needWrap(method, actualRequestTypes, actualResponseType);
        if (!needWrapper) {
            // Pure protobuf method: pack/unpack payloads as raw protobuf bytes.
            requestPack = new PbArrayPacker(singleArgument);
            responsePack = PB_PACK;
            requestUnpack = new PbUnpack<>(actualRequestTypes[0]);
            responseUnpack = new PbUnpack<>(actualResponseType);
        } else {
            // Wrapped method: payloads are serialized with the configured
            // MultipleSerialization and enveloped in Triple wrapper messages.
            final MultipleSerialization serialization = url.getOrDefaultFrameworkModel()
                    .getExtensionLoader(MultipleSerialization.class)
                    .getExtension(url.getParameter(Constants.MULTI_SERIALIZATION_KEY, CommonConstants.DEFAULT_KEY));
            // client
            this.requestPack =
                    new WrapRequestPack(serialization, url, serializeName, actualRequestTypes, singleArgument);
            this.responseUnpack = new WrapResponseUnpack(serialization, url, allSerialize, actualResponseType);
            // server
            this.responsePack = new WrapResponsePack(serialization, url, serializeName, actualResponseType);
            this.requestUnpack = new WrapRequestUnpack(serialization, url, allSerialize, actualRequestTypes);
        }
        this.allSerialize = allSerialize;
    }

    /**
     * Factory that derives the serialization configuration from the URL and builds a
     * {@link ReflectionPackableMethod}.
     */
    public static ReflectionPackableMethod init(MethodDescriptor methodDescriptor, URL url) {
        String serializeName = UrlUtils.serializationOrDefault(url);
        Collection<String> allSerialize = UrlUtils.allSerializations(url);
        return new ReflectionPackableMethod(methodDescriptor, url, serializeName, allSerialize);
    }

    /** Returns true when the type is a Dubbo or gRPC stream observer. */
    static boolean isStreamType(Class<?> type) {
        return StreamObserver.class.isAssignableFrom(type) || GRPC_STREAM_CLASS.equalsIgnoreCase(type.getName());
    }

    /**
     * Determine if the request and response instance should be wrapped in Protobuf wrapper object
     *
     * @return true if the request and response object is not generated by protobuf
     * @throws IllegalStateException when the parameter combination is not a valid
     *                               Triple method shape
     */
    static boolean needWrap(MethodDescriptor methodDescriptor, Class<?>[] parameterClasses, Class<?> returnClass) {
        String methodName = methodDescriptor.getMethodName();
        // generic call must be wrapped
        if (CommonConstants.$INVOKE.equals(methodName) || CommonConstants.$INVOKE_ASYNC.equals(methodName)) {
            return true;
        }
        // echo must be wrapped
        if ($ECHO.equals(methodName)) {
            return true;
        }
        boolean returnClassProtobuf = isProtobufClass(returnClass);
        // Response foo()
        if (parameterClasses.length == 0) {
            return !returnClassProtobuf;
        }
        int protobufParameterCount = 0;
        int javaParameterCount = 0;
        int streamParameterCount = 0;
        boolean secondParameterStream = false;
        // count normal and protobuf param
        for (int i = 0; i < parameterClasses.length; i++) {
            Class<?> parameterClass = parameterClasses[i];
            if (isProtobufClass(parameterClass)) {
                protobufParameterCount++;
            } else {
                if (isStreamType(parameterClass)) {
                    if (i == 1) {
                        secondParameterStream = true;
                    }
                    streamParameterCount++;
                } else {
                    javaParameterCount++;
                }
            }
        }
        // more than one stream param
        if (streamParameterCount > 1) {
            throw new IllegalStateException("method params error: more than one Stream params. method=" + methodName);
        }
        // protobuf only support one param
        if (protobufParameterCount >= 2) {
            throw new IllegalStateException("method params error: more than one protobuf params. method=" + methodName);
        }
        // server stream support one normal param and one stream param
        if (streamParameterCount == 1) {
            if (javaParameterCount + protobufParameterCount > 1) {
                throw new IllegalStateException(
                        "method params error: server stream does not support more than one normal param." + " method="
                                + methodName);
            }
            // server stream: void foo(Request, StreamObserver<Response>)
            if (!secondParameterStream) {
                throw new IllegalStateException(
                        "method params error: server stream's second param must be StreamObserver." + " method="
                                + methodName);
            }
        }
        if (methodDescriptor.getRpcType() != MethodDescriptor.RpcType.UNARY) {
            if (MethodDescriptor.RpcType.SERVER_STREAM == methodDescriptor.getRpcType()) {
                if (!secondParameterStream) {
                    throw new IllegalStateException(
                            "method params error:server stream's second param must be StreamObserver." + " method="
                                    + methodName);
                }
            }
            // param type must be consistent
            if (returnClassProtobuf) {
                if (javaParameterCount > 0) {
                    throw new IllegalStateException(
                            "method params error: both normal and protobuf param found. method=" + methodName);
                }
            } else {
                if (protobufParameterCount > 0) {
                    throw new IllegalStateException("method params error method=" + methodName);
                }
            }
        } else {
            if (streamParameterCount > 0) {
                throw new IllegalStateException(
                        "method params error: unary method should not contain any StreamObserver." + " method="
                                + methodName);
            }
            if (protobufParameterCount > 0 && returnClassProtobuf) {
                return false;
            }
            // handler reactor or rxjava only consider gen by proto
            if (isMono(returnClass) || isRx(returnClass)) {
                return false;
            }
            if (protobufParameterCount <= 0 && !returnClassProtobuf) {
                return true;
            }
            // handle grpc stub only consider gen by proto
            if (GRPC_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName()) && protobufParameterCount == 1) {
                return false;
            }
            // handle dubbo generated method
            if (TRI_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName())) {
                // CompletableFuture<T>: decide based on the actual T, not the future itself.
                Class<?> actualReturnClass = (Class<?>)
                        ((ParameterizedType) methodDescriptor.getMethod().getGenericReturnType())
                                .getActualTypeArguments()[0];
                boolean actualReturnClassProtobuf = isProtobufClass(actualReturnClass);
                if (actualReturnClassProtobuf && protobufParameterCount == 1) {
                    return false;
                }
                if (!actualReturnClassProtobuf && protobufParameterCount == 0) {
                    return true;
                }
            }
            // todo remove this in future
            boolean ignore = checkNeedIgnore(returnClass);
            if (ignore) {
                return protobufParameterCount != 1;
            }
            throw new IllegalStateException("method params error method=" + methodName);
        }
        // java param should be wrapped
        return javaParameterCount > 0;
    }

    /**
     * fixme will produce error on grpc. but is harmless so ignore now
     */
    static boolean checkNeedIgnore(Class<?> returnClass) {
        return Iterator.class.isAssignableFrom(returnClass);
    }

    static boolean isMono(Class<?> clz) {
        return REACTOR_RETURN_CLASS.equalsIgnoreCase(clz.getName());
    }

    static boolean isRx(Class<?> clz) {
        return RX_RETURN_CLASS.equalsIgnoreCase(clz.getName());
    }

    /**
     * Walks the superclass chain checking whether any directly implemented interface is
     * named like the protobuf Message interface (by class name, so it works without a
     * compile-time protobuf dependency on the checked class).
     */
    static boolean isProtobufClass(Class<?> clazz) {
        while (clazz != Object.class && clazz != null) {
            Class<?>[] interfaces = clazz.getInterfaces();
            if (interfaces.length > 0) {
                for (Class<?> clazzInterface : interfaces) {
                    if (PROTOBUF_MESSAGE_CLASS_NAME.equalsIgnoreCase(clazzInterface.getName())) {
                        return true;
                    }
                }
            }
            clazz = clazz.getSuperclass();
        }
        return false;
    }

    /**
     * Convert hessian version from wrapper API version (hessian4) back to Dubbo's SPI
     * version (hessian2); other names pass through unchanged.
     */
    private static String convertHessianFromWrapper(String serializeType) {
        if (TripleConstant.HESSIAN4.equals(serializeType)) {
            return TripleConstant.HESSIAN2;
        }
        return serializeType;
    }

    @Override
    public Pack getRequestPack() {
        return requestPack;
    }

    @Override
    public Pack getResponsePack() {
        return responsePack;
    }

    @Override
    public UnPack getResponseUnpack() {
        return responseUnpack;
    }

    @Override
    public UnPack getRequestUnpack() {
        return requestUnpack;
    }

    /**
     * Server-side response packer: serializes the response object and wraps it in a
     * TripleResponseWrapper carrying the serialization name and declared type.
     */
    private static class WrapResponsePack implements Pack {

        private final MultipleSerialization multipleSerialization;
        private final URL url;
        private final Class<?> actualResponseType;
        // wrapper request set serialize type
        // (mutated by WrapRequestUnpack so the response echoes the request's serialization)
        String requestSerialize;

        private WrapResponsePack(
                MultipleSerialization multipleSerialization,
                URL url,
                String defaultSerialize,
                Class<?> actualResponseType) {
            this.multipleSerialization = multipleSerialization;
            this.url = url;
            this.actualResponseType = actualResponseType;
            this.requestSerialize = defaultSerialize;
        }

        @Override
        public byte[] pack(Object obj) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            multipleSerialization.serialize(url, requestSerialize, actualResponseType, obj, bos);
            return TripleCustomerProtocolWapper.TripleResponseWrapper.Builder.newBuilder()
                    .setSerializeType(requestSerialize)
                    .setType(actualResponseType.getName())
                    .setData(bos.toByteArray())
                    .build()
                    .toByteArray();
        }
    }

    /**
     * Client-side response unpacker: parses a TripleResponseWrapper, validates the
     * serialization against the allowed set, then deserializes the payload.
     */
    private static class WrapResponseUnpack implements WrapperUnPack {

        private final MultipleSerialization serialization;
        private final URL url;
        private final Class<?> returnClass;
        private final Collection<String> allSerialize;

        private WrapResponseUnpack(
                MultipleSerialization serialization, URL url, Collection<String> allSerialize, Class<?> returnClass) {
            this.serialization = serialization;
            this.url = url;
            this.returnClass = returnClass;
            this.allSerialize = allSerialize;
        }

        @Override
        public Object unpack(byte[] data) throws IOException, ClassNotFoundException {
            return unpack(data, false);
        }

        public Object unpack(byte[] data, boolean isReturnTriException) throws IOException, ClassNotFoundException {
            TripleCustomerProtocolWapper.TripleResponseWrapper wrapper =
                    TripleCustomerProtocolWapper.TripleResponseWrapper.parseFrom(data);
            final String serializeType = convertHessianFromWrapper(wrapper.getSerializeType());
            // Reject serializations not permitted by the URL to avoid unsafe deserialization.
            CodecSupport.checkSerialization(serializeType, allSerialize);
            ByteArrayInputStream bais = new ByteArrayInputStream(wrapper.getData());
            if (isReturnTriException) {
                // Payload is an exception raised by the server rather than a normal result.
                return serialization.deserialize(url, serializeType, Exception.class, bais);
            }
            return serialization.deserialize(url, serializeType, returnClass, bais);
        }
    }

    /**
     * Client-side request packer: serializes each argument and wraps them in a
     * TripleRequestWrapper carrying the serialization name and argument type names.
     */
    private static class WrapRequestPack implements Pack {

        private final String serialize;
        private final MultipleSerialization multipleSerialization;
        // Argument type names advertised in the wrapper, parallel to actualRequestTypes.
        private final String[] argumentsType;
        private final Class<?>[] actualRequestTypes;
        private final URL url;
        // true for streaming calls, where each pack() receives a single argument.
        private final boolean singleArgument;

        private WrapRequestPack(
                MultipleSerialization multipleSerialization,
                URL url,
                String serialize,
                Class<?>[] actualRequestTypes,
                boolean singleArgument) {
            this.url = url;
            this.serialize = convertHessianToWrapper(serialize);
            this.multipleSerialization = multipleSerialization;
            this.actualRequestTypes = actualRequestTypes;
            this.argumentsType =
                    Stream.of(actualRequestTypes).map(Class::getName).toArray(String[]::new);
            this.singleArgument = singleArgument;
        }

        @Override
        public byte[] pack(Object obj) throws IOException {
            Object[] arguments;
            if (singleArgument) {
                arguments = new Object[] {obj};
            } else {
                arguments = (Object[]) obj;
            }
            final TripleCustomerProtocolWapper.TripleRequestWrapper.Builder builder =
                    TripleCustomerProtocolWapper.TripleRequestWrapper.Builder.newBuilder();
            builder.setSerializeType(serialize);
            for (String type : argumentsType) {
                builder.addArgTypes(type);
            }
            // Reuse one buffer across arguments, resetting between serializations.
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            for (int i = 0; i < arguments.length; i++) {
                Object argument = arguments[i];
                multipleSerialization.serialize(url, serialize, actualRequestTypes[i], argument, bos);
                builder.addArgs(bos.toByteArray());
                bos.reset();
            }
            return builder.build().toByteArray();
        }

        /**
         * Convert hessian version from Dubbo's SPI version(hessian2) to wrapper API version
         * (hessian4)
         *
         * @param serializeType literal type
         * @return hessian4 if the param is hessian2, otherwise return the param
         */
        private String convertHessianToWrapper(String serializeType) {
            if (TripleConstant.HESSIAN2.equals(serializeType)) {
                return TripleConstant.HESSIAN4;
            }
            return serializeType;
        }
    }

    /**
     * Server-side request unpacker: parses a TripleRequestWrapper, validates the
     * serialization, and deserializes each argument.
     * <p>
     * Deliberately a non-static inner class: it records the request's serialization on
     * the outer instance's {@link WrapResponsePack} so the response is packed with the
     * same serialization the client used.
     */
    private class WrapRequestUnpack implements WrapperUnPack {

        private final MultipleSerialization serialization;
        private final URL url;
        private final Class<?>[] actualRequestTypes;
        private final Collection<String> allSerialize;

        private WrapRequestUnpack(
                MultipleSerialization serialization,
                URL url,
                Collection<String> allSerialize,
                Class<?>[] actualRequestTypes) {
            this.serialization = serialization;
            this.url = url;
            this.actualRequestTypes = actualRequestTypes;
            this.allSerialize = allSerialize;
        }

        public Object unpack(byte[] data, boolean isReturnTriException) throws IOException, ClassNotFoundException {
            TripleCustomerProtocolWapper.TripleRequestWrapper wrapper =
                    TripleCustomerProtocolWapper.TripleRequestWrapper.parseFrom(data);
            String wrapperSerializeType = convertHessianFromWrapper(wrapper.getSerializeType());
            // Reject serializations not permitted by the URL to avoid unsafe deserialization.
            CodecSupport.checkSerialization(wrapperSerializeType, allSerialize);
            Object[] ret = new Object[wrapper.getArgs().size()];
            // Echo the client's serialization back on the response path.
            ((WrapResponsePack) responsePack).requestSerialize = wrapper.getSerializeType();
            for (int i = 0; i < wrapper.getArgs().size(); i++) {
                ByteArrayInputStream bais =
                        new ByteArrayInputStream(wrapper.getArgs().get(i));
                ret[i] = serialization.deserialize(url, wrapper.getSerializeType(), actualRequestTypes[i], bais);
            }
            return ret;
        }
    }
}
| 6,114 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/CancelableStreamObserver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.CancellationContext;
import org.apache.dubbo.rpc.protocol.tri.observer.ClientCallToObserverAdapter;
/**
 * A {@link StreamObserver} base class whose stream can be cancelled through an attached
 * {@link CancellationContext}. Subclasses may override the lifecycle hooks below.
 *
 * @param <T> the message type observed
 */
public abstract class CancelableStreamObserver<T> implements StreamObserver<T> {

    // Attached via setCancellationContext; presumably set by the framework before the
    // call starts — null until then, confirm with callers.
    private CancellationContext cancellationContext;

    public void setCancellationContext(CancellationContext cancellationContext) {
        this.cancellationContext = cancellationContext;
    }

    public CancellationContext getCancellationContext() {
        return cancellationContext;
    }

    /**
     * Cancels the call through the attached context.
     *
     * @param throwable the cause of the cancellation
     */
    public void cancel(Throwable throwable) {
        // NOTE(review): throws NPE if no context was attached — confirm this cannot
        // happen before the framework calls setCancellationContext.
        cancellationContext.cancel(throwable);
    }

    /**
     * Hook invoked before the client call starts; default is a no-op.
     */
    public void beforeStart(final ClientCallToObserverAdapter<T> clientCallToObserverAdapter) {
        // do nothing
    }

    /**
     * Hook invoked when the request is started; default is a no-op.
     */
    public void startRequest() {
        // do nothing
    }
}
| 6,115 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/TripleHttp2FrameCodecBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.utils.Assert;
import java.util.function.Consumer;
import io.netty.handler.codec.http2.DefaultHttp2Connection;
import io.netty.handler.codec.http2.Http2CodecUtil;
import io.netty.handler.codec.http2.Http2Connection;
import io.netty.handler.codec.http2.Http2FrameCodecBuilder;
import io.netty.handler.codec.http2.Http2LocalFlowController;
import io.netty.handler.codec.http2.Http2RemoteFlowController;
/**
 * An {@link Http2FrameCodecBuilder} variant used by the Triple protocol that exposes the
 * underlying {@link Http2Connection} for customization, in particular for installing
 * custom local/remote flow controllers before the codec is built.
 */
public class TripleHttp2FrameCodecBuilder extends Http2FrameCodecBuilder {

    TripleHttp2FrameCodecBuilder(Http2Connection connection) {
        connection(connection);
    }

    /** Creates a builder backed by the given pre-constructed connection. */
    public static TripleHttp2FrameCodecBuilder fromConnection(Http2Connection connection) {
        return new TripleHttp2FrameCodecBuilder(connection);
    }

    /** Creates a client-side builder with the smallest allowed reserved-stream count. */
    public static TripleHttp2FrameCodecBuilder forClient() {
        return forClient(Http2CodecUtil.SMALLEST_MAX_CONCURRENT_STREAMS);
    }

    /** Creates a client-side builder with the given reserved-stream count. */
    public static TripleHttp2FrameCodecBuilder forClient(int maxReservedStreams) {
        Http2Connection clientConnection = new DefaultHttp2Connection(false, maxReservedStreams);
        return fromConnection(clientConnection);
    }

    /** Creates a server-side builder with the smallest allowed reserved-stream count. */
    public static TripleHttp2FrameCodecBuilder forServer() {
        return forServer(Http2CodecUtil.SMALLEST_MAX_CONCURRENT_STREAMS);
    }

    /** Creates a server-side builder with the given reserved-stream count. */
    public static TripleHttp2FrameCodecBuilder forServer(int maxReservedStreams) {
        Http2Connection serverConnection = new DefaultHttp2Connection(true, maxReservedStreams);
        return fromConnection(serverConnection);
    }

    /**
     * Applies the given customizer to the builder's connection.
     *
     * @param connectionCustomizer callback receiving the non-null connection
     * @return this builder, for chaining
     */
    public TripleHttp2FrameCodecBuilder customizeConnection(Consumer<Http2Connection> connectionCustomizer) {
        Http2Connection current = connection();
        Assert.notNull(current, "connection cannot be null.");
        connectionCustomizer.accept(current);
        return this;
    }

    /** Installs a custom remote (outbound) flow controller on the connection. */
    public TripleHttp2FrameCodecBuilder remoteFlowController(Http2RemoteFlowController remoteFlowController) {
        return customizeConnection(conn -> conn.remote().flowController(remoteFlowController));
    }

    /** Installs a custom local (inbound) flow controller on the connection. */
    public TripleHttp2FrameCodecBuilder localFlowController(Http2LocalFlowController localFlowController) {
        return customizeConnection(conn -> conn.local().flowController(localFlowController));
    }
}
| 6,116 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/TriplePathResolver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.PathResolver;

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Default {@link PathResolver} for the Triple protocol, mapping request paths to
 * {@link Invoker}s and tracking which paths are served by native gRPC stubs.
 * Backed by concurrent collections, so safe for concurrent registration and lookup.
 */
public class TriplePathResolver implements PathResolver {

    // path -> invoker; declared as Map (program to the interface), backed by a
    // thread-safe ConcurrentHashMap.
    private final Map<String, Invoker<?>> path2Invoker = new ConcurrentHashMap<>();

    // Paths backed by native stubs; a concurrent set replaces the previous
    // Map<String, Object>-used-as-set with identical semantics.
    private final Set<String> nativeStub = ConcurrentHashMap.newKeySet();

    @Override
    public Invoker<?> add(String path, Invoker<?> invoker) {
        return path2Invoker.put(path, invoker);
    }

    @Override
    public Invoker<?> addIfAbsent(String path, Invoker<?> invoker) {
        return path2Invoker.putIfAbsent(path, invoker);
    }

    @Override
    public Invoker<?> resolve(String path) {
        return path2Invoker.get(path);
    }

    @Override
    public boolean hasNativeStub(String path) {
        return nativeStub.contains(path);
    }

    @Override
    public void addNativeStub(String path) {
        nativeStub.add(path);
    }

    @Override
    public void remove(String path) {
        path2Invoker.remove(path);
    }

    @Override
    public void destroy() {
        path2Invoker.clear();
        // Also release native-stub registrations; previously these were leaked on destroy.
        nativeStub.clear();
    }
}
| 6,117 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/TripleHttp2Protocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.config.Configuration;
import org.apache.dubbo.common.config.ConfigurationUtils;
import org.apache.dubbo.common.extension.Activate;
import org.apache.dubbo.common.extension.ExtensionLoader;
import org.apache.dubbo.common.threadpool.manager.ExecutorRepository;
import org.apache.dubbo.remoting.ChannelHandler;
import org.apache.dubbo.remoting.api.AbstractWireProtocol;
import org.apache.dubbo.remoting.api.pu.ChannelHandlerPretender;
import org.apache.dubbo.remoting.api.pu.ChannelOperator;
import org.apache.dubbo.remoting.api.ssl.ContextOperator;
import org.apache.dubbo.remoting.utils.UrlUtils;
import org.apache.dubbo.rpc.HeaderFilter;
import org.apache.dubbo.rpc.executor.ExecutorSupport;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.model.ScopeModelAware;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleCommandOutBoundHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleGoAwayHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleHttp2FrameServerHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleServerConnectionHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleTailHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleWriteQueue;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.http2.Http2FrameCodec;
import io.netty.handler.codec.http2.Http2FrameLogger;
import io.netty.handler.codec.http2.Http2MultiplexHandler;
import io.netty.handler.codec.http2.Http2Settings;
import io.netty.handler.codec.http2.Http2StreamChannel;
import io.netty.handler.flush.FlushConsolidationHandler;
import io.netty.handler.logging.LogLevel;
import static org.apache.dubbo.common.constants.CommonConstants.HEADER_FILTER_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_ENABLE_PUSH_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_HEADER_TABLE_SIZE_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_INITIAL_WINDOW_SIZE_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_MAX_CONCURRENT_STREAMS_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_MAX_FRAME_SIZE_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_MAX_HEADER_LIST_SIZE_KEY;
@Activate
public class TripleHttp2Protocol extends AbstractWireProtocol implements ScopeModelAware {

    // 1 MiB
    private static final int MIB_1 = 1 << 20;
    private static final int MIB_8 = 1 << 23;
    private static final int KIB_32 = 1 << 15;
    private static final int DEFAULT_MAX_HEADER_LIST_SIZE = KIB_32;
    private static final int DEFAULT_SETTING_HEADER_LIST_SIZE = 4096;
    private static final int DEFAULT_MAX_FRAME_SIZE = MIB_8;
    private static final int DEFAULT_WINDOW_INIT_SIZE = MIB_8;

    public static final Http2FrameLogger CLIENT_LOGGER = new Http2FrameLogger(LogLevel.DEBUG, "H2_CLIENT");

    public static final Http2FrameLogger SERVER_LOGGER = new Http2FrameLogger(LogLevel.DEBUG, "H2_SERVER");

    private ExtensionLoader<HeaderFilter> filtersLoader;

    private FrameworkModel frameworkModel;

    public TripleHttp2Protocol() {
        // Detect incoming HTTP/2 connections by the client connection preface.
        super(new Http2ProtocolDetector());
    }

    @Override
    public void setFrameworkModel(FrameworkModel frameworkModel) {
        this.frameworkModel = frameworkModel;
        this.filtersLoader = frameworkModel.getExtensionLoader(HeaderFilter.class);
    }

    @Override
    public void close() {
        super.close();
    }

    /**
     * Configures the server-side channel pipeline: HTTP/2 frame codec, flush
     * consolidation, connection lifecycle handling, and per-stream triple
     * request handlers.
     *
     * @param url      the exporting URL; supplies configuration and executor support
     * @param operator channel operator used to install the handlers
     */
    @Override
    public void configServerProtocolHandler(URL url, ChannelOperator operator) {
        Configuration config = ConfigurationUtils.getGlobalConfiguration(url.getOrDefaultApplicationModel());
        final List<HeaderFilter> headFilters = filtersLoader != null
                ? filtersLoader.getActivateExtension(url, HEADER_FILTER_KEY)
                : Collections.emptyList();
        final Http2FrameCodec codec = TripleHttp2FrameCodecBuilder.forServer()
                .customizeConnection((connection) -> connection
                        .remote()
                        .flowController(
                                new TriHttp2RemoteFlowController(connection, url.getOrDefaultApplicationModel())))
                .gracefulShutdownTimeoutMillis(10000)
                .initialSettings(buildHttp2Settings(config, false))
                .frameLogger(SERVER_LOGGER)
                .build();
        ExecutorSupport executorSupport = ExecutorRepository.getInstance(url.getOrDefaultApplicationModel())
                .getExecutorSupport(url);
        codec.connection().local().flowController().frameWriter(codec.encoder().frameWriter());
        TripleWriteQueue writeQueue = new TripleWriteQueue();
        // One child channel per HTTP/2 stream; each stream gets its own triple handlers.
        final Http2MultiplexHandler handler = new Http2MultiplexHandler(new ChannelInitializer<Http2StreamChannel>() {
            @Override
            protected void initChannel(Http2StreamChannel ch) {
                final ChannelPipeline p = ch.pipeline();
                p.addLast(new TripleCommandOutBoundHandler());
                p.addLast(new TripleHttp2FrameServerHandler(
                        frameworkModel, executorSupport, headFilters, ch, writeQueue));
            }
        });
        List<ChannelHandler> handlers = new ArrayList<>();
        handlers.add(new ChannelHandlerPretender(codec));
        handlers.add(new ChannelHandlerPretender(new FlushConsolidationHandler(64, true)));
        handlers.add(new ChannelHandlerPretender(new TripleServerConnectionHandler()));
        handlers.add(new ChannelHandlerPretender(handler));
        handlers.add(new ChannelHandlerPretender(new TripleTailHandler()));
        operator.configChannelHandler(handlers);
    }

    /**
     * Configures the client-side channel pipeline: HTTP/2 frame codec,
     * stream multiplexing, GOAWAY handling and ping-based keepalive.
     *
     * @param url             the referring URL; supplies configuration
     * @param operator        channel operator used to install the handlers
     * @param contextOperator SSL context operator (unused here; TLS is handled upstream)
     */
    @Override
    public void configClientPipeline(URL url, ChannelOperator operator, ContextOperator contextOperator) {
        Configuration config = ConfigurationUtils.getGlobalConfiguration(url.getOrDefaultApplicationModel());
        final Http2FrameCodec codec = TripleHttp2FrameCodecBuilder.forClient()
                .customizeConnection((connection) -> connection
                        .remote()
                        .flowController(
                                new TriHttp2RemoteFlowController(connection, url.getOrDefaultApplicationModel())))
                .gracefulShutdownTimeoutMillis(10000)
                .initialSettings(buildHttp2Settings(config, true))
                .frameLogger(CLIENT_LOGGER)
                .build();
        codec.connection().local().flowController().frameWriter(codec.encoder().frameWriter());
        List<ChannelHandler> handlers = new ArrayList<>();
        handlers.add(new ChannelHandlerPretender(codec));
        handlers.add(new ChannelHandlerPretender(new Http2MultiplexHandler(new ChannelDuplexHandler())));
        handlers.add(new ChannelHandlerPretender(new TripleGoAwayHandler()));
        handlers.add(new ChannelHandlerPretender(new TriplePingPongHandler(UrlUtils.getCloseTimeout(url))));
        handlers.add(new ChannelHandlerPretender(new TripleTailHandler()));
        operator.configChannelHandler(handlers);
    }

    /**
     * Builds the HTTP/2 initial settings from global configuration.
     *
     * <p>Server and client share every setting except {@code SETTINGS_ENABLE_PUSH},
     * which is only meaningful when sent by a client.
     *
     * @param config the global configuration to read settings from
     * @param client whether the settings are for the client side
     * @return the populated {@link Http2Settings}
     */
    private static Http2Settings buildHttp2Settings(Configuration config, boolean client) {
        Http2Settings settings = new Http2Settings()
                .headerTableSize(config.getInt(H2_SETTINGS_HEADER_TABLE_SIZE_KEY, DEFAULT_SETTING_HEADER_LIST_SIZE))
                .maxConcurrentStreams(config.getInt(H2_SETTINGS_MAX_CONCURRENT_STREAMS_KEY, Integer.MAX_VALUE))
                .initialWindowSize(config.getInt(H2_SETTINGS_INITIAL_WINDOW_SIZE_KEY, DEFAULT_WINDOW_INIT_SIZE))
                .maxFrameSize(config.getInt(H2_SETTINGS_MAX_FRAME_SIZE_KEY, DEFAULT_MAX_FRAME_SIZE))
                .maxHeaderListSize(config.getInt(H2_SETTINGS_MAX_HEADER_LIST_SIZE_KEY, DEFAULT_MAX_HEADER_LIST_SIZE));
        if (client) {
            settings.pushEnabled(config.getBoolean(H2_SETTINGS_ENABLE_PUSH_KEY, false));
        }
        return settings;
    }
}
| 6,118 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/Http2ProtocolDetector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.remoting.api.ProtocolDetector;
import org.apache.dubbo.remoting.buffer.ByteBufferBackedChannelBuffer;
import org.apache.dubbo.remoting.buffer.ChannelBuffer;
import org.apache.dubbo.remoting.buffer.ChannelBuffers;
import io.netty.handler.codec.http2.Http2CodecUtil;
import static java.lang.Math.min;
public class Http2ProtocolDetector implements ProtocolDetector {

    /** The fixed HTTP/2 client connection preface, wrapped as a Dubbo channel buffer. */
    private final ChannelBuffer clientPrefaceString =
            new ByteBufferBackedChannelBuffer(Http2CodecUtil.connectionPrefaceBuf().nioBuffer());

    /**
     * Compares the readable bytes against the HTTP/2 connection preface.
     * Recognizes the protocol once the whole preface has matched, asks for
     * more data on a partial match, and rejects on any mismatch.
     */
    @Override
    public Result detect(ChannelBuffer in) {
        final int prefaceLen = clientPrefaceString.readableBytes();
        final int comparable = min(in.readableBytes(), prefaceLen);
        // Nothing readable yet, or the bytes seen so far diverge from the preface.
        if (comparable == 0 || !ChannelBuffers.prefixEquals(in, clientPrefaceString, comparable)) {
            return Result.UNRECOGNIZED;
        }
        // Full preface matched => HTTP/2; otherwise wait for the rest.
        return comparable == prefaceLen ? Result.RECOGNIZED : Result.NEED_MORE_DATA;
    }
}
| 6,119 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/GrpcProtocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.rpc.model.FrameworkModel;
/**
 * gRPC wire protocol, exposed as a thin alias of the triple protocol: the two
 * share the same HTTP/2 transport and framing, so this subclass adds no behavior
 * of its own.
 */
public class GrpcProtocol extends TripleProtocol {
    public GrpcProtocol(FrameworkModel frameworkModel) {
        super(frameworkModel);
    }
}
| 6,120 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleCommandOutBoundHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.rpc.protocol.tri.command.QueuedCommand;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
/**
 * Outbound handler that intercepts {@link QueuedCommand} messages and lets the
 * command serialize itself onto the channel; everything else passes through
 * unchanged.
 */
public class TripleCommandOutBoundHandler extends ChannelOutboundHandlerAdapter {

    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
        // Non-command writes continue down the pipeline untouched.
        if (!(msg instanceof QueuedCommand)) {
            super.write(ctx, msg, promise);
            return;
        }
        ((QueuedCommand) msg).send(ctx, promise);
    }
}
| 6,121 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleGoAwayHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.common.logger.Logger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.remoting.Constants;
import org.apache.dubbo.remoting.api.connection.ConnectionHandler;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http2.Http2GoAwayFrame;
import io.netty.util.ReferenceCountUtil;
/**
 * Client-side handler that reacts to HTTP/2 GOAWAY frames by notifying the
 * connection handler so the connection can be re-established later. Every
 * inbound message is released after processing.
 */
public class TripleGoAwayHandler extends ChannelDuplexHandler {

    private static final Logger logger = LoggerFactory.getLogger(TripleGoAwayHandler.class);

    public TripleGoAwayHandler() {}

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        if (msg instanceof Http2GoAwayFrame) {
            if (logger.isInfoEnabled()) {
                logger.info("Receive go away frame of " + ctx.channel().localAddress() + " -> "
                        + ctx.channel().remoteAddress() + " and will reconnect later.");
            }
            // The connection handler owns reconnect scheduling for this channel.
            final ConnectionHandler connHandler =
                    (ConnectionHandler) ctx.pipeline().get(Constants.CONNECTION_HANDLER_NAME);
            connHandler.onGoAway(ctx.channel());
        }
        // Release unconditionally; this handler is the terminal consumer of the frame.
        ReferenceCountUtil.release(msg);
    }
}
| 6,122 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleTailHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ReferenceCounted;
/**
 * Terminal inbound handler: silently releases any reference-counted message
 * that reached the end of the pipeline, preventing buffer leaks and Netty's
 * "unhandled message" noise.
 */
public class TripleTailHandler extends ChannelInboundHandlerAdapter {

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        if (msg instanceof ReferenceCounted) {
            ((ReferenceCounted) msg).release();
        }
    }
}
| 6,123 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleIsolationExecutorSupportFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.rpc.executor.ExecutorSupport;
import org.apache.dubbo.rpc.executor.IsolationExecutorSupportFactory;
/**
 * Factory that creates triple-protocol specific isolation executor supports,
 * used to pick a per-service executor for incoming requests.
 */
public class TripleIsolationExecutorSupportFactory implements IsolationExecutorSupportFactory {
    @Override
    public ExecutorSupport createIsolationExecutorSupport(URL url) {
        return new TripleIsolationExecutorSupport(url);
    }
}
| 6,124 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleHttp2ClientResponseHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.rpc.TriRpcStatus;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http2.Http2DataFrame;
import io.netty.handler.codec.http2.Http2Error;
import io.netty.handler.codec.http2.Http2GoAwayFrame;
import io.netty.handler.codec.http2.Http2HeadersFrame;
import io.netty.handler.codec.http2.Http2ResetFrame;
import io.netty.handler.codec.http2.Http2StreamFrame;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_SERIALIZE_TRIPLE;
/**
 * Client-side stream handler that forwards HTTP/2 HEADERS and DATA frames to an
 * {@link H2TransportListener}, and cancels the transport on RST_STREAM, GOAWAY,
 * channel inactivity or pipeline exceptions.
 */
public final class TripleHttp2ClientResponseHandler extends SimpleChannelInboundHandler<Http2StreamFrame> {
    private static final ErrorTypeAwareLogger LOGGER =
            LoggerFactory.getErrorTypeAwareLogger(TripleHttp2ClientResponseHandler.class);
    private final H2TransportListener transportListener;

    public TripleHttp2ClientResponseHandler(H2TransportListener listener) {
        // autoRelease=false: frame payloads are handed to the transport listener,
        // which takes over their lifecycle.
        super(false);
        this.transportListener = listener;
    }

    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        super.userEventTriggered(ctx, evt);
        if (evt instanceof Http2GoAwayFrame) {
            Http2GoAwayFrame event = (Http2GoAwayFrame) evt;
            ctx.close();
            // Guard the concatenation so the message is only built when debug is on.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Event triggered, event name is: " + event.name() + ", last stream id is: "
                        + event.lastStreamId());
            }
        } else if (evt instanceof Http2ResetFrame) {
            onResetRead(ctx, (Http2ResetFrame) evt);
        }
    }

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, Http2StreamFrame msg) throws Exception {
        if (msg instanceof Http2HeadersFrame) {
            final Http2HeadersFrame headers = (Http2HeadersFrame) msg;
            transportListener.onHeader(headers.headers(), headers.isEndStream());
        } else if (msg instanceof Http2DataFrame) {
            final Http2DataFrame data = (Http2DataFrame) msg;
            transportListener.onData(data.content(), data.isEndStream());
        } else {
            // Unknown stream frame types continue down the pipeline.
            super.channelRead(ctx, msg);
        }
    }

    /** Propagates a remote RST_STREAM to the transport listener and closes the stream. */
    private void onResetRead(ChannelHandlerContext ctx, Http2ResetFrame resetFrame) {
        LOGGER.warn(
                PROTOCOL_FAILED_SERIALIZE_TRIPLE,
                "",
                "",
                "Triple Client received remote reset errorCode=" + resetFrame.errorCode());
        transportListener.cancelByRemote(resetFrame.errorCode());
        ctx.close();
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) {
        ctx.close();
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        final TriRpcStatus status = TriRpcStatus.INTERNAL.withCause(cause);
        LOGGER.warn(
                PROTOCOL_FAILED_SERIALIZE_TRIPLE,
                "",
                "",
                "Meet Exception on ClientResponseHandler, status code is: " + status.code,
                cause);
        transportListener.cancelByRemote(Http2Error.INTERNAL_ERROR.code());
        ctx.close();
    }
}
| 6,125 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/AbstractH2TransportListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.utils.JsonUtils;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.TripleConstant;
import org.apache.dubbo.rpc.protocol.tri.TripleHeaderEnum;
import org.apache.dubbo.rpc.protocol.tri.stream.StreamUtils;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;
import io.netty.handler.codec.http2.Http2Headers;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.INTERNAL_ERROR;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_PARSE;
/**
 * Base class for HTTP/2 transport listeners providing shared conversion between
 * HTTP/2 headers/trailers and Dubbo attachment maps.
 */
public abstract class AbstractH2TransportListener implements H2TransportListener {
    private static final ErrorTypeAwareLogger LOGGER =
            LoggerFactory.getErrorTypeAwareLogger(AbstractH2TransportListener.class);

    /**
     * Parse metadata to a KV pairs map.
     *
     * <p>Keys ending in the binary suffix are base64/ASCII-decoded to byte arrays
     * (suffix stripped); all other values are kept as strings. If the supplier
     * yields a JSON mapping of lower-cased keys to their original-cased forms,
     * matching attachment keys are renamed back to their original casing.
     *
     * @param trailers the metadata from remote
     * @param convertUpperHeaderSupplier supplies the lower-to-original key mapping
     *        (expected to be a JSON String), or {@code null}/absent to skip renaming
     * @return KV pairs map
     */
    protected Map<String, Object> headersToMap(Http2Headers trailers, Supplier<Object> convertUpperHeaderSupplier) {
        if (trailers == null) {
            return Collections.emptyMap();
        }
        Map<String, Object> attachments = new HashMap<>(trailers.size());
        for (Map.Entry<CharSequence, CharSequence> header : trailers) {
            String key = header.getKey().toString();
            // "-bin"-suffixed headers carry binary payloads; decode and strip the suffix.
            if (key.endsWith(TripleConstant.HEADER_BIN_SUFFIX)
                    && key.length() > TripleConstant.HEADER_BIN_SUFFIX.length()) {
                try {
                    String realKey = key.substring(0, key.length() - TripleConstant.HEADER_BIN_SUFFIX.length());
                    byte[] value = StreamUtils.decodeASCIIByte(header.getValue());
                    attachments.put(realKey, value);
                } catch (Exception e) {
                    // A single undecodable attachment is logged and skipped, not fatal.
                    LOGGER.error(PROTOCOL_FAILED_PARSE, "", "", "Failed to parse response attachment key=" + key, e);
                }
            } else {
                attachments.put(key, header.getValue().toString());
            }
        }
        // try converting upper key
        Object obj = convertUpperHeaderSupplier.get();
        if (obj == null) {
            return attachments;
        }
        if (obj instanceof String) {
            // The mapping arrives percent-encoded; decode before JSON parsing.
            String json = TriRpcStatus.decodeMessage((String) obj);
            Map<String, String> map = JsonUtils.toJavaObject(json, Map.class);
            // Rename each present lower-cased key to its original-cased form.
            for (Map.Entry<String, String> entry : map.entrySet()) {
                Object val = attachments.remove(entry.getKey());
                if (val != null) {
                    attachments.put(entry.getValue(), val);
                }
            }
        } else {
            // If convertUpperHeaderSupplier does not return String, just fail...
            // Internal invocation, use INTERNAL_ERROR instead.
            LOGGER.error(
                    INTERNAL_ERROR,
                    "wrong internal invocation",
                    "",
                    "Triple convertNoLowerCaseHeader error, obj is not String");
        }
        return attachments;
    }

    /**
     * Removes protocol-reserved headers from the given trailers and returns them
     * as a separate map, so they are not exposed as user attachments.
     *
     * @param trailers headers to filter in place; may be {@code null}
     * @return the removed reserved headers (possibly empty)
     */
    protected Map<String, String> filterReservedHeaders(Http2Headers trailers) {
        if (trailers == null) {
            return Collections.emptyMap();
        }
        Map<String, String> excludeHeaders = new HashMap<>(trailers.size());
        for (Map.Entry<CharSequence, CharSequence> header : trailers) {
            String key = header.getKey().toString();
            if (TripleHeaderEnum.containsExcludeAttachments(key)) {
                excludeHeaders.put(key, trailers.getAndRemove(key).toString());
            }
        }
        return excludeHeaders;
    }
}
| 6,126 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleServerConnectionHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import java.io.IOException;
import java.net.SocketException;
import java.util.HashSet;
import java.util.Set;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.DefaultHttp2ResetFrame;
import io.netty.handler.codec.http2.Http2ChannelDuplexHandler;
import io.netty.handler.codec.http2.Http2Error;
import io.netty.handler.codec.http2.Http2GoAwayFrame;
import io.netty.handler.codec.http2.Http2PingFrame;
import io.netty.util.ReferenceCountUtil;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_RESPONSE;
import static org.apache.dubbo.rpc.protocol.tri.transport.GracefulShutdown.GRACEFUL_SHUTDOWN_PING;
/**
 * Server-side connection handler: coordinates graceful shutdown via the
 * GOAWAY + ping-ack handshake, resets active streams when the connection goes
 * inactive, and logs (or quiets) connection-level errors.
 */
public class TripleServerConnectionHandler extends Http2ChannelDuplexHandler {
    private static final ErrorTypeAwareLogger logger =
            LoggerFactory.getErrorTypeAwareLogger(TripleServerConnectionHandler.class);
    // Some exceptions are not very useful and add too much noise to the log
    private static final Set<String> QUIET_EXCEPTIONS = new HashSet<>();
    private static final Set<Class<?>> QUIET_EXCEPTIONS_CLASS = new HashSet<>();

    static {
        QUIET_EXCEPTIONS.add("NativeIoException");
        QUIET_EXCEPTIONS_CLASS.add(IOException.class);
        QUIET_EXCEPTIONS_CLASS.add(SocketException.class);
    }

    // Lazily created on the first close(); drives the two-phase GOAWAY shutdown.
    private GracefulShutdown gracefulShutdown;

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        if (msg instanceof Http2PingFrame) {
            // The ping ack with the shutdown marker means the peer has seen the
            // first GOAWAY; we can now send the final GOAWAY and close.
            if (((Http2PingFrame) msg).content() == GRACEFUL_SHUTDOWN_PING) {
                if (gracefulShutdown == null) {
                    // this should never happen
                    logger.warn(
                            PROTOCOL_FAILED_RESPONSE,
                            "",
                            "",
                            "Received GRACEFUL_SHUTDOWN_PING Ack but gracefulShutdown is null");
                } else {
                    gracefulShutdown.secondGoAwayAndClose(ctx);
                }
            }
        } else if (msg instanceof Http2GoAwayFrame) {
            ReferenceCountUtil.release(msg);
        } else {
            super.channelRead(ctx, msg);
        }
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
        super.channelInactive(ctx);
        // reset all active stream on connection close
        forEachActiveStream(stream -> {
            DefaultHttp2ResetFrame resetFrame = new DefaultHttp2ResetFrame(Http2Error.NO_ERROR).stream(stream);
            ctx.fireChannelRead(resetFrame);
            return true;
        });
    }

    /** Returns whether the throwable is routine connection noise that should only be debug-logged. */
    private boolean isQuietException(Throwable t) {
        if (QUIET_EXCEPTIONS_CLASS.contains(t.getClass())) {
            return true;
        }
        return QUIET_EXCEPTIONS.contains(t.getClass().getSimpleName());
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // this may be change in future follow https://github.com/apache/dubbo/pull/8644
        if (isQuietException(cause)) {
            if (logger.isDebugEnabled()) {
                logger.debug(String.format("Channel:%s Error", ctx.channel()), cause);
            }
        } else {
            logger.warn(PROTOCOL_FAILED_RESPONSE, "", "", String.format("Channel:%s Error", ctx.channel()), cause);
        }
        ctx.close();
    }

    @Override
    public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
        if (gracefulShutdown == null) {
            gracefulShutdown = new GracefulShutdown(ctx, "app_requested", promise);
        }
        gracefulShutdown.gracefulShutdown();
    }
}
| 6,127 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/WriteQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.rpc.protocol.tri.command.QueuedCommand;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelPromise;
@Deprecated
/**
 * Batching write queue: commands are enqueued from any thread and drained on the
 * parent channel's event loop, flushing in chunks of {@link #DEQUE_CHUNK_SIZE}.
 *
 * @deprecated superseded by {@code TripleWriteQueue}.
 */
public class WriteQueue {

    static final int DEQUE_CHUNK_SIZE = 128;
    private final Queue<QueuedCommand> queue;
    // True while a drain task is scheduled/running on the event loop.
    private final AtomicBoolean scheduled;

    public WriteQueue() {
        queue = new ConcurrentLinkedQueue<>();
        scheduled = new AtomicBoolean(false);
    }

    public ChannelFuture enqueue(QueuedCommand command, boolean rst) {
        return enqueue(command);
    }

    /**
     * Adds a command to the queue and ensures a drain task is scheduled.
     *
     * @return the promise that completes when the command has been written
     */
    public ChannelFuture enqueue(QueuedCommand command) {
        ChannelPromise promise = command.promise();
        if (promise == null) {
            Channel ch = command.channel();
            promise = ch.newPromise();
            command.promise(promise);
        }
        queue.add(command);
        scheduleFlush(command.channel());
        return promise;
    }

    /** Schedules a drain on the parent channel's event loop if none is pending. */
    public void scheduleFlush(Channel ch) {
        if (scheduled.compareAndSet(false, true)) {
            ch.parent().eventLoop().execute(this::flush);
        }
    }

    /** Drains the queue on the event loop, flushing every DEQUE_CHUNK_SIZE commands. */
    private void flush() {
        Channel ch = null;
        try {
            QueuedCommand cmd;
            int i = 0;
            boolean flushedOnce = false;
            while ((cmd = queue.poll()) != null) {
                ch = cmd.channel();
                cmd.run(ch);
                i++;
                if (i == DEQUE_CHUNK_SIZE) {
                    i = 0;
                    ch.parent().flush();
                    flushedOnce = true;
                }
            }
            // Flush the tail of the last (or only) partial chunk.
            if (ch != null && (i != 0 || !flushedOnce)) {
                ch.parent().flush();
            }
        } finally {
            scheduled.set(false);
            // Re-schedule if commands arrived after the drain finished. Take the
            // channel from the head of the queue rather than the local `ch`,
            // which is null when this drain processed no command at all —
            // calling scheduleFlush(null) would NPE inside the event loop and
            // leave `scheduled` stuck at true, stalling the queue forever.
            QueuedCommand next = queue.peek();
            if (next != null) {
                scheduleFlush(next.channel());
            }
        }
    }
}
| 6,128 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleWriteQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.common.BatchExecutorQueue;
import org.apache.dubbo.rpc.protocol.tri.command.QueuedCommand;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelPromise;
/**
 * Batching write queue for triple protocol commands. Writes are executed on the
 * channel's event loop; the batch machinery lives in {@link BatchExecutorQueue}.
 */
public class TripleWriteQueue extends BatchExecutorQueue<QueuedCommand> {

    public TripleWriteQueue() {}

    public TripleWriteQueue(int chunkSize) {
        super(chunkSize);
    }

    public ChannelFuture enqueue(QueuedCommand command, boolean rst) {
        // rst is accepted for API compatibility only.
        return enqueue(command);
    }

    public ChannelFuture enqueue(QueuedCommand command) {
        return enqueueFuture(command, command.channel().eventLoop());
    }

    /**
     * Queues a command on the given executor, creating a promise for it if absent.
     *
     * @return the command's promise, completed when the write finishes
     */
    public ChannelFuture enqueueFuture(QueuedCommand command, Executor executor) {
        ChannelPromise promise = command.promise();
        if (promise == null) {
            promise = command.channel().newPromise();
            command.promise(promise);
        }
        super.enqueue(command, executor);
        return promise;
    }

    @Override
    protected void prepare(QueuedCommand item) {
        // Mid-batch item: write without flushing.
        run(item, false);
    }

    @Override
    protected void flush(QueuedCommand item) {
        // Last item of a batch: write, then flush the channel once.
        run(item, true);
    }

    // Executes the command on its channel, optionally flushing, and routes
    // CompletionExceptions into the command's promise.
    private void run(QueuedCommand item, boolean flushChannel) {
        try {
            Channel channel = item.channel();
            item.run(channel);
            if (flushChannel) {
                channel.flush();
            }
        } catch (CompletionException e) {
            item.promise().tryFailure(e.getCause());
        }
    }
}
| 6,129 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleIsolationExecutorSupport.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.common.ServiceKey;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.rpc.executor.AbstractIsolationExecutorSupport;
import org.apache.dubbo.rpc.protocol.tri.TripleHeaderEnum;
import io.netty.handler.codec.http2.Http2Headers;
/**
 * Resolves the isolation-executor {@link ServiceKey} from incoming HTTP/2 headers:
 * interface name from the request path, version/group from triple headers.
 */
public class TripleIsolationExecutorSupport extends AbstractIsolationExecutorSupport {
    private static final ErrorTypeAwareLogger logger =
            LoggerFactory.getErrorTypeAwareLogger(TripleIsolationExecutorSupport.class);

    public TripleIsolationExecutorSupport(URL url) {
        super(url);
    }

    @Override
    protected ServiceKey getServiceKey(Object data) {
        if (!(data instanceof Http2Headers)) {
            return null;
        }
        Http2Headers headers = (Http2Headers) data;
        CharSequence pathSeq = headers.path();
        if (pathSeq == null) {
            return null;
        }
        String path = pathSeq.toString();
        String[] parts = path.split("/"); // path like /{interfaceName}/{methodName}
        // Fix: a malformed path such as "" or "/" yields fewer than 2 parts and
        // previously threw ArrayIndexOutOfBoundsException.
        if (parts.length < 2) {
            return null;
        }
        String interfaceName = parts[1];
        // Single get() instead of contains()+get(): same result, one lookup.
        String version = charSequenceToString(headers.get(TripleHeaderEnum.SERVICE_VERSION.getHeader()));
        String group = charSequenceToString(headers.get(TripleHeaderEnum.SERVICE_GROUP.getHeader()));
        return new ServiceKey(interfaceName, version, group);
    }

    // Converts a possibly-null header value to String, preserving null.
    private static String charSequenceToString(CharSequence value) {
        return value == null ? null : value.toString();
    }
}
| 6,130 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/H2TransportListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http2.Http2Headers;
/**
* An observer used for transport messaging which provides full streaming support. A
* TransportObserver receives raw data or control messages from local/remote.
*/
public interface H2TransportListener {
    /**
     * Transport metadata
     *
     * @param headers metadata KV pairs
     * @param endStream whether this frame terminates the stream
     */
    void onHeader(Http2Headers headers, boolean endStream);
    /**
     * Transport data
     *
     * @param data raw byte buffer
     * @param endStream whether this frame terminates the stream
     */
    void onData(ByteBuf data, boolean endStream);
    /**
     * Called when the remote peer cancels (resets) the stream.
     *
     * @param errorCode HTTP/2 error code carried by the reset frame
     */
    void cancelByRemote(long errorCode);
}
| 6,131 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleHttp2FrameServerHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.rpc.HeaderFilter;
import org.apache.dubbo.rpc.PathResolver;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.executor.ExecutorSupport;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.protocol.tri.compressor.DeCompressor;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleServerStream;
import java.util.List;
import java.util.concurrent.Executor;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http2.Http2DataFrame;
import io.netty.handler.codec.http2.Http2HeadersFrame;
import io.netty.handler.codec.http2.Http2ResetFrame;
import io.netty.handler.codec.http2.Http2StreamChannel;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ReferenceCounted;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_RESPONSE;
/**
 * Server-side channel handler for one HTTP/2 stream. Dispatches header/data/reset
 * frames to a {@link TripleServerStream} and releases any other ref-counted messages.
 */
public class TripleHttp2FrameServerHandler extends ChannelDuplexHandler {
    private static final ErrorTypeAwareLogger LOGGER =
            LoggerFactory.getErrorTypeAwareLogger(TripleHttp2FrameServerHandler.class);
    private final PathResolver pathResolver;
    private final ExecutorSupport executorSupport;
    // Comma-joined list of supported decompressor names, advertised to clients.
    private final String acceptEncoding;
    private final TripleServerStream tripleServerStream;

    public TripleHttp2FrameServerHandler(
            FrameworkModel frameworkModel,
            ExecutorSupport executorSupport,
            List<HeaderFilter> filters,
            Http2StreamChannel channel,
            TripleWriteQueue writeQueue) {
        this.executorSupport = executorSupport;
        this.acceptEncoding = String.join(
                ",", frameworkModel.getExtensionLoader(DeCompressor.class).getSupportedExtensions());
        this.pathResolver =
                frameworkModel.getExtensionLoader(PathResolver.class).getDefaultExtension();
        // The executor will be assigned in onHeadersRead method
        tripleServerStream = new TripleServerStream(
                channel, frameworkModel, null, pathResolver, acceptEncoding, filters, writeQueue);
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        if (msg instanceof Http2HeadersFrame) {
            onHeadersRead(ctx, (Http2HeadersFrame) msg);
        } else if (msg instanceof Http2DataFrame) {
            onDataRead(ctx, (Http2DataFrame) msg);
        } else if (msg instanceof ReferenceCounted) {
            // ignored — but released so the buffer is not leaked
            ReferenceCountUtil.release(msg);
        }
    }

    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        // RST_STREAM is surfaced by Netty as a user event, not via channelRead.
        if (evt instanceof Http2ResetFrame) {
            onResetRead(ctx, (Http2ResetFrame) evt);
        } else {
            super.userEventTriggered(ctx, evt);
        }
    }

    /** Propagates a remote RST_STREAM to the server stream's transport observer. */
    public void onResetRead(ChannelHandlerContext ctx, Http2ResetFrame frame) {
        LOGGER.warn(
                PROTOCOL_FAILED_RESPONSE, "", "", "Triple Server received remote reset errorCode=" + frame.errorCode());
        if (tripleServerStream != null) {
            tripleServerStream.transportObserver.cancelByRemote(frame.errorCode());
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        if (LOGGER.isWarnEnabled()) {
            LOGGER.warn(PROTOCOL_FAILED_RESPONSE, "", "", "Exception in processing triple message", cause);
        }
        // Map the failure to a triple status and cancel the stream locally.
        TriRpcStatus status = TriRpcStatus.getStatus(cause, "Provider's error:\n" + cause.getMessage());
        tripleServerStream.cancelByLocal(status);
    }

    public void onDataRead(ChannelHandlerContext ctx, Http2DataFrame msg) throws Exception {
        tripleServerStream.transportObserver.onData(msg.content(), msg.isEndStream());
    }

    public void onHeadersRead(ChannelHandlerContext ctx, Http2HeadersFrame msg) throws Exception {
        // Pick the request executor from the headers before delivering them,
        // so subsequent processing runs on the isolation executor.
        Executor executor = executorSupport.getExecutor(msg.headers());
        tripleServerStream.setExecutor(executor);
        tripleServerStream.transportObserver.onHeader(msg.headers(), msg.isEndStream());
    }
}
| 6,132 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/GracefulShutdown.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.transport;
import java.util.concurrent.TimeUnit;
import io.netty.buffer.ByteBufUtil;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.DefaultHttp2GoAwayFrame;
import io.netty.handler.codec.http2.DefaultHttp2PingFrame;
import io.netty.handler.codec.http2.Http2Error;
import io.netty.handler.codec.http2.Http2GoAwayFrame;
import io.netty.handler.codec.http2.Http2PingFrame;
import io.netty.util.concurrent.Future;
/**
 * Implements the two-step HTTP/2 graceful shutdown: send a GOAWAY advertising
 * Integer.MAX_VALUE (accept in-flight streams), then a PING; when the PING is
 * acked (or times out) a second, final GOAWAY is sent and the channel closed.
 */
public class GracefulShutdown {
    // Magic ping payload identifying the graceful-shutdown ping.
    static final long GRACEFUL_SHUTDOWN_PING = 0x97ACEF001L;
    private static final long GRACEFUL_SHUTDOWN_PING_TIMEOUT_NANOS = TimeUnit.SECONDS.toNanos(10);
    private final ChannelHandlerContext ctx;
    // Promise completed when the channel is finally closed.
    private final ChannelPromise originPromise;
    private final String goAwayMessage;
    // Set once the second GOAWAY path has run; prevents running it twice
    // (once from the ping ack, once from the timeout).
    private boolean pingAckedOrTimeout;
    private Future<?> pingFuture;

    public GracefulShutdown(ChannelHandlerContext ctx, String goAwayMessage, ChannelPromise originPromise) {
        this.ctx = ctx;
        this.goAwayMessage = goAwayMessage;
        this.originPromise = originPromise;
    }

    /** Starts the shutdown: first GOAWAY, timeout schedule, then the probe PING. */
    public void gracefulShutdown() {
        Http2GoAwayFrame goAwayFrame =
                new DefaultHttp2GoAwayFrame(Http2Error.NO_ERROR, ByteBufUtil.writeAscii(ctx.alloc(), goAwayMessage));
        // Advertise max stream id so streams already opened by the peer still complete.
        goAwayFrame.setExtraStreamIds(Integer.MAX_VALUE);
        ctx.writeAndFlush(goAwayFrame);
        // Fallback: force the second GOAWAY if the ping ack never arrives.
        pingFuture = ctx.executor()
                .schedule(() -> secondGoAwayAndClose(ctx), GRACEFUL_SHUTDOWN_PING_TIMEOUT_NANOS, TimeUnit.NANOSECONDS);
        Http2PingFrame pingFrame = new DefaultHttp2PingFrame(GRACEFUL_SHUTDOWN_PING, false);
        ctx.writeAndFlush(pingFrame);
    }

    // Sends the final GOAWAY and closes the channel; presumably also invoked by the
    // ping-ack handler elsewhere — TODO confirm against the caller.
    void secondGoAwayAndClose(ChannelHandlerContext ctx) {
        if (pingAckedOrTimeout) {
            return;
        }
        pingAckedOrTimeout = true;
        // Cancel the pending timeout task (no-op if we are that task).
        pingFuture.cancel(false);
        try {
            Http2GoAwayFrame goAwayFrame = new DefaultHttp2GoAwayFrame(
                    Http2Error.NO_ERROR, ByteBufUtil.writeAscii(this.ctx.alloc(), this.goAwayMessage));
            ctx.writeAndFlush(goAwayFrame);
            // TODO support customize graceful shutdown timeout mills
            ctx.close(originPromise);
        } catch (Exception e) {
            ctx.fireExceptionCaught(e);
        }
    }
}
| 6,133 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/compressor/Compressor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.compressor;
import org.apache.dubbo.common.extension.ExtensionScope;
import org.apache.dubbo.common.extension.SPI;
import org.apache.dubbo.rpc.Constants;
import org.apache.dubbo.rpc.model.FrameworkModel;
/**
* compress payload for grpc request, and decompress response payload Configure it in files,
* pictures or other configurations that exist in the system properties Configure {@link
* Constants#COMPRESSOR_KEY} in dubbo.properties、dubbo.yml or other configuration that exist in the
* system property
*/
@SPI(scope = ExtensionScope.FRAMEWORK)
public interface Compressor extends MessageEncoding {
Compressor NONE = Identity.IDENTITY;
static Compressor getCompressor(FrameworkModel frameworkModel, String compressorStr) {
if (null == compressorStr) {
return null;
}
if (compressorStr.equals(Identity.MESSAGE_ENCODING)) {
return NONE;
}
return frameworkModel.getExtensionLoader(Compressor.class).getExtension(compressorStr);
}
/**
* compress payload
*
* @param payloadByteArr payload byte array
* @return compressed payload byte array
*/
byte[] compress(byte[] payloadByteArr);
}
| 6,134 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/compressor/Gzip.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.compressor;
import org.apache.dubbo.rpc.RpcException;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
/**
* gzip compressor
*/
/**
 * gzip compressor backed by the JDK's {@link GZIPOutputStream}/{@link GZIPInputStream}.
 */
public class Gzip implements Compressor, DeCompressor {

    public static final String GZIP = "gzip";

    @Override
    public String getMessageEncoding() {
        return GZIP;
    }

    @Override
    public byte[] compress(byte[] payloadByteArr) throws RpcException {
        if (payloadByteArr == null || payloadByteArr.length == 0) {
            // Empty/null input maps to an empty payload, not to a gzip header.
            return new byte[0];
        }
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        try (GZIPOutputStream gzipOut = new GZIPOutputStream(compressed)) {
            gzipOut.write(payloadByteArr);
        } catch (Exception exception) {
            throw new IllegalStateException(exception);
        }
        // toByteArray after the stream is closed, so the gzip trailer is included.
        return compressed.toByteArray();
    }

    @Override
    public byte[] decompress(byte[] payloadByteArr) throws RpcException {
        if (payloadByteArr == null || payloadByteArr.length == 0) {
            return new byte[0];
        }
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        try (GZIPInputStream gzipIn = new GZIPInputStream(new ByteArrayInputStream(payloadByteArr))) {
            byte[] chunk = new byte[256];
            for (int count = gzipIn.read(chunk); count >= 0; count = gzipIn.read(chunk)) {
                restored.write(chunk, 0, count);
            }
        } catch (Exception exception) {
            throw new IllegalStateException(exception);
        }
        return restored.toByteArray();
    }
}
| 6,135 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/compressor/MessageEncoding.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.compressor;
public interface MessageEncoding {
    /**
     * Name of this message encoding (e.g. "gzip", "identity"), as carried
     * in the triple/grpc message-encoding negotiation.
     *
     * @return the message encoding name
     */
    String getMessageEncoding();
}
| 6,136 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/compressor/Identity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.compressor;
/**
* Default compressor
* <p>
* Do not use the spi
*/
public class Identity implements Compressor, DeCompressor {
    public static final String MESSAGE_ENCODING = "identity";
    // Shared stateless singleton; used directly rather than via SPI lookup.
    public static final Identity IDENTITY = new Identity();
    @Override
    public String getMessageEncoding() {
        return MESSAGE_ENCODING;
    }
    @Override
    public byte[] compress(byte[] payloadByteArr) {
        // Identity encoding: payload passes through unchanged.
        return payloadByteArr;
    }
    @Override
    public byte[] decompress(byte[] payloadByteArr) {
        return payloadByteArr;
    }
}
| 6,137 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/compressor/Snappy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.compressor;
import org.apache.dubbo.rpc.RpcException;
import java.io.IOException;
/**
* snappy compressor, Provide high-speed compression speed and reasonable compression ratio
*
* @link https://github.com/google/snappy
*/
/**
 * snappy compressor backed by org.xerial.snappy; favors speed over ratio.
 */
public class Snappy implements Compressor, DeCompressor {

    public static final String SNAPPY = "snappy";

    @Override
    public String getMessageEncoding() {
        return SNAPPY;
    }

    @Override
    public byte[] compress(byte[] payloadByteArr) throws RpcException {
        if (isBlank(payloadByteArr)) {
            return new byte[0];
        }
        try {
            return org.xerial.snappy.Snappy.compress(payloadByteArr);
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }

    @Override
    public byte[] decompress(byte[] payloadByteArr) {
        if (isBlank(payloadByteArr)) {
            return new byte[0];
        }
        try {
            return org.xerial.snappy.Snappy.uncompress(payloadByteArr);
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }

    // Null or zero-length payloads short-circuit to an empty result.
    private static boolean isBlank(byte[] payload) {
        return payload == null || payload.length == 0;
    }
}
| 6,138 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/compressor/Bzip2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.compressor;
import org.apache.dubbo.rpc.RpcException;
import java.io.ByteArrayInputStream;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
import org.apache.commons.io.output.ByteArrayOutputStream;
/**
* bzip2 compressor, faster compression efficiency
*
* @link https://commons.apache.org/proper/commons-compress/
*/
/**
 * bzip2 compressor backed by commons-compress.
 */
public class Bzip2 implements Compressor, DeCompressor {

    public static final String BZIP2 = "bzip2";

    @Override
    public String getMessageEncoding() {
        return BZIP2;
    }

    @Override
    public byte[] compress(byte[] payloadByteArr) throws RpcException {
        if (null == payloadByteArr || 0 == payloadByteArr.length) {
            return new byte[0];
        }
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // Fix: try-with-resources — previously the stream was never closed when
        // write() threw, leaking the compressor's internal buffers.
        try (BZip2CompressorOutputStream cos = new BZip2CompressorOutputStream(out)) {
            cos.write(payloadByteArr);
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
        // toByteArray after close, so the bzip2 trailer is flushed into `out`.
        return out.toByteArray();
    }

    @Override
    public byte[] decompress(byte[] payloadByteArr) {
        if (null == payloadByteArr || 0 == payloadByteArr.length) {
            return new byte[0];
        }
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        ByteArrayInputStream in = new ByteArrayInputStream(payloadByteArr);
        // Fix: the decompressor stream was never closed; close it deterministically.
        try (BZip2CompressorInputStream unZip = new BZip2CompressorInputStream(in)) {
            byte[] buffer = new byte[2048];
            int n;
            while ((n = unZip.read(buffer)) >= 0) {
                out.write(buffer, 0, n);
            }
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
        return out.toByteArray();
    }
}
| 6,139 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/compressor/DeCompressor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.compressor;
import org.apache.dubbo.common.extension.ExtensionScope;
import org.apache.dubbo.common.extension.SPI;
import org.apache.dubbo.rpc.model.FrameworkModel;
@SPI(scope = ExtensionScope.FRAMEWORK)
public interface DeCompressor extends MessageEncoding {
DeCompressor NONE = Identity.IDENTITY;
static DeCompressor getCompressor(FrameworkModel frameworkModel, String compressorStr) {
if (null == compressorStr) {
return null;
}
if (compressorStr.equals(Identity.MESSAGE_ENCODING)) {
return NONE;
}
return frameworkModel.getExtensionLoader(DeCompressor.class).getExtension(compressorStr);
}
/**
* decompress payload
*
* @param payloadByteArr payload byte array
* @return decompressed payload byte array
*/
byte[] decompress(byte[] payloadByteArr);
}
| 6,140 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/stream/ClientStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.stream;
import org.apache.dubbo.rpc.TriRpcStatus;
import java.util.Map;
import io.netty.util.concurrent.Future;
/**
* ClientStream is used to send request to server and receive response from server. Response is
* received by {@link ClientStream.Listener} Requests are sent by {@link ClientStream} directly.
*/
public interface ClientStream extends Stream {
    interface Listener extends Stream.Listener {
        /**
         * Callback when stream started.
         */
        void onStart();
        /**
         * Callback when stream completed.
         *
         * @param status response status
         * @param attachments attachments received from remote peer
         */
        default void onComplete(TriRpcStatus status, Map<String, Object> attachments) {}
        /**
         * Callback when request completed. Default implementation delegates to
         * {@link #onComplete(TriRpcStatus, Map)}, dropping the reserved data.
         *
         * @param status response status
         * @param attachments attachments received from remote peer
         * @param reserved triple protocol reserved data
         * @param isReturnTriException whether the response carries a triple exception
         */
        default void onComplete(
                TriRpcStatus status,
                Map<String, Object> attachments,
                Map<String, String> reserved,
                boolean isReturnTriException) {
            onComplete(status, attachments);
        }
    }
    /**
     * Send message to remote peer.
     *
     * @param message message to send to remote peer
     * @param compressFlag 1 if the message body is compressed, 0 otherwise
     * @param eos whether this is the last message
     * @return future to callback when send message is done
     */
    Future<?> sendMessage(byte[] message, int compressFlag, boolean eos);
    /**
     * No more data will be sent, half close this stream to wait server response.
     *
     * @return a future of send result
     */
    Future<?> halfClose();
}
| 6,141 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/stream/StreamUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.stream;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.utils.JsonUtils;
import org.apache.dubbo.common.utils.LRU2Cache;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.TripleConstant;
import org.apache.dubbo.rpc.protocol.tri.TripleHeaderEnum;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_UNSUPPORTED;
public class StreamUtils {
    protected static final ErrorTypeAwareLogger LOGGER = LoggerFactory.getErrorTypeAwareLogger(StreamUtils.class);
    private static final Base64.Decoder BASE64_DECODER = Base64.getDecoder();
    private static final Base64.Encoder BASE64_ENCODER = Base64.getEncoder().withoutPadding();
    private static final int MAX_LRU_HEADER_MAP_SIZE = 10000;
    // Bounded cache of original header key -> lower-cased key conversions.
    private static final Map<String, String> lruHeaderMap = new LRU2Cache<>(MAX_LRU_HEADER_MAP_SIZE);

    /**
     * Base64-encode (without padding) and render the result as a US-ASCII string.
     */
    public static String encodeBase64ASCII(byte[] in) {
        return new String(encodeBase64(in), StandardCharsets.US_ASCII);
    }

    /**
     * Base64-encode without padding.
     */
    public static byte[] encodeBase64(byte[] in) {
        return BASE64_ENCODER.encode(in);
    }

    /**
     * Decode a base64 value whose characters are US-ASCII.
     */
    public static byte[] decodeASCIIByte(CharSequence value) {
        byte[] ascii = value.toString().getBytes(StandardCharsets.US_ASCII);
        return BASE64_DECODER.decode(ascii);
    }

    /**
     * Copy attachments into a fresh map, dropping keys that are reserved
     * by the triple protocol itself.
     */
    public static Map<String, Object> toAttachments(Map<String, Object> origin) {
        if (origin == null || origin.isEmpty()) {
            return Collections.emptyMap();
        }
        Map<String, Object> result = new HashMap<>(origin.size());
        for (Map.Entry<String, Object> item : origin.entrySet()) {
            String name = item.getKey();
            if (!TripleHeaderEnum.containsExcludeAttachments(name)) {
                result.put(name, item.getValue());
            }
        }
        return result;
    }

    /**
     * Parse and put the KV pairs into metadata. Ignore Http2 PseudoHeaderName and internal name.
     * Only raw byte array or string value will be put.
     *
     * @param headers the metadata holder
     * @param attachments KV pairs
     * @param needConvertHeaderKey whether to record original (mixed-case) keys so the
     *        peer can restore them from the convert header
     */
    public static void convertAttachment(
            DefaultHttp2Headers headers, Map<String, Object> attachments, boolean needConvertHeaderKey) {
        if (attachments == null) {
            return;
        }
        Map<String, String> lowerToOriginal = new HashMap<>();
        for (Map.Entry<String, Object> attachment : attachments.entrySet()) {
            String originalKey = attachment.getKey();
            String lowerKey = lruHeaderMap.get(originalKey);
            if (lowerKey == null) {
                lowerKey = originalKey.toLowerCase(Locale.ROOT);
                lruHeaderMap.put(originalKey, lowerKey);
            }
            if (TripleHeaderEnum.containsExcludeAttachments(lowerKey)) {
                continue;
            }
            Object value = attachment.getValue();
            if (value == null) {
                continue;
            }
            if (needConvertHeaderKey && !lowerKey.equals(originalKey)) {
                lowerToOriginal.put(lowerKey, originalKey);
            }
            convertSingleAttachment(headers, lowerKey, value);
        }
        if (!lowerToOriginal.isEmpty()) {
            // Ship the lower->original key mapping so the peer can restore casing.
            String needConvertJson = JsonUtils.toJson(lowerToOriginal);
            headers.add(TripleHeaderEnum.TRI_HEADER_CONVERT.getHeader(), TriRpcStatus.encodeMessage(needConvertJson));
        }
    }

    public static void convertAttachment(DefaultHttp2Headers headers, Map<String, Object> attachments) {
        convertAttachment(headers, attachments, false);
    }

    /**
     * Convert one attachment value to a header entry. Strings/numbers/booleans are
     * written as text; byte arrays are base64-encoded under a "-bin" suffixed key.
     * Anything else is logged and skipped.
     *
     * @param headers outbound headers
     * @param key metadata key (already lower-cased)
     * @param v metadata value
     */
    private static void convertSingleAttachment(DefaultHttp2Headers headers, String key, Object v) {
        try {
            if (v instanceof String || v instanceof Number || v instanceof Boolean) {
                headers.set(key, v.toString());
            } else if (v instanceof byte[]) {
                headers.set(key + TripleConstant.HEADER_BIN_SUFFIX, encodeBase64ASCII((byte[]) v));
            } else {
                LOGGER.warn(
                        PROTOCOL_UNSUPPORTED,
                        "",
                        "",
                        "Unsupported attachment k: " + key + " class: "
                                + v.getClass().getName());
            }
        } catch (Throwable t) {
            LOGGER.warn(
                    PROTOCOL_UNSUPPORTED,
                    "",
                    "",
                    "Meet exception when convert single attachment key:" + key + " value=" + v,
                    t);
        }
    }
}
| 6,142 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/stream/TripleClientStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.stream;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.protocol.tri.ClassLoadUtil;
import org.apache.dubbo.rpc.protocol.tri.ExceptionUtils;
import org.apache.dubbo.rpc.protocol.tri.TripleHeaderEnum;
import org.apache.dubbo.rpc.protocol.tri.command.CancelQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.command.CreateStreamQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.command.DataQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.command.EndStreamQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.command.HeaderQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.compressor.DeCompressor;
import org.apache.dubbo.rpc.protocol.tri.compressor.Identity;
import org.apache.dubbo.rpc.protocol.tri.frame.Deframer;
import org.apache.dubbo.rpc.protocol.tri.frame.TriDecoder;
import org.apache.dubbo.rpc.protocol.tri.transport.AbstractH2TransportListener;
import org.apache.dubbo.rpc.protocol.tri.transport.H2TransportListener;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleCommandOutBoundHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleHttp2ClientResponseHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleWriteQueue;
import org.apache.dubbo.rpc.protocol.tri.transport.WriteQueue;
import java.io.IOException;
import java.net.SocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
import com.google.protobuf.Any;
import com.google.rpc.DebugInfo;
import com.google.rpc.ErrorInfo;
import com.google.rpc.Status;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http2.Http2Error;
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.handler.codec.http2.Http2StreamChannel;
import io.netty.handler.codec.http2.Http2StreamChannelBootstrap;
import io.netty.util.ReferenceCountUtil;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_RESPONSE;
/**
* ClientStream is an abstraction for bi-directional messaging. It maintains a {@link WriteQueue} to
* write Http2Frame to remote. A {@link H2TransportListener} receives Http2Frame from remote.
* Instead of maintaining state, this class depends on upper layer or transport layer's states.
*/
public class TripleClientStream extends AbstractStream implements ClientStream {
    private static final ErrorTypeAwareLogger LOGGER = LoggerFactory.getErrorTypeAwareLogger(TripleClientStream.class);
    public final ClientStream.Listener listener;
    // Shared outbound queue: every frame for this stream is enqueued here and
    // flushed on the parent channel's event loop.
    private final TripleWriteQueue writeQueue;
    // Decodes inbound gRPC data frames; created once response headers arrive.
    private Deframer deframer;
    // The HTTP/2 connection channel; stream channels are children of it.
    private final Channel parent;
    // Completes with the Http2StreamChannel once stream creation finishes on the event loop.
    private final TripleStreamChannelFuture streamChannelFuture;
    // Set after END_STREAM was successfully written by halfClose().
    private boolean halfClosed;
    // Set once RST_STREAM has been queued; later writes fail fast in preCheck().
    private boolean rst;
    // Whether the server flagged the response as carrying a wrapped triple exception.
    private boolean isReturnTriException = false;
    // for test
    TripleClientStream(
            FrameworkModel frameworkModel,
            Executor executor,
            TripleWriteQueue writeQueue,
            ClientStream.Listener listener,
            Http2StreamChannel http2StreamChannel) {
        super(executor, frameworkModel);
        this.parent = http2StreamChannel.parent();
        this.listener = listener;
        this.writeQueue = writeQueue;
        this.streamChannelFuture = initHttp2StreamChannel(http2StreamChannel);
    }
    public TripleClientStream(
            FrameworkModel frameworkModel,
            Executor executor,
            Channel parent,
            ClientStream.Listener listener,
            TripleWriteQueue writeQueue) {
        super(executor, frameworkModel);
        this.parent = parent;
        this.listener = listener;
        this.writeQueue = writeQueue;
        this.streamChannelFuture = initHttp2StreamChannel(parent);
    }
    /**
     * Bootstrap a child HTTP/2 stream channel on the given channel. The actual
     * creation happens asynchronously via a CreateStreamQueueCommand; the returned
     * future completes when the stream channel is ready (or failed).
     */
    private TripleStreamChannelFuture initHttp2StreamChannel(Channel parent) {
        TripleStreamChannelFuture streamChannelFuture = new TripleStreamChannelFuture(parent);
        Http2StreamChannelBootstrap bootstrap = new Http2StreamChannelBootstrap(parent);
        bootstrap.handler(new ChannelInboundHandlerAdapter() {
            @Override
            public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
                Channel channel = ctx.channel();
                // Outbound command encoder first, then the HTTP/2 response handler
                // that forwards frames to our transport listener.
                channel.pipeline().addLast(new TripleCommandOutBoundHandler());
                channel.pipeline().addLast(new TripleHttp2ClientResponseHandler(createTransportListener()));
            }
        });
        CreateStreamQueueCommand cmd = CreateStreamQueueCommand.create(bootstrap, streamChannelFuture);
        this.writeQueue.enqueue(cmd);
        return streamChannelFuture;
    }
    /**
     * Send the request headers. On write failure the call is completed with an
     * INTERNAL status via transportException().
     */
    public ChannelFuture sendHeader(Http2Headers headers) {
        if (this.writeQueue == null) {
            // already processed at createStream()
            return parent.newFailedFuture(new IllegalStateException("Stream already closed"));
        }
        ChannelFuture checkResult = preCheck();
        if (!checkResult.isSuccess()) {
            return checkResult;
        }
        final HeaderQueueCommand headerCmd = HeaderQueueCommand.createHeaders(streamChannelFuture, headers);
        return writeQueue.enqueueFuture(headerCmd, parent.eventLoop()).addListener(future -> {
            if (!future.isSuccess()) {
                transportException(future.cause());
            }
        });
    }
    // Complete the call with an INTERNAL status when the HTTP/2 transport fails.
    private void transportException(Throwable cause) {
        final TriRpcStatus status =
                TriRpcStatus.INTERNAL.withDescription("Http2 exception").withCause(cause);
        listener.onComplete(status, null, null, false);
    }
    /**
     * Cancel initiated by this (client) side: queue RST_STREAM(CANCEL) and mark
     * the stream reset so further writes fail fast.
     */
    public ChannelFuture cancelByLocal(TriRpcStatus status) {
        ChannelFuture checkResult = preCheck();
        if (!checkResult.isSuccess()) {
            return checkResult;
        }
        final CancelQueueCommand cmd = CancelQueueCommand.createCommand(streamChannelFuture, Http2Error.CANCEL);
        TripleClientStream.this.rst = true;
        return this.writeQueue.enqueue(cmd);
    }
    @Override
    public SocketAddress remoteAddress() {
        return parent.remoteAddress();
    }
    /**
     * Send one framed message. On write failure the stream is cancelled locally
     * and the call is completed exceptionally.
     */
    @Override
    public ChannelFuture sendMessage(byte[] message, int compressFlag, boolean eos) {
        ChannelFuture checkResult = preCheck();
        if (!checkResult.isSuccess()) {
            return checkResult;
        }
        // NOTE(review): the 'eos' argument is not forwarded; DataQueueCommand is
        // always created with endStream=false — confirm END_STREAM is sent via halfClose().
        final DataQueueCommand cmd = DataQueueCommand.create(streamChannelFuture, message, false, compressFlag);
        return this.writeQueue.enqueueFuture(cmd, parent.eventLoop()).addListener(future -> {
            if (!future.isSuccess()) {
                cancelByLocal(TriRpcStatus.INTERNAL
                        .withDescription("Client write message failed")
                        .withCause(future.cause()));
                transportException(future.cause());
            }
        });
    }
    @Override
    public void request(int n) {
        // Backpressure: ask the deframer for up to n more messages. The deframer is
        // created when response headers arrive — callers are expected not to invoke
        // this earlier (TODO confirm ordering).
        deframer.request(n);
    }
    /**
     * Write END_STREAM to signal no more outbound data; marks halfClosed on success.
     */
    @Override
    public ChannelFuture halfClose() {
        ChannelFuture checkResult = preCheck();
        if (!checkResult.isSuccess()) {
            return checkResult;
        }
        final EndStreamQueueCommand cmd = EndStreamQueueCommand.create(streamChannelFuture);
        return this.writeQueue.enqueueFuture(cmd, parent.eventLoop()).addListener(future -> {
            if (future.isSuccess()) {
                halfClosed = true;
            }
        });
    }
    // Fail fast with an IOException once the stream has been reset.
    private ChannelFuture preCheck() {
        if (rst) {
            return streamChannelFuture.getNow().newFailedFuture(new IOException("stream channel has reset"));
        }
        return parent.newSucceededFuture();
    }
    /**
     * @return transport listener
     */
    H2TransportListener createTransportListener() {
        return new ClientTransportListener();
    }
    /**
     * Receives HTTP/2 frames for this stream and drives the gRPC response state
     * machine: headers -> data* -> trailers. All callbacks are dispatched on the
     * stream's executor.
     */
    class ClientTransportListener extends AbstractH2TransportListener implements H2TransportListener {
        // Non-null once a protocol-level error was detected; accumulates context.
        private TriRpcStatus transportError;
        private DeCompressor decompressor;
        // True after valid (non-1xx) response headers were processed.
        private boolean headerReceived;
        private Http2Headers trailers;
        // Abort the stream after a transport error: RST with NO_ERROR, then complete.
        void handleH2TransportError(TriRpcStatus status) {
            writeQueue.enqueue(CancelQueueCommand.createCommand(streamChannelFuture, Http2Error.NO_ERROR));
            TripleClientStream.this.rst = true;
            finishProcess(status, null, false);
        }
        /**
         * Complete the call, preferring the rich status decoded from the
         * grpc-status-details-bin trailer over the plain transport status.
         * NOTE(review): trailers may be null on error paths; filterReservedHeaders/
         * headersToMap are assumed null-tolerant — confirm in AbstractStream.
         */
        void finishProcess(TriRpcStatus status, Http2Headers trailers, boolean isReturnTriException) {
            final Map<String, String> reserved = filterReservedHeaders(trailers);
            final Map<String, Object> attachments =
                    headersToMap(trailers, () -> reserved.get(TripleHeaderEnum.TRI_HEADER_CONVERT.getHeader()));
            final TriRpcStatus detailStatus;
            final TriRpcStatus statusFromTrailers = getStatusFromTrailers(reserved);
            if (statusFromTrailers != null) {
                detailStatus = statusFromTrailers;
            } else {
                detailStatus = status;
            }
            listener.onComplete(detailStatus, attachments, reserved, isReturnTriException);
        }
        // Returns null when the headers look like a valid gRPC response, otherwise
        // a status describing what is wrong (missing :status or bad content-type).
        private TriRpcStatus validateHeaderStatus(Http2Headers headers) {
            Integer httpStatus = headers.status() == null
                    ? null
                    : Integer.parseInt(headers.status().toString());
            if (httpStatus == null) {
                return TriRpcStatus.INTERNAL.withDescription("Missing HTTP status code");
            }
            final CharSequence contentType = headers.get(TripleHeaderEnum.CONTENT_TYPE_KEY.getHeader());
            if (contentType == null
                    || !contentType.toString().startsWith(TripleHeaderEnum.APPLICATION_GRPC.getHeader())) {
                return TriRpcStatus.fromCode(TriRpcStatus.httpStatusToGrpcCode(httpStatus))
                        .withDescription("invalid content-type: " + contentType);
            }
            return null;
        }
        void onHeaderReceived(Http2Headers headers) {
            if (transportError != null) {
                // Already failed: just accumulate context for the final status.
                transportError.appendDescription("headers:" + headers);
                return;
            }
            if (headerReceived) {
                transportError = TriRpcStatus.INTERNAL.withDescription("Received headers twice");
                return;
            }
            Integer httpStatus = headers.status() == null
                    ? null
                    : Integer.parseInt(headers.status().toString());
            // Skip 1xx informational responses (note: the re-parse of httpStatus is
            // redundant — it is already an Integer).
            if (httpStatus != null && Integer.parseInt(httpStatus.toString()) > 100 && httpStatus < 200) {
                // ignored
                return;
            }
            headerReceived = true;
            transportError = validateHeaderStatus(headers);
            // todo support full payload compressor
            CharSequence messageEncoding = headers.get(TripleHeaderEnum.GRPC_ENCODING.getHeader());
            CharSequence triExceptionCode = headers.get(TripleHeaderEnum.TRI_EXCEPTION_CODE.getHeader());
            if (triExceptionCode != null) {
                Integer triExceptionCodeNum = Integer.parseInt(triExceptionCode.toString());
                if (!(triExceptionCodeNum.equals(CommonConstants.TRI_EXCEPTION_CODE_NOT_EXISTS))) {
                    isReturnTriException = true;
                }
            }
            if (null != messageEncoding) {
                String compressorStr = messageEncoding.toString();
                if (!Identity.IDENTITY.getMessageEncoding().equals(compressorStr)) {
                    DeCompressor compressor = DeCompressor.getCompressor(frameworkModel, compressorStr);
                    if (null == compressor) {
                        throw TriRpcStatus.UNIMPLEMENTED
                                .withDescription(String.format("Grpc-encoding '%s' is not supported", compressorStr))
                                .asException();
                    } else {
                        decompressor = compressor;
                    }
                }
            }
            // Note: this local deliberately shadows the outer 'listener' field;
            // it is the decoder callback, not the call listener.
            TriDecoder.Listener listener = new TriDecoder.Listener() {
                @Override
                public void onRawMessage(byte[] data) {
                    TripleClientStream.this.listener.onMessage(data, isReturnTriException);
                }
                public void close() {
                    finishProcess(statusFromTrailers(trailers), trailers, isReturnTriException);
                }
            };
            deframer = new TriDecoder(decompressor, listener);
            TripleClientStream.this.listener.onStart();
        }
        void onTrailersReceived(Http2Headers trailers) {
            if (transportError == null && !headerReceived) {
                // Trailers-only response: validate them as if they were headers.
                transportError = validateHeaderStatus(trailers);
            }
            if (transportError != null) {
                transportError = transportError.appendDescription("trailers: " + trailers);
            } else {
                this.trailers = trailers;
                TriRpcStatus status = statusFromTrailers(trailers);
                if (deframer == null) {
                    finishProcess(status, trailers, false);
                }
                if (deframer != null) {
                    // Closing the deframer triggers the decoder listener's close(),
                    // which completes the call.
                    deframer.close();
                }
            }
        }
        /**
         * Extract the response status from trailers.
         */
        private TriRpcStatus statusFromTrailers(Http2Headers trailers) {
            final Integer intStatus = trailers.getInt(TripleHeaderEnum.STATUS_KEY.getHeader());
            TriRpcStatus status = intStatus == null ? null : TriRpcStatus.fromCode(intStatus);
            if (status != null) {
                final CharSequence message = trailers.get(TripleHeaderEnum.MESSAGE_KEY.getHeader());
                if (message != null) {
                    final String description = TriRpcStatus.decodeMessage(message.toString());
                    status = status.withDescription(description);
                }
                return status;
            }
            // No status; something is broken. Try to provide a rational error.
            if (headerReceived) {
                return TriRpcStatus.UNKNOWN.withDescription("missing GRPC status in response");
            }
            Integer httpStatus = trailers.status() == null
                    ? null
                    : Integer.parseInt(trailers.status().toString());
            if (httpStatus != null) {
                status = TriRpcStatus.fromCode(TriRpcStatus.httpStatusToGrpcCode(httpStatus));
            } else {
                status = TriRpcStatus.INTERNAL.withDescription("missing HTTP status code");
            }
            return status.appendDescription("missing GRPC status, inferred error from HTTP status code");
        }
        /**
         * Decode the rich grpc-status-details-bin trailer (protobuf google.rpc.Status)
         * into a TriRpcStatus, appending any DebugInfo stack entries. Returns null
         * when the detail is absent, disabled, or unparsable.
         */
        private TriRpcStatus getStatusFromTrailers(Map<String, String> metadata) {
            if (null == metadata) {
                return null;
            }
            if (!getGrpcStatusDetailEnabled()) {
                return null;
            }
            // second get status detail
            if (!metadata.containsKey(TripleHeaderEnum.STATUS_DETAIL_KEY.getHeader())) {
                return null;
            }
            final String raw = (metadata.remove(TripleHeaderEnum.STATUS_DETAIL_KEY.getHeader()));
            byte[] statusDetailBin = StreamUtils.decodeASCIIByte(raw);
            ClassLoader tccl = Thread.currentThread().getContextClassLoader();
            try {
                final Status statusDetail = Status.parseFrom(statusDetailBin);
                List<Any> detailList = statusDetail.getDetailsList();
                Map<Class<?>, Object> classObjectMap = tranFromStatusDetails(detailList);
                // get common exception from DebugInfo
                TriRpcStatus status = TriRpcStatus.fromCode(statusDetail.getCode())
                        .withDescription(TriRpcStatus.decodeMessage(statusDetail.getMessage()));
                DebugInfo debugInfo = (DebugInfo) classObjectMap.get(DebugInfo.class);
                if (debugInfo != null) {
                    String msg = ExceptionUtils.getStackFrameString(debugInfo.getStackEntriesList());
                    status = status.appendDescription(msg);
                }
                return status;
            } catch (IOException ioException) {
                return null;
            } finally {
                ClassLoadUtil.switchContextLoader(tccl);
            }
        }
        // Unpack well-known Any details (ErrorInfo, DebugInfo) into a type-keyed map.
        private Map<Class<?>, Object> tranFromStatusDetails(List<Any> detailList) {
            Map<Class<?>, Object> map = new HashMap<>(detailList.size());
            try {
                for (Any any : detailList) {
                    if (any.is(ErrorInfo.class)) {
                        ErrorInfo errorInfo = any.unpack(ErrorInfo.class);
                        map.putIfAbsent(ErrorInfo.class, errorInfo);
                    } else if (any.is(DebugInfo.class)) {
                        DebugInfo debugInfo = any.unpack(DebugInfo.class);
                        map.putIfAbsent(DebugInfo.class, debugInfo);
                    }
                    // support others type but now only support this
                }
            } catch (Throwable t) {
                LOGGER.error(PROTOCOL_FAILED_RESPONSE, "", "", "tran from grpc-status-details error", t);
            }
            return map;
        }
        @Override
        public void onHeader(Http2Headers headers, boolean endStream) {
            executor.execute(() -> {
                if (endStream) {
                    if (!halfClosed) {
                        // Server finished before we half-closed: reset our side
                        // so no further writes are attempted.
                        Http2StreamChannel channel = streamChannelFuture.getNow();
                        if (channel.isActive() && !rst) {
                            writeQueue.enqueue(
                                    CancelQueueCommand.createCommand(streamChannelFuture, Http2Error.CANCEL));
                            rst = true;
                        }
                    }
                    onTrailersReceived(headers);
                } else {
                    onHeaderReceived(headers);
                }
            });
        }
        @Override
        public void onData(ByteBuf data, boolean endStream) {
            try {
                executor.execute(() -> doOnData(data, endStream));
            } catch (Throwable t) {
                // Tasks will be rejected when the thread pool is closed or full,
                // ByteBuf needs to be released to avoid out of heap memory leakage.
                // For example, ThreadLessExecutor will be shutdown when request timeout {@link AsyncRpcResult}
                ReferenceCountUtil.release(data);
                LOGGER.error(PROTOCOL_FAILED_RESPONSE, "", "", "submit onData task failed", t);
            }
        }
        private void doOnData(ByteBuf data, boolean endStream) {
            if (transportError != null) {
                // Accumulate error body for diagnostics, but cap at 512 chars
                // before aborting the stream.
                transportError.appendDescription("Data:" + data.toString(StandardCharsets.UTF_8));
                ReferenceCountUtil.release(data);
                if (transportError.description.length() > 512 || endStream) {
                    handleH2TransportError(transportError);
                }
                return;
            }
            if (!headerReceived) {
                handleH2TransportError(TriRpcStatus.INTERNAL.withDescription("headers not received before payload"));
                return;
            }
            deframer.deframe(data);
        }
        @Override
        public void cancelByRemote(long errorCode) {
            executor.execute(() -> {
                transportError =
                        TriRpcStatus.CANCELLED.withDescription("Canceled by remote peer, errorCode=" + errorCode);
                finishProcess(transportError, null, false);
            });
        }
    }
}
| 6,143 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/stream/TripleStreamChannelFuture.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.stream;
import org.apache.dubbo.common.utils.Assert;
import java.util.concurrent.CompletableFuture;
import io.netty.channel.Channel;
import io.netty.handler.codec.http2.Http2StreamChannel;
/**
 * A {@link CompletableFuture} that resolves to the {@link Http2StreamChannel}
 * created for a triple stream, remembering the parent connection channel and,
 * on failure, the cause.
 *
 * <p>Thread-safety: may be completed on the event loop and observed from other
 * threads, hence the volatile cause field and the use of
 * {@link #isCompletedExceptionally()} in {@link #isSuccess()}.
 */
public class TripleStreamChannelFuture extends CompletableFuture<Http2StreamChannel> {
    private final Channel parentChannel;
    // volatile: written by the completing thread, read by callers of cause()/isSuccess().
    private volatile Throwable cause;

    public TripleStreamChannelFuture(Channel parentChannel) {
        Assert.notNull(parentChannel, "parentChannel cannot be null.");
        this.parentChannel = parentChannel;
    }

    public TripleStreamChannelFuture(Http2StreamChannel channel) {
        this.complete(channel);
        this.parentChannel = channel.parent();
    }

    public Channel getParentChannel() {
        return parentChannel;
    }

    @Override
    public boolean completeExceptionally(Throwable cause) {
        boolean result = super.completeExceptionally(cause);
        if (result) {
            this.cause = cause;
        }
        return result;
    }

    /**
     * @return the failure cause recorded by {@link #completeExceptionally(Throwable)},
     *         or null if the future has not (yet) failed through that method
     */
    public Throwable cause() {
        return cause;
    }

    /**
     * @return true if the future completed normally.
     */
    public boolean isSuccess() {
        // Fix: check isCompletedExceptionally() instead of cause() == null. The
        // future is marked done by super.completeExceptionally() *before* the
        // cause field is assigned, so a concurrent reader of the old check could
        // observe a failed future as successful. This also correctly reports
        // cancellation as non-success.
        return isDone() && !isCompletedExceptionally();
    }

    public Http2StreamChannel getNow() {
        return getNow(null);
    }
}
| 6,144 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/stream/Stream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.stream;
import org.apache.dubbo.rpc.TriRpcStatus;
import java.net.SocketAddress;
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.util.concurrent.Future;
/**
* Stream is a bi-directional channel that manipulates the data flow between peers. Inbound data
* from remote peer is acquired by {@link Listener}. Outbound data to remote peer is sent directly
* by {@link Stream}. Backpressure is supported by {@link #request(int)}.
*/
public interface Stream {
    /**
     * Listener that receives inbound data from the remote peer.
     */
    interface Listener {
        /**
         * Callback when a message is received. May be invoked multiple times
         * for a streaming call.
         *
         * @param message message received from remote peer
         * @param isReturnTriException whether the message carries a wrapped triple
         *        exception (presumed from naming; confirm with implementations)
         */
        void onMessage(byte[] message, boolean isReturnTriException);
        /**
         * Callback when receive cancel signal.
         *
         * @param status the cancel status
         */
        void onCancelByRemote(TriRpcStatus status);
    }
    /**
     * Send headers to remote peer.
     *
     * @param headers headers to send to remote peer
     * @return future to callback when send headers is done
     */
    Future<?> sendHeader(Http2Headers headers);
    /**
     * Cancel by this peer.
     *
     * @param status cancel status to send to remote peer
     * @return future to callback when cancel is done
     */
    Future<?> cancelByLocal(TriRpcStatus status);
    /**
     * Get remote peer address.
     *
     * @return socket address of remote peer
     */
    SocketAddress remoteAddress();
    /**
     * Request n more messages from the remote peer (backpressure).
     *
     * @param n number of message
     */
    void request(int n);
}
| 6,145 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/stream/TripleServerStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.stream;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.utils.StringUtils;
import org.apache.dubbo.rpc.HeaderFilter;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.PathResolver;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.protocol.tri.ExceptionUtils;
import org.apache.dubbo.rpc.protocol.tri.TripleConstant;
import org.apache.dubbo.rpc.protocol.tri.TripleHeaderEnum;
import org.apache.dubbo.rpc.protocol.tri.TripleProtocol;
import org.apache.dubbo.rpc.protocol.tri.call.ReflectionAbstractServerCall;
import org.apache.dubbo.rpc.protocol.tri.call.StubAbstractServerCall;
import org.apache.dubbo.rpc.protocol.tri.command.CancelQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.command.DataQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.command.HeaderQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.command.TextDataQueueCommand;
import org.apache.dubbo.rpc.protocol.tri.compressor.DeCompressor;
import org.apache.dubbo.rpc.protocol.tri.compressor.Identity;
import org.apache.dubbo.rpc.protocol.tri.frame.Deframer;
import org.apache.dubbo.rpc.protocol.tri.frame.TriDecoder;
import org.apache.dubbo.rpc.protocol.tri.transport.AbstractH2TransportListener;
import org.apache.dubbo.rpc.protocol.tri.transport.H2TransportListener;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleWriteQueue;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Executor;
import com.google.protobuf.Any;
import com.google.rpc.DebugInfo;
import com.google.rpc.Status;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelFuture;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
import io.netty.handler.codec.http2.Http2Error;
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.handler.codec.http2.Http2StreamChannel;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.concurrent.Future;
import static io.netty.handler.codec.http.HttpResponseStatus.OK;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_REQUEST;
public class TripleServerStream extends AbstractStream implements ServerStream {
private static final ErrorTypeAwareLogger LOGGER = LoggerFactory.getErrorTypeAwareLogger(TripleServerStream.class);
public final ServerTransportObserver transportObserver = new ServerTransportObserver();
private final TripleWriteQueue writeQueue;
private final PathResolver pathResolver;
private final List<HeaderFilter> filters;
private final String acceptEncoding;
private boolean headerSent;
private boolean trailersSent;
private volatile boolean reset;
private ServerStream.Listener listener;
private final InetSocketAddress remoteAddress;
private Deframer deframer;
private boolean rst = false;
private final Http2StreamChannel http2StreamChannel;
private final TripleStreamChannelFuture tripleStreamChannelFuture;
    /**
     * Create a server-side stream bound to an accepted HTTP/2 stream channel.
     *
     * @param channel the accepted HTTP/2 stream channel
     * @param frameworkModel model used to resolve codecs/compressors
     * @param executor executor on which inbound events are dispatched
     * @param pathResolver resolves request paths to service invokers
     * @param acceptEncoding encodings this server accepts, advertised to the client
     * @param filters header filters applied to the request
     * @param writeQueue shared outbound write queue for this connection
     */
    public TripleServerStream(
            Http2StreamChannel channel,
            FrameworkModel frameworkModel,
            Executor executor,
            PathResolver pathResolver,
            String acceptEncoding,
            List<HeaderFilter> filters,
            TripleWriteQueue writeQueue) {
        super(executor, frameworkModel);
        this.pathResolver = pathResolver;
        this.acceptEncoding = acceptEncoding;
        this.filters = filters;
        this.writeQueue = writeQueue;
        // Capture the remote address now; the channel may close before it is queried.
        this.remoteAddress = (InetSocketAddress) channel.remoteAddress();
        this.http2StreamChannel = channel;
        this.tripleStreamChannelFuture = new TripleStreamChannelFuture(channel);
    }
    @Override
    public SocketAddress remoteAddress() {
        // Captured from the stream channel at construction time.
        return remoteAddress;
    }
    @Override
    public void request(int n) {
        // Backpressure: ask the deframer to deliver up to n more messages.
        // NOTE(review): deframer is assigned during request processing; invoking
        // this before it is set would NPE — confirm callers' ordering.
        deframer.request(n);
    }
public ChannelFuture reset(Http2Error cause) {
ChannelFuture checkResult = preCheck();
if (!checkResult.isSuccess()) {
return checkResult;
}
this.rst = true;
return writeQueue.enqueue(CancelQueueCommand.createCommand(tripleStreamChannelFuture, cause));
}
    @Override
    public ChannelFuture sendHeader(Http2Headers headers) {
        // State guard: headers may be sent at most once, never after trailers,
        // and never once the stream has been reset. Check order determines which
        // error message the caller sees.
        if (reset) {
            return http2StreamChannel.newFailedFuture(
                    new IllegalStateException("Stream already reset, no more headers allowed"));
        }
        if (headerSent) {
            return http2StreamChannel.newFailedFuture(new IllegalStateException("Header already sent"));
        }
        if (trailersSent) {
            return http2StreamChannel.newFailedFuture(new IllegalStateException("Trailers already sent"));
        }
        ChannelFuture checkResult = preCheck();
        if (!checkResult.isSuccess()) {
            return checkResult;
        }
        headerSent = true;
        return writeQueue
                .enqueue(HeaderQueueCommand.createHeaders(tripleStreamChannelFuture, headers, false))
                .addListener(f -> {
                    if (!f.isSuccess()) {
                        // Could not write the response headers: abort the stream.
                        reset(Http2Error.INTERNAL_ERROR);
                    }
                });
    }
@Override
public Future<?> cancelByLocal(TriRpcStatus status) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Cancel stream:%s by local: %s", http2StreamChannel, status));
}
return reset(Http2Error.CANCEL);
}
@Override
public ChannelFuture complete(
TriRpcStatus status, Map<String, Object> attachments, boolean isNeedReturnException, int exceptionCode) {
Http2Headers trailers =
getTrailers(status, attachments, isNeedReturnException, CommonConstants.TRI_EXCEPTION_CODE_NOT_EXISTS);
return sendTrailers(trailers);
}
    /**
     * Sends the trailers frame with END_STREAM set, closing the response. Rejected if the
     * stream was reset or trailers were already written; a failed write resets the stream.
     *
     * @param trailers trailer headers to send
     * @return future of the trailers write (failed future on any state violation)
     */
    private ChannelFuture sendTrailers(Http2Headers trailers) {
        if (reset) {
            return http2StreamChannel.newFailedFuture(
                    new IllegalStateException("Stream already reset, no more trailers allowed"));
        }
        if (trailersSent) {
            return http2StreamChannel.newFailedFuture(new IllegalStateException("Trailers already sent"));
        }
        ChannelFuture checkResult = preCheck();
        if (!checkResult.isSuccess()) {
            return checkResult;
        }
        // For trailers-only responses the trailers double as headers, so mark both sent.
        headerSent = true;
        trailersSent = true;
        return writeQueue
                .enqueue(HeaderQueueCommand.createHeaders(tripleStreamChannelFuture, trailers, true))
                .addListener(f -> {
                    if (!f.isSuccess()) {
                        reset(Http2Error.INTERNAL_ERROR);
                    }
                });
    }
    /**
     * Builds the trailer headers for the final status. When headers were never sent the
     * trailers also carry the :status/content-type pseudo headers (trailers-only response).
     * For non-OK statuses a grpc-message is attached and, when protobuf is available, a
     * base64-encoded google.rpc.Status detail with stack frames.
     *
     * NOTE(review): isNeedReturnException and exceptionCode are accepted but never read in
     * this method — confirm whether they are intended to influence the trailers.
     *
     * @param rpcStatus final RPC status
     * @param attachments response attachments converted into headers
     * @return the populated trailer headers
     */
    private Http2Headers getTrailers(
            TriRpcStatus rpcStatus, Map<String, Object> attachments, boolean isNeedReturnException, int exceptionCode) {
        DefaultHttp2Headers headers = new DefaultHttp2Headers();
        if (!headerSent) {
            // Trailers-only response: include the response pseudo headers here.
            headers.status(HttpResponseStatus.OK.codeAsText());
            headers.set(HttpHeaderNames.CONTENT_TYPE, TripleConstant.CONTENT_PROTO);
        }
        StreamUtils.convertAttachment(headers, attachments, TripleProtocol.CONVERT_NO_LOWER_HEADER);
        headers.set(TripleHeaderEnum.STATUS_KEY.getHeader(), String.valueOf(rpcStatus.code.code));
        if (rpcStatus.isOk()) {
            return headers;
        }
        // Non-OK: attach a (size-limited, percent-encoded) grpc-message.
        String grpcMessage = getGrpcMessage(rpcStatus);
        grpcMessage = TriRpcStatus.encodeMessage(TriRpcStatus.limitSizeTo1KB(grpcMessage));
        headers.set(TripleHeaderEnum.MESSAGE_KEY.getHeader(), grpcMessage);
        if (!getGrpcStatusDetailEnabled()) {
            // Protobuf not on the classpath — cannot build the rich status detail.
            return headers;
        }
        Status.Builder builder =
                Status.newBuilder().setCode(rpcStatus.code.code).setMessage(grpcMessage);
        Throwable throwable = rpcStatus.cause;
        if (throwable == null) {
            Status status = builder.build();
            headers.set(
                    TripleHeaderEnum.STATUS_DETAIL_KEY.getHeader(),
                    StreamUtils.encodeBase64ASCII(status.toByteArray()));
            return headers;
        }
        // Include (at most 6) stack frames of the cause as DebugInfo detail.
        DebugInfo debugInfo = DebugInfo.newBuilder()
                .addAllStackEntries(ExceptionUtils.getStackFrameList(throwable, 6))
                // can not use now
                // .setDetail(throwable.getMessage())
                .build();
        builder.addDetails(Any.pack(debugInfo));
        Status status = builder.build();
        headers.set(
                TripleHeaderEnum.STATUS_DETAIL_KEY.getHeader(), StreamUtils.encodeBase64ASCII(status.toByteArray()));
        return headers;
    }
private String getGrpcMessage(TriRpcStatus status) {
if (StringUtils.isNotEmpty(status.description)) {
return status.description;
}
return Optional.ofNullable(status.cause).map(Throwable::getMessage).orElse("unknown");
}
@Override
public ChannelFuture sendMessage(byte[] message, int compressFlag) {
if (reset) {
return http2StreamChannel.newFailedFuture(
new IllegalStateException("Stream already reset, no more body allowed"));
}
if (!headerSent) {
return http2StreamChannel.newFailedFuture(
new IllegalStateException("Headers did not sent before send body"));
}
if (trailersSent) {
return http2StreamChannel.newFailedFuture(
new IllegalStateException("Trailers already sent, no more body allowed"));
}
ChannelFuture checkResult = preCheck();
if (!checkResult.isSuccess()) {
return checkResult;
}
return writeQueue.enqueue(DataQueueCommand.create(tripleStreamChannelFuture, message, false, compressFlag));
}
    /**
     * Error before the gRPC server stream is established: an HTTP plain-text error body is
     * returned instead of a gRPC framed response.
     *
     * @param code HTTP status code of the error
     * @param status triple status carrying the error code and description
     */
    private void responsePlainTextError(int code, TriRpcStatus status) {
        ChannelFuture checkResult = preCheck();
        if (!checkResult.isSuccess()) {
            // Stream already unusable — nothing to write.
            return;
        }
        Http2Headers headers = new DefaultHttp2Headers(true)
                .status(String.valueOf(code))
                .setInt(TripleHeaderEnum.STATUS_KEY.getHeader(), status.code.code)
                .set(TripleHeaderEnum.MESSAGE_KEY.getHeader(), status.description)
                .set(TripleHeaderEnum.CONTENT_TYPE_KEY.getHeader(), TripleConstant.TEXT_PLAIN_UTF8);
        writeQueue.enqueue(HeaderQueueCommand.createHeaders(tripleStreamChannelFuture, headers, false));
        // Description is sent as the plain-text body; END_STREAM closes the response.
        writeQueue.enqueue(TextDataQueueCommand.createCommand(tripleStreamChannelFuture, status.description, true));
    }
    /**
     * Error while creating the stream (unsupported config or triple protocol error), reported
     * as a gRPC trailers-only response. There is no return value because the stream will be
     * reset if sending the trailers fails.
     *
     * @param status status of the error
     */
    private void responseErr(TriRpcStatus status) {
        Http2Headers trailers = new DefaultHttp2Headers()
                .status(OK.codeAsText())
                .set(HttpHeaderNames.CONTENT_TYPE, TripleConstant.CONTENT_PROTO)
                .setInt(TripleHeaderEnum.STATUS_KEY.getHeader(), status.code.code)
                .set(TripleHeaderEnum.MESSAGE_KEY.getHeader(), status.toEncodedMessage());
        sendTrailers(trailers);
    }
private Invoker<?> getInvoker(Http2Headers headers, String serviceName) {
final String version = headers.contains(TripleHeaderEnum.SERVICE_VERSION.getHeader())
? headers.get(TripleHeaderEnum.SERVICE_VERSION.getHeader()).toString()
: null;
final String group = headers.contains(TripleHeaderEnum.SERVICE_GROUP.getHeader())
? headers.get(TripleHeaderEnum.SERVICE_GROUP.getHeader()).toString()
: null;
final String key = URL.buildKey(serviceName, group, version);
Invoker<?> invoker = pathResolver.resolve(key);
if (invoker == null && TripleProtocol.RESOLVE_FALLBACK_TO_DEFAULT) {
invoker = pathResolver.resolve(URL.buildKey(serviceName, group, "1.0.0"));
}
if (invoker == null && TripleProtocol.RESOLVE_FALLBACK_TO_DEFAULT) {
invoker = pathResolver.resolve(serviceName);
}
return invoker;
}
    /**
     * Verifies the stream channel is still usable before a write: fails when the channel is
     * inactive or the stream has been locally reset.
     *
     * @return a succeeded future when writable, otherwise a failed future with the reason
     */
    private ChannelFuture preCheck() {
        if (!http2StreamChannel.isActive()) {
            return http2StreamChannel.newFailedFuture(new IOException("stream channel is closed"));
        }
        if (rst) {
            return http2StreamChannel.newFailedFuture(new IOException("stream channel has reset"));
        }
        return http2StreamChannel.newSucceededFuture();
    }
    /**
     * Per-stream HTTP/2 transport listener for the server side. Validates the request line and
     * gRPC headers, resolves the target invoker and wires the frame deframer to the server
     * call listener. Header/data callbacks are re-dispatched onto the stream executor so they
     * run serially.
     */
    public class ServerTransportObserver extends AbstractH2TransportListener implements H2TransportListener {
        /**
         * A supported Content-Type must start with "application/grpc".
         */
        private boolean supportContentType(String contentType) {
            if (contentType == null) {
                return false;
            }
            return contentType.startsWith(TripleConstant.APPLICATION_GRPC);
        }
        @Override
        public void onHeader(Http2Headers headers, boolean endStream) {
            // Serialize header processing on the stream executor.
            executor.execute(() -> processHeader(headers, endStream));
        }
        /**
         * Validates method, path, content-type and message encoding, then creates the server
         * call listener (stub or reflection based) and the decoder. Validation failures are
         * answered with a plain-text or gRPC error response.
         */
        private void processHeader(Http2Headers headers, boolean endStream) {
            if (!HttpMethod.POST.asciiName().contentEquals(headers.method())) {
                // gRPC requires POST.
                responsePlainTextError(
                        HttpResponseStatus.METHOD_NOT_ALLOWED.code(),
                        TriRpcStatus.INTERNAL.withDescription(
                                String.format("Method '%s' is not supported", headers.method())));
                return;
            }
            if (headers.path() == null) {
                responsePlainTextError(
                        HttpResponseStatus.NOT_FOUND.code(),
                        TriRpcStatus.fromCode(TriRpcStatus.Code.UNIMPLEMENTED.code)
                                .withDescription("Expected path but is missing"));
                return;
            }
            final String path = headers.path().toString();
            if (path.charAt(0) != '/') {
                responsePlainTextError(
                        HttpResponseStatus.NOT_FOUND.code(),
                        TriRpcStatus.fromCode(TriRpcStatus.Code.UNIMPLEMENTED.code)
                                .withDescription(String.format("Expected path to start with /: %s", path)));
                return;
            }
            final CharSequence contentType = HttpUtil.getMimeType(headers.get(HttpHeaderNames.CONTENT_TYPE));
            if (contentType == null) {
                responsePlainTextError(
                        HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE.code(),
                        TriRpcStatus.fromCode(TriRpcStatus.Code.INTERNAL.code)
                                .withDescription("Content-Type is missing from the request"));
                return;
            }
            final String contentString = contentType.toString();
            if (!supportContentType(contentString)) {
                responsePlainTextError(
                        HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE.code(),
                        TriRpcStatus.fromCode(TriRpcStatus.Code.INTERNAL.code)
                                .withDescription(String.format("Content-Type '%s' is not supported", contentString)));
                return;
            }
            // Path format is "/{service}/{method}"; parts[0] is empty because of the leading '/'.
            String[] parts = path.split("/");
            if (parts.length != 3) {
                responseErr(TriRpcStatus.UNIMPLEMENTED.withDescription("Bad path format:" + path));
                return;
            }
            String serviceName = parts[1];
            String originalMethodName = parts[2];
            Invoker<?> invoker = getInvoker(headers, serviceName);
            if (invoker == null) {
                responseErr(TriRpcStatus.UNIMPLEMENTED.withDescription("Service not found:" + serviceName));
                return;
            }
            if (endStream) {
                // Client half-closed with no body — nothing to dispatch.
                return;
            }
            // Pick the decompressor matching the client's grpc-encoding (identity = no-op).
            DeCompressor deCompressor = DeCompressor.NONE;
            CharSequence messageEncoding = headers.get(TripleHeaderEnum.GRPC_ENCODING.getHeader());
            if (null != messageEncoding) {
                String compressorStr = messageEncoding.toString();
                if (!Identity.MESSAGE_ENCODING.equals(compressorStr)) {
                    DeCompressor compressor = DeCompressor.getCompressor(frameworkModel, compressorStr);
                    if (null == compressor) {
                        responseErr(TriRpcStatus.fromCode(TriRpcStatus.Code.UNIMPLEMENTED.code)
                                .withDescription(String.format("Grpc-encoding '%s' is not supported", compressorStr)));
                        return;
                    }
                    deCompressor = compressor;
                }
            }
            Map<String, Object> requestMetadata = headersToMap(
                    headers, () -> Optional.ofNullable(headers.get(TripleHeaderEnum.TRI_HEADER_CONVERT.getHeader()))
                            .map(CharSequence::toString)
                            .orElse(null));
            // Choose stub-based dispatch when a native stub is registered for this path,
            // otherwise fall back to reflection-based invocation.
            boolean hasStub = pathResolver.hasNativeStub(path);
            if (hasStub) {
                listener = new StubAbstractServerCall(
                        invoker,
                        TripleServerStream.this,
                        frameworkModel,
                        acceptEncoding,
                        serviceName,
                        originalMethodName,
                        executor);
            } else {
                listener = new ReflectionAbstractServerCall(
                        invoker,
                        TripleServerStream.this,
                        frameworkModel,
                        acceptEncoding,
                        serviceName,
                        originalMethodName,
                        filters,
                        executor);
            }
            // The deframer must be assigned before onHeader so data frames can be decoded.
            deframer = new TriDecoder(deCompressor, new ServerDecoderListener(listener));
            listener.onHeader(requestMetadata);
        }
        @Override
        public void onData(ByteBuf data, boolean endStream) {
            try {
                executor.execute(() -> doOnData(data, endStream));
            } catch (Throwable t) {
                // Tasks will be rejected when the thread pool is closed or full,
                // ByteBuf needs to be released to avoid out of heap memory leakage.
                // For example, ThreadLessExecutor will be shutdown when request timeout {@link AsyncRpcResult}
                ReferenceCountUtil.release(data);
                LOGGER.error(PROTOCOL_FAILED_REQUEST, "", "", "submit onData task failed", t);
            }
        }
        private void doOnData(ByteBuf data, boolean endStream) {
            // Data received before headers were fully processed — no deframer yet, drop it.
            if (deframer == null) {
                return;
            }
            deframer.deframe(data);
            if (endStream) {
                // Client half-closed: flush any remaining messages and signal completion.
                deframer.close();
            }
        }
        @Override
        public void cancelByRemote(long errorCode) {
            // Remote cancel: mark the stream so no further frames are written.
            TripleServerStream.this.reset = true;
            if (!trailersSent) {
                // send rst if stream not closed
                reset(Http2Error.valueOf(errorCode));
            }
            if (listener == null) {
                return;
            }
            executor.execute(() -> listener.onCancelByRemote(
                    TriRpcStatus.CANCELLED.withDescription("Canceled by client ,errorCode=" + errorCode)));
        }
    }
private static class ServerDecoderListener implements TriDecoder.Listener {
private final ServerStream.Listener listener;
public ServerDecoderListener(ServerStream.Listener listener) {
this.listener = listener;
}
@Override
public void onRawMessage(byte[] data) {
listener.onMessage(data, false);
}
@Override
public void close() {
listener.onComplete();
}
}
}
| 6,146 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/stream/ServerStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.stream;
import org.apache.dubbo.rpc.TriRpcStatus;
import java.util.Map;
import io.netty.util.concurrent.Future;
/**
 * ServerStream is used to send responses to the client and receive requests from the client.
 * {@link Listener} receives the inbound events (headers, messages, completion).
 */
public interface ServerStream extends Stream {
    interface Listener extends Stream.Listener {
        /**
         * Callback when request headers are received.
         *
         * @param headers headers received from the remote peer
         */
        void onHeader(Map<String, Object> headers);
        /**
         * Callback when the client half-closes, i.e. no more data will arrive.
         */
        void onComplete();
    }
    /**
     * Complete the stream and send the final status (trailers) to the client.
     *
     * @param status response status
     * @param attachments response attachments
     * @param isNeedReturnException whether the exception should be returned to the client
     * @param exceptionCode exception code to propagate
     * @return a future that indicates the completion of sending trailers
     */
    Future<?> complete(
            TriRpcStatus status, Map<String, Object> attachments, boolean isNeedReturnException, int exceptionCode);
    /**
     * Send a message to the client.
     *
     * @param message raw message bytes
     * @param compressFlag whether the message payload is compressed
     * @return a future that indicates the completion of sending the message
     */
    Future<?> sendMessage(byte[] message, int compressFlag);
}
| 6,147 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/stream/AbstractStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.stream;
import org.apache.dubbo.common.threadpool.serial.SerializingExecutor;
import org.apache.dubbo.common.utils.ClassUtils;
import org.apache.dubbo.rpc.model.FrameworkModel;
import java.util.concurrent.Executor;
/**
 * Base class for triple streams. Wraps the supplied executor in a {@link SerializingExecutor}
 * so stream callbacks run serially, and caches whether protobuf is on the classpath (used to
 * decide if rich grpc-status details can be emitted).
 */
public abstract class AbstractStream implements Stream {

    private static final boolean HAS_PROTOBUF = detectProtobuf();

    protected final FrameworkModel frameworkModel;
    protected Executor executor;

    public AbstractStream(Executor executor, FrameworkModel frameworkModel) {
        this.frameworkModel = frameworkModel;
        this.executor = new SerializingExecutor(executor);
    }

    public void setExecutor(Executor executor) {
        this.executor = new SerializingExecutor(executor);
    }

    public static boolean getGrpcStatusDetailEnabled() {
        return HAS_PROTOBUF;
    }

    /** Probes the classpath once for com.google.protobuf.Message. */
    private static boolean detectProtobuf() {
        try {
            ClassUtils.forName("com.google.protobuf.Message");
            return true;
        } catch (ClassNotFoundException ignored) {
            return false;
        }
    }
}
| 6,148 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/observer/ClientCallToObserverAdapter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.observer;
import org.apache.dubbo.rpc.protocol.tri.CancelableStreamObserver;
import org.apache.dubbo.rpc.protocol.tri.ClientStreamObserver;
import org.apache.dubbo.rpc.protocol.tri.call.ClientCall;
/**
 * Adapts a {@link ClientCall} to the {@link ClientStreamObserver} API so application code can
 * push request messages, errors and completion onto an in-flight triple call.
 */
public class ClientCallToObserverAdapter<T> extends CancelableStreamObserver<T> implements ClientStreamObserver<T> {

    private final ClientCall call;

    /** Set once the observer has been closed by error, completion or cancellation. */
    private boolean terminated;

    public ClientCallToObserverAdapter(ClientCall call) {
        this.call = call;
    }

    public boolean isAutoRequestEnabled() {
        return call.isAutoRequest();
    }

    @Override
    public void onNext(Object data) {
        if (terminated) {
            throw new IllegalStateException("Stream observer has been terminated, no more data is allowed");
        }
        call.sendMessage(data);
    }

    @Override
    public void onError(Throwable throwable) {
        call.cancelByLocal(throwable);
        terminated = true;
    }

    @Override
    public void onCompleted() {
        if (!terminated) {
            call.halfClose();
            terminated = true;
        }
    }

    @Override
    public void cancel(Throwable throwable) {
        call.cancelByLocal(throwable);
        terminated = true;
    }

    @Override
    public void setCompression(String compression) {
        call.setCompression(compression);
    }

    @Override
    public void request(int count) {
        call.request(count);
    }

    @Override
    public void disableAutoFlowControl() {
        call.setAutoRequest(false);
    }
}
| 6,149 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/observer/CallStreamObserver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.observer;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.protocol.tri.compressor.Compressor;
public interface CallStreamObserver<T> extends StreamObserver<T> {
    /**
     * Requests the peer to produce {@code count} more messages to be delivered to the 'inbound'
     * {@link StreamObserver}.
     *
     * <p>This method is safe to call from multiple threads without external synchronization.
     *
     * @param count number of additional messages requested
     */
    void request(int count);
    /**
     * Sets the compression algorithm to use for the call.
     * <p>
     * For streams, setting compression needs to determine whether the metadata has already
     * been sent and perform the corresponding handling.
     *
     * @param compression name of the {@link Compressor} to use
     */
    void setCompression(String compression);
    /**
     * Switches to manual flow control: no message will be delivered to {@link
     * StreamObserver#onNext(Object)} unless it is {@link #request request()}ed. Since {@code
     * request()} may not be called before the call is started, a number of initial requests may
     * be specified.
     */
    void disableAutoFlowControl();
}
| 6,150 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/observer/ServerCallToObserverAdapter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.observer;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.logger.Logger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.rpc.CancellationContext;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.CancelableStreamObserver;
import org.apache.dubbo.rpc.protocol.tri.ServerStreamObserver;
import org.apache.dubbo.rpc.protocol.tri.call.AbstractServerCall;
import java.util.Map;
/**
 * Adapts an {@link AbstractServerCall} to the {@link ServerStreamObserver} API so the service
 * implementation can emit responses, errors and completion for an in-flight server call.
 */
public class ServerCallToObserverAdapter<T> extends CancelableStreamObserver<T> implements ServerStreamObserver<T> {
    private static final Logger LOGGER = LoggerFactory.getLogger(CancelableStreamObserver.class);
    // Context used to propagate cancellation to business code.
    public final CancellationContext cancellationContext;
    private final AbstractServerCall call;
    // Response attachments flushed with the trailers; may be null.
    private Map<String, Object> attachments;
    // Set once the observer has been closed (completed, errored or cancelled).
    private boolean terminated = false;
    private boolean isNeedReturnException = false;
    private Integer exceptionCode = CommonConstants.TRI_EXCEPTION_CODE_NOT_EXISTS;
    public Integer getExceptionCode() {
        return exceptionCode;
    }
    public void setExceptionCode(Integer exceptionCode) {
        this.exceptionCode = exceptionCode;
    }
    public boolean isNeedReturnException() {
        return isNeedReturnException;
    }
    public void setNeedReturnException(boolean needReturnException) {
        isNeedReturnException = needReturnException;
    }
    public ServerCallToObserverAdapter(AbstractServerCall call, CancellationContext cancellationContext) {
        this.call = call;
        this.cancellationContext = cancellationContext;
    }
    public boolean isAutoRequestN() {
        return call.isAutoRequestN();
    }
    public boolean isTerminated() {
        return terminated;
    }
    private void setTerminated() {
        this.terminated = true;
    }
    @Override
    public void onNext(Object data) {
        if (isTerminated()) {
            throw new IllegalStateException("Stream observer has been terminated, no more data is allowed");
        }
        // Propagate the exception flags to the call before each send so they are current.
        call.setExceptionCode(exceptionCode);
        call.setNeedReturnException(isNeedReturnException);
        call.sendMessage(data);
    }
    @Override
    public void onError(Throwable throwable) {
        final TriRpcStatus status = TriRpcStatus.getStatus(throwable);
        onCompleted(status);
    }
    /**
     * Closes the underlying call with the given status; idempotent once terminated.
     */
    public void onCompleted(TriRpcStatus status) {
        if (isTerminated()) {
            return;
        }
        call.setExceptionCode(exceptionCode);
        call.setNeedReturnException(isNeedReturnException);
        call.close(status, attachments);
        setTerminated();
    }
    @Override
    public void onCompleted() {
        onCompleted(TriRpcStatus.OK);
    }
    public void setResponseAttachments(Map<String, Object> attachments) {
        this.attachments = attachments;
    }
    @Override
    public void setCompression(String compression) {
        call.setCompression(compression);
    }
    public void cancel(Throwable throwable) {
        if (terminated) {
            return;
        }
        setTerminated();
        call.cancelByLocal(throwable);
    }
    // True when the elapsed cost exceeds the call's configured timeout (if any).
    public boolean isTimeout(long cost) {
        return call.timeout != null && call.timeout < cost;
    }
    @Override
    public void disableAutoFlowControl() {
        call.disableAutoRequestN();
    }
    @Override
    public void request(int count) {
        call.request(count);
    }
}
| 6,151 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/frame/TriDecoder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.frame;
import org.apache.dubbo.rpc.RpcException;
import org.apache.dubbo.rpc.protocol.tri.compressor.DeCompressor;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
/**
 * Incremental decoder for the gRPC wire framing: each message is a 1-byte flags field
 * (bit 0 = compressed, bits 1-7 reserved), a 4-byte big-endian length, then the payload.
 * Messages are delivered to the {@link Listener} only as requested (flow control).
 */
public class TriDecoder implements Deframer {
    private static final int HEADER_LENGTH = 5;
    private static final int COMPRESSED_FLAG_MASK = 1;
    private static final int RESERVED_MASK = 0xFE;
    // Accumulates incoming buffers until a complete frame is available.
    private final CompositeByteBuf accumulate = Unpooled.compositeBuffer();
    private final Listener listener;
    private final DeCompressor decompressor;
    // Compressed flag of the frame currently being read.
    private boolean compressedFlag;
    // Number of messages the consumer has requested but not yet received.
    private long pendingDeliveries;
    // Re-entrancy guard for deliver() (request() may be called from within a callback).
    private boolean inDelivery = false;
    private boolean closing;
    private boolean closed;
    // Bytes needed before the current state can make progress.
    private int requiredLength = HEADER_LENGTH;
    private GrpcDecodeState state = GrpcDecodeState.HEADER;
    public TriDecoder(DeCompressor decompressor, Listener listener) {
        this.decompressor = decompressor;
        this.listener = listener;
    }
    @Override
    public void deframe(ByteBuf data) {
        if (closing || closed) {
            // ignored
            return;
        }
        accumulate.addComponent(true, data);
        deliver();
    }
    public void request(int numMessages) {
        pendingDeliveries += numMessages;
        deliver();
    }
    @Override
    public void close() {
        closing = true;
        deliver();
    }
    private void deliver() {
        // We can have reentrancy here when using a direct executor, triggered by calls to
        // request more messages. This is safe as we simply loop until pendingDelivers = 0
        if (inDelivery) {
            return;
        }
        inDelivery = true;
        try {
            // Process the uncompressed bytes.
            while (pendingDeliveries > 0 && hasEnoughBytes()) {
                switch (state) {
                    case HEADER:
                        processHeader();
                        break;
                    case PAYLOAD:
                        // Read the body and deliver the message.
                        processBody();
                        // Since we've delivered a message, decrement the number of pending
                        // deliveries remaining.
                        pendingDeliveries--;
                        break;
                    default:
                        throw new AssertionError("Invalid state: " + state);
                }
            }
            if (closing) {
                if (!closed) {
                    closed = true;
                    accumulate.clear();
                    accumulate.release();
                    listener.close();
                }
            }
        } finally {
            inDelivery = false;
        }
    }
    private boolean hasEnoughBytes() {
        return requiredLength - accumulate.readableBytes() <= 0;
    }
    /**
     * Processes the GRPC compression header which is composed of the compression flag and the outer
     * frame length.
     */
    private void processHeader() {
        int type = accumulate.readUnsignedByte();
        if ((type & RESERVED_MASK) != 0) {
            throw new RpcException("gRPC frame header malformed: reserved bits not zero");
        }
        compressedFlag = (type & COMPRESSED_FLAG_MASK) != 0;
        // NOTE(review): readInt() is signed — a frame length >= 2^31 would go negative;
        // presumably lengths are bounded upstream, confirm.
        requiredLength = accumulate.readInt();
        // Continue reading the frame body.
        state = GrpcDecodeState.PAYLOAD;
    }
    /**
     * Processes the GRPC message body, which depending on frame header flags may be compressed.
     */
    private void processBody() {
        // There is no reliable way to get the uncompressed size per message when it's compressed,
        // because the uncompressed bytes are provided through an InputStream whose total size is
        // unknown until all bytes are read, and we don't know when it happens.
        byte[] stream = compressedFlag ? getCompressedBody() : getUncompressedBody();
        listener.onRawMessage(stream);
        // Done with this frame, begin processing the next header.
        state = GrpcDecodeState.HEADER;
        requiredLength = HEADER_LENGTH;
    }
    private byte[] getCompressedBody() {
        final byte[] compressedBody = getUncompressedBody();
        return decompressor.decompress(compressedBody);
    }
    private byte[] getUncompressedBody() {
        byte[] data = new byte[requiredLength];
        accumulate.readBytes(data);
        // Free fully-consumed component buffers.
        accumulate.discardReadComponents();
        return data;
    }
    // Two-state machine: read the 5-byte header, then the payload of the announced length.
    private enum GrpcDecodeState {
        HEADER,
        PAYLOAD
    }
    public interface Listener {
        // Delivers one complete (already decompressed) message payload.
        void onRawMessage(byte[] data);
        // Signals that no more messages will be delivered.
        void close();
    }
}
| 6,152 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/frame/Deframer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.frame;
import io.netty.buffer.ByteBuf;
/**
 * Incrementally decodes raw transport bytes into discrete messages and delivers them to a
 * listener on demand (pull-based flow control).
 */
public interface Deframer {
    /**
     * Adds the given data to this deframer and attempts delivery to the listener.
     *
     * @param data the raw data read from the remote endpoint. Must be non-null.
     */
    void deframe(ByteBuf data);
    /**
     * Requests up to the given number of messages from the call. No additional messages will be
     * delivered.
     *
     * <p>If {@link #close()} has been called, this method will have no effect.
     *
     * @param numMessages the requested number of messages to be delivered to the listener.
     */
    void request(int numMessages);
    /**
     * Closes this deframer and frees any resources. After this method is called, additional calls
     * will have no effect.
     */
    void close();
}
| 6,153 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/ObserverToClientCallListenerAdapter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.TriRpcStatus;
import java.util.Map;
import java.util.function.Consumer;
/**
 * Adapts a {@link StreamObserver} supplied by application code into a {@link ClientCall.Listener},
 * forwarding responses and completion while driving automatic flow control.
 */
public class ObserverToClientCallListenerAdapter implements ClientCall.Listener {

    private final StreamObserver<Object> delegate;

    private ClientCall call;

    private Consumer<ClientCall> onStartConsumer = clientCall -> {};

    public ObserverToClientCallListenerAdapter(StreamObserver<Object> delegate) {
        this.delegate = delegate;
    }

    public void setOnStartConsumer(Consumer<ClientCall> onStartConsumer) {
        this.onStartConsumer = onStartConsumer;
    }

    @Override
    public void onMessage(Object message, int actualContentLength) {
        delegate.onNext(message);
        requestNextIfAuto();
    }

    @Override
    public void onClose(TriRpcStatus status, Map<String, Object> trailers, boolean isReturnTriException) {
        if (!status.isOk()) {
            delegate.onError(status.asException());
        } else {
            delegate.onCompleted();
        }
    }

    @Override
    public void onStart(ClientCall call) {
        this.call = call;
        requestNextIfAuto();
        onStartConsumer.accept(call);
    }

    /** Pulls one more message from the transport when auto flow control is enabled. */
    private void requestNextIfAuto() {
        if (call.isAutoRequest()) {
            call.request(1);
        }
    }
}
| 6,154 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/AbstractServerCallListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.rpc.CancellationContext;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.Result;
import org.apache.dubbo.rpc.RpcContext;
import org.apache.dubbo.rpc.RpcInvocation;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.TripleHeaderEnum;
import org.apache.dubbo.rpc.protocol.tri.observer.ServerCallToObserverAdapter;
import java.net.InetSocketAddress;
import static org.apache.dubbo.common.constants.CommonConstants.REMOTE_APPLICATION_KEY;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_TIMEOUT_SERVER;
/**
 * Base listener for server-side triple calls. It restores the cancellation/RPC
 * context on the invoking thread, dispatches the {@link RpcInvocation} to the
 * target {@link Invoker}, and forwards the asynchronous result (value, business
 * exception, transport error, or timeout) to the response observer. Subclasses
 * decide how a successful return value is emitted via {@link #onReturn(Object)}.
 */
public abstract class AbstractServerCallListener implements AbstractServerCall.Listener {
    private static final ErrorTypeAwareLogger LOGGER =
            LoggerFactory.getErrorTypeAwareLogger(AbstractServerCallListener.class);
    // Propagates cancellation from the transport into the business invocation.
    public final CancellationContext cancellationContext;
    final RpcInvocation invocation;
    final Invoker<?> invoker;
    final ServerCallToObserverAdapter<Object> responseObserver;
    public AbstractServerCallListener(
            RpcInvocation invocation, Invoker<?> invoker, ServerCallToObserverAdapter<Object> responseObserver) {
        this.invocation = invocation;
        this.invoker = invoker;
        // The observer owns the cancellation context for this call; share it.
        this.cancellationContext = responseObserver.cancellationContext;
        this.responseObserver = responseObserver;
    }
    /**
     * Runs the invocation on the current thread. Context state (cancellation and
     * service context) is installed before invoking and removed in the finally
     * block; the result callback may execute on a different thread later.
     */
    public void invoke() {
        RpcContext.restoreCancellationContext(cancellationContext);
        // Remote address / consumer app name were stashed in the invocation
        // attributes by the transport layer; move them into the service context.
        InetSocketAddress remoteAddress =
                (InetSocketAddress) invocation.getAttributes().remove(AbstractServerCall.REMOTE_ADDRESS_KEY);
        RpcContext.getServiceContext().setRemoteAddress(remoteAddress);
        String remoteApp = (String) invocation.getAttributes().remove(TripleHeaderEnum.CONSUMER_APP_NAME_KEY);
        if (null != remoteApp) {
            RpcContext.getServiceContext().setRemoteApplicationName(remoteApp);
            invocation.setAttachmentIfAbsent(REMOTE_APPLICATION_KEY, remoteApp);
        }
        // Start time used to detect server-side timeout when the result arrives.
        final long stInMillis = System.currentTimeMillis();
        try {
            final Result response = invoker.invoke(invocation);
            response.whenCompleteWithContext((r, t) -> {
                responseObserver.setResponseAttachments(response.getObjectAttachments());
                if (t != null) {
                    // Framework-level failure (not a business exception).
                    responseObserver.onError(t);
                    return;
                }
                if (response.hasException()) {
                    doOnResponseHasException(response.getException());
                    return;
                }
                final long cost = System.currentTimeMillis() - stInMillis;
                if (responseObserver.isTimeout(cost)) {
                    // Deadline already passed on the client side: log and close
                    // with DEADLINE_EXCEEDED instead of sending a stale response.
                    LOGGER.error(
                            PROTOCOL_TIMEOUT_SERVER,
                            "",
                            "",
                            String.format(
                                    "Invoke timeout at server side, ignored to send response. service=%s method=%s cost=%s",
                                    invocation.getTargetServiceUniqueName(), invocation.getMethodName(), cost));
                    responseObserver.onCompleted(TriRpcStatus.DEADLINE_EXCEEDED);
                    return;
                }
                onReturn(r.getValue());
            });
        } catch (Exception e) {
            responseObserver.onError(e);
        } finally {
            // Always clear the thread-local contexts installed above.
            RpcContext.removeCancellationContext();
            RpcContext.removeContext();
        }
    }
    /**
     * Handles a business exception carried by the {@link Result}; default is to
     * surface it through the observer. Subclasses may override (e.g. to wrap it).
     */
    protected void doOnResponseHasException(Throwable t) {
        responseObserver.onError(t);
    }
    /**
     * Emits a successful return value; unary and streaming listeners differ here.
     */
    public abstract void onReturn(Object value);
}
| 6,155 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/AbstractServerCall.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.utils.StringUtils;
import org.apache.dubbo.rpc.CancellationContext;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.RpcContext;
import org.apache.dubbo.rpc.RpcInvocation;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.model.MethodDescriptor;
import org.apache.dubbo.rpc.model.PackableMethod;
import org.apache.dubbo.rpc.model.ServiceDescriptor;
import org.apache.dubbo.rpc.protocol.tri.ClassLoadUtil;
import org.apache.dubbo.rpc.protocol.tri.TripleConstant;
import org.apache.dubbo.rpc.protocol.tri.TripleHeaderEnum;
import org.apache.dubbo.rpc.protocol.tri.compressor.Compressor;
import org.apache.dubbo.rpc.protocol.tri.compressor.Identity;
import org.apache.dubbo.rpc.protocol.tri.observer.ServerCallToObserverAdapter;
import org.apache.dubbo.rpc.protocol.tri.stream.ServerStream;
import org.apache.dubbo.rpc.protocol.tri.stream.StreamUtils;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
import io.netty.util.concurrent.Future;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_CREATE_STREAM_TRIPLE;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_PARSE;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_REQUEST;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_SERIALIZE_TRIPLE;
/**
 * Base server-side call for the triple protocol. Bridges an HTTP/2
 * {@link ServerStream} to the Dubbo invocation pipeline: parses inbound
 * messages, builds the {@link RpcInvocation} from request metadata, dispatches
 * to the service {@link Invoker}, and writes the (optionally compressed)
 * response frames and trailers back to the stream.
 */
public abstract class AbstractServerCall implements ServerCall, ServerStream.Listener {

    public static final String REMOTE_ADDRESS_KEY = "tri.remote.address";

    private static final ErrorTypeAwareLogger LOGGER = LoggerFactory.getErrorTypeAwareLogger(AbstractServerCall.class);

    public final Invoker<?> invoker;
    public final FrameworkModel frameworkModel;
    public final ServerStream stream;
    public final Executor executor;
    public final String methodName;
    public final String serviceName;
    public final ServiceDescriptor serviceDescriptor;
    private final String acceptEncoding;
    // When true, the next inbound message is requested automatically after each delivery.
    public boolean autoRequestN = true;
    // Request timeout in milliseconds parsed from the timeout header; null when absent/invalid.
    public Long timeout;
    ServerCall.Listener listener;
    private Compressor compressor;
    private boolean headerSent;
    private boolean closed;
    CancellationContext cancellationContext;
    protected MethodDescriptor methodDescriptor;
    protected PackableMethod packableMethod;
    protected Map<String, Object> requestMetadata;
    private Integer exceptionCode = CommonConstants.TRI_EXCEPTION_CODE_NOT_EXISTS;

    public Integer getExceptionCode() {
        return exceptionCode;
    }

    public void setExceptionCode(Integer exceptionCode) {
        this.exceptionCode = exceptionCode;
    }

    // Whether the raw exception should be serialized back to the client in the trailers.
    private boolean isNeedReturnException = false;

    public boolean isNeedReturnException() {
        return isNeedReturnException;
    }

    public void setNeedReturnException(boolean needReturnException) {
        isNeedReturnException = needReturnException;
    }

    AbstractServerCall(
            Invoker<?> invoker,
            ServerStream stream,
            FrameworkModel frameworkModel,
            ServiceDescriptor serviceDescriptor,
            String acceptEncoding,
            String serviceName,
            String methodName,
            Executor executor) {
        Objects.requireNonNull(serviceDescriptor, "No service descriptor found for " + invoker.getUrl());
        this.invoker = invoker;
        // is already serialized in the stream, so we don't need to serialize it again.
        this.executor = executor;
        this.frameworkModel = frameworkModel;
        this.serviceDescriptor = serviceDescriptor;
        this.serviceName = serviceName;
        this.methodName = methodName;
        this.stream = stream;
        this.acceptEncoding = acceptEncoding;
    }

    // stream listener start

    /**
     * Called once when request headers arrive; stores the metadata and starts the call,
     * or answers UNIMPLEMENTED when the service is unknown.
     */
    @Override
    public void onHeader(Map<String, Object> requestMetadata) {
        this.requestMetadata = requestMetadata;
        if (serviceDescriptor == null) {
            responseErr(TriRpcStatus.UNIMPLEMENTED.withDescription("Service not found:" + serviceName));
            return;
        }
        startCall();
    }

    protected void startCall() {
        RpcInvocation invocation = buildInvocation(methodDescriptor);
        listener = startInternalCall(invocation, methodDescriptor, invoker);
    }

    @Override
    public final void request(int numMessages) {
        stream.request(numMessages);
    }

    @Override
    public final void sendMessage(Object message) {
        if (closed) {
            throw new IllegalStateException("Stream has already canceled");
        }
        // is already in executor
        doSendMessage(message);
    }

    /**
     * Serializes, optionally compresses and writes one response message.
     * Sends response headers lazily before the first message.
     */
    private void doSendMessage(Object message) {
        if (closed) {
            return;
        }
        if (!headerSent) {
            sendHeader();
        }
        final byte[] data;
        try {
            data = packableMethod.packResponse(message);
        } catch (Exception e) {
            close(
                    TriRpcStatus.INTERNAL
                            .withDescription("Serialize response failed")
                            .withCause(e),
                    null);
            LOGGER.error(
                    PROTOCOL_FAILED_SERIALIZE_TRIPLE,
                    "",
                    "",
                    String.format("Serialize triple response failed, service=%s method=%s", serviceName, methodName),
                    e);
            return;
        }
        if (data == null) {
            close(TriRpcStatus.INTERNAL.withDescription("Missing response"), null);
            return;
        }
        Future<?> future;
        if (compressor != null) {
            // The compressed flag is 0 only for the identity (no-op) encoding.
            int compressedFlag = Identity.MESSAGE_ENCODING.equals(compressor.getMessageEncoding()) ? 0 : 1;
            final byte[] compressed = compressor.compress(data);
            future = stream.sendMessage(compressed, compressedFlag);
        } else {
            future = stream.sendMessage(data, 0);
        }
        future.addListener(f -> {
            if (!f.isSuccess()) {
                cancelDual(TriRpcStatus.CANCELLED
                        .withDescription("Send message failed")
                        .withCause(f.cause()));
            }
        });
    }

    @Override
    public final void onComplete() {
        if (listener == null) {
            // It will enter here when there is an error in the header
            return;
        }
        // Both 'onError' and 'onComplete' are termination operators.
        // The stream will be closed when 'onError' was called, and 'onComplete' is not allowed to be called again.
        if (isClosed()) {
            return;
        }
        listener.onComplete();
    }

    /**
     * Deserializes one inbound message and hands it to the listener; any failure
     * closes the call with UNKNOWN. The context classloader is restored afterwards
     * because deserialization may switch it.
     */
    @Override
    public final void onMessage(byte[] message, boolean isReturnTriException) {
        ClassLoader tccl = Thread.currentThread().getContextClassLoader();
        try {
            Object instance = parseSingleMessage(message);
            listener.onMessage(instance, message.length);
        } catch (Exception e) {
            final TriRpcStatus status =
                    TriRpcStatus.UNKNOWN.withDescription("Server error").withCause(e);
            close(status, null);
            LOGGER.error(
                    PROTOCOL_FAILED_REQUEST,
                    "",
                    "",
                    "Process request failed. service=" + serviceName + " method=" + methodName,
                    e);
        } finally {
            ClassLoadUtil.switchContextLoader(tccl);
        }
    }

    protected abstract Object parseSingleMessage(byte[] data) throws Exception;

    @Override
    public final void onCancelByRemote(TriRpcStatus status) {
        closed = true;
        if (listener == null) {
            return;
        }
        cancellationContext.cancel(status.cause);
        listener.onCancel(status);
    }
    // stream listener end

    public final boolean isClosed() {
        return closed;
    }

    /**
     * Build the RpcInvocation with metadata and execute headerFilter
     *
     * @return RpcInvocation
     */
    protected RpcInvocation buildInvocation(MethodDescriptor methodDescriptor) {
        final URL url = invoker.getUrl();
        RpcInvocation inv = new RpcInvocation(
                url.getServiceModel(),
                methodDescriptor.getMethodName(),
                serviceDescriptor.getInterfaceName(),
                url.getProtocolServiceKey(),
                methodDescriptor.getParameterClasses(),
                new Object[0]);
        inv.setTargetServiceUniqueName(url.getServiceKey());
        inv.setReturnTypes(methodDescriptor.getReturnTypes());
        inv.setObjectAttachments(StreamUtils.toAttachments(requestMetadata));
        inv.put(REMOTE_ADDRESS_KEY, stream.remoteAddress());
        // handle timeout
        String timeout = (String) requestMetadata.get(TripleHeaderEnum.TIMEOUT.getHeader());
        try {
            if (Objects.nonNull(timeout)) {
                this.timeout = parseTimeoutToMills(timeout);
            }
        } catch (Throwable t) {
            // An unparsable timeout is ignored rather than failing the call.
            LOGGER.warn(
                    PROTOCOL_FAILED_PARSE,
                    "",
                    "",
                    String.format(
                            "Failed to parse request timeout set from:%s, service=%s " + "method=%s",
                            timeout, serviceDescriptor.getInterfaceName(), methodName));
        }
        if (null != requestMetadata.get(TripleHeaderEnum.CONSUMER_APP_NAME_KEY.getHeader())) {
            inv.put(
                    TripleHeaderEnum.CONSUMER_APP_NAME_KEY,
                    requestMetadata.get(TripleHeaderEnum.CONSUMER_APP_NAME_KEY.getHeader()));
        }
        return inv;
    }

    /**
     * Writes the response headers exactly once, before the first message.
     */
    private void sendHeader() {
        if (closed) {
            return;
        }
        if (headerSent) {
            throw new IllegalStateException("Header has already sent");
        }
        headerSent = true;
        DefaultHttp2Headers headers = new DefaultHttp2Headers();
        headers.status(HttpResponseStatus.OK.codeAsText());
        headers.set(HttpHeaderNames.CONTENT_TYPE, TripleConstant.CONTENT_PROTO);
        if (acceptEncoding != null) {
            headers.set(HttpHeaderNames.ACCEPT_ENCODING, acceptEncoding);
        }
        if (compressor != null) {
            headers.set(TripleHeaderEnum.GRPC_ENCODING.getHeader(), compressor.getMessageEncoding());
        }
        if (!exceptionCode.equals(CommonConstants.TRI_EXCEPTION_CODE_NOT_EXISTS)) {
            headers.set(TripleHeaderEnum.TRI_EXCEPTION_CODE.getHeader(), String.valueOf(exceptionCode));
        }
        // send header failed will reset stream and close request observer cause no more data will be sent
        stream.sendHeader(headers).addListener(f -> {
            if (!f.isSuccess()) {
                cancelDual(TriRpcStatus.INTERNAL.withCause(f.cause()));
            }
        });
    }

    /**
     * Cancels both directions: notifies the listener and the cancellation context.
     */
    private void cancelDual(TriRpcStatus status) {
        closed = true;
        listener.onCancel(status);
        cancellationContext.cancel(status.asException());
    }

    public void cancelByLocal(Throwable throwable) {
        if (closed) {
            return;
        }
        closed = true;
        cancellationContext.cancel(throwable);
        stream.cancelByLocal(TriRpcStatus.CANCELLED.withCause(throwable));
    }

    public void setCompression(String compression) {
        if (headerSent) {
            // Compression is advertised in the headers; it cannot change afterwards.
            throw new IllegalStateException("Can not set compression after header sent");
        }
        this.compressor = Compressor.getCompressor(frameworkModel, compression);
    }

    public void disableAutoRequestN() {
        autoRequestN = false;
    }

    public boolean isAutoRequestN() {
        return autoRequestN;
    }

    public void close(TriRpcStatus status, Map<String, Object> attachments) {
        doClose(status, attachments);
    }

    private void doClose(TriRpcStatus status, Map<String, Object> attachments) {
        if (closed) {
            return;
        }
        closed = true;
        stream.complete(status, attachments, isNeedReturnException, exceptionCode);
    }

    /**
     * Parses a gRPC-style timeout value (number + unit suffix: n/u/m/S/M/H)
     * into milliseconds; returns null for empty or invalid input.
     */
    protected Long parseTimeoutToMills(String timeoutVal) {
        if (StringUtils.isEmpty(timeoutVal) || StringUtils.isContains(timeoutVal, "null")) {
            return null;
        }
        long value = Long.parseLong(timeoutVal.substring(0, timeoutVal.length() - 1));
        char unit = timeoutVal.charAt(timeoutVal.length() - 1);
        switch (unit) {
            case 'n':
                return TimeUnit.NANOSECONDS.toMillis(value);
            case 'u':
                return TimeUnit.MICROSECONDS.toMillis(value);
            case 'm':
                return value;
            case 'S':
                return TimeUnit.SECONDS.toMillis(value);
            case 'M':
                return TimeUnit.MINUTES.toMillis(value);
            case 'H':
                return TimeUnit.HOURS.toMillis(value);
            default:
                // invalid timeout config
                return null;
        }
    }

    /**
     * Error in create stream, unsupported config or triple protocol error.
     *
     * @param status response status
     */
    protected void responseErr(TriRpcStatus status) {
        if (closed) {
            return;
        }
        closed = true;
        stream.complete(status, null, false, CommonConstants.TRI_EXCEPTION_CODE_NOT_EXISTS);
        // Fixed log message: was missing '=' between "method" and the method name.
        LOGGER.error(
                PROTOCOL_FAILED_REQUEST,
                "",
                "",
                "Triple request error: service=" + serviceName + " method=" + methodName,
                status.asException());
    }

    /**
     * Creates the listener matching the method's RPC type (unary, server stream,
     * bi/client stream) and primes the stream's flow control.
     */
    protected ServerCall.Listener startInternalCall(
            RpcInvocation invocation, MethodDescriptor methodDescriptor, Invoker<?> invoker) {
        this.cancellationContext = RpcContext.getCancellationContext();
        ServerCallToObserverAdapter<Object> responseObserver =
                new ServerCallToObserverAdapter<>(this, cancellationContext);
        try {
            ServerCall.Listener listener;
            switch (methodDescriptor.getRpcType()) {
                case UNARY:
                    listener = new UnaryServerCallListener(
                            invocation, invoker, responseObserver, packableMethod.needWrapper());
                    request(2);
                    break;
                case SERVER_STREAM:
                    listener = new ServerStreamServerCallListener(invocation, invoker, responseObserver);
                    request(2);
                    break;
                case BI_STREAM:
                case CLIENT_STREAM:
                    listener = new BiStreamServerCallListener(invocation, invoker, responseObserver);
                    request(1);
                    break;
                default:
                    throw new IllegalStateException("Can not reach here");
            }
            return listener;
        } catch (Exception e) {
            LOGGER.error(PROTOCOL_FAILED_CREATE_STREAM_TRIPLE, "", "", "Create triple stream failed", e);
            responseErr(TriRpcStatus.INTERNAL
                    .withDescription("Create stream failed")
                    .withCause(e));
        }
        return null;
    }
}
| 6,156 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/StubAbstractServerCall.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.model.ServiceDescriptor;
import org.apache.dubbo.rpc.model.StubMethodDescriptor;
import org.apache.dubbo.rpc.protocol.tri.stream.ServerStream;
import org.apache.dubbo.rpc.stub.StubSuppliers;
import java.util.concurrent.Executor;
/**
 * Server call for stub-based (code-generated) services. The generated
 * {@link StubMethodDescriptor} serves double duty: it describes the method and
 * carries the request/response (de)serialization logic.
 */
public class StubAbstractServerCall extends AbstractServerCall {

    public StubAbstractServerCall(
            Invoker<?> invoker,
            ServerStream serverStream,
            FrameworkModel frameworkModel,
            String acceptEncoding,
            String serviceName,
            String methodName,
            Executor executor) {
        super(
                invoker,
                serverStream,
                frameworkModel,
                getServiceDescriptor(invoker.getUrl(), serviceName),
                acceptEncoding,
                serviceName,
                methodName,
                executor);
        // Stub services expose exactly one descriptor per method name.
        this.methodDescriptor = serviceDescriptor.getMethods(methodName).get(0);
        this.packableMethod = (StubMethodDescriptor) methodDescriptor;
    }

    /**
     * Prefers the descriptor attached to the URL's service model and falls back
     * to the global stub registry when none is present.
     */
    private static ServiceDescriptor getServiceDescriptor(URL url, String serviceName) {
        return url.getServiceModel() != null
                ? url.getServiceModel().getServiceModel()
                : StubSuppliers.getServiceDescriptor(serviceName);
    }

    @Override
    protected Object parseSingleMessage(byte[] data) throws Exception {
        // Delegate deserialization to the generated stub method.
        return packableMethod.parseRequest(data);
    }
}
| 6,157 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/ClientCall.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.RequestMetadata;
import java.util.Map;
/**
* ClientCall does not care about transport layer details.
*/
public interface ClientCall {
    /**
     * Listener for receive response.
     */
    interface Listener {
        /**
         * Called when the call is started, user can use this to set some configurations.
         *
         * @param call call implementation
         */
        void onStart(ClientCall call);
        /**
         * Callback when message received.
         *
         * @param message message received
         * @param actualContentLength actual content length from body
         */
        void onMessage(Object message, int actualContentLength);
        /**
         * Callback when call is finished. Called exactly once per call.
         *
         * @param status response status; {@code status.isOk()} indicates success
         * @param trailers response trailers
         * @param isReturnTriException whether the message carries a serialized exception
         */
        void onClose(TriRpcStatus status, Map<String, Object> trailers, boolean isReturnTriException);
    }
    /**
     * Send reset to server, no more data will be sent or received.
     *
     * @param t cause
     */
    void cancelByLocal(Throwable t);
    /**
     * Request max n message from server
     *
     * @param messageNumber max message number
     */
    void request(int messageNumber);
    /**
     * Send message to server
     *
     * @param message request to send
     */
    void sendMessage(Object message);
    /**
     * Starts the call and binds the response listener.
     *
     * @param metadata request metadata
     * @param responseListener the listener to receive response
     * @return the stream observer representing the request sink
     */
    StreamObserver<Object> start(RequestMetadata metadata, Listener responseListener);
    /**
     * @return true if this call is auto request
     */
    boolean isAutoRequest();
    /**
     * Set auto request for this call
     *
     * @param autoRequest whether auto request is enabled
     */
    void setAutoRequest(boolean autoRequest);
    /**
     * No more data will be sent.
     */
    void halfClose();
    /**
     * Set compression algorithm for request.
     *
     * @param compression compression algorithm
     */
    void setCompression(String compression);
}
| 6,158 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/TripleClientCall.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.remoting.api.connection.AbstractConnectionClient;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.protocol.tri.RequestMetadata;
import org.apache.dubbo.rpc.protocol.tri.compressor.Compressor;
import org.apache.dubbo.rpc.protocol.tri.compressor.Identity;
import org.apache.dubbo.rpc.protocol.tri.observer.ClientCallToObserverAdapter;
import org.apache.dubbo.rpc.protocol.tri.stream.ClientStream;
import org.apache.dubbo.rpc.protocol.tri.stream.StreamUtils;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleClientStream;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleWriteQueue;
import java.util.Map;
import java.util.concurrent.Executor;
import io.netty.channel.Channel;
import io.netty.handler.codec.http2.Http2Exception;
import static io.netty.handler.codec.http2.Http2Error.FLOW_CONTROL_ERROR;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_RESPONSE;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_FAILED_SERIALIZE_TRIPLE;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_STREAM_LISTENER;
/**
 * Client-side call for the triple protocol. Owns one {@link ClientStream},
 * serializes and compresses outbound requests, and dispatches inbound frames,
 * trailers and cancellation events to the user-supplied
 * {@link ClientCall.Listener}.
 */
public class TripleClientCall implements ClientCall, ClientStream.Listener {
    private static final ErrorTypeAwareLogger LOGGER = LoggerFactory.getErrorTypeAwareLogger(TripleClientCall.class);

    private final AbstractConnectionClient connectionClient;
    private final Executor executor;
    private final FrameworkModel frameworkModel;
    private final TripleWriteQueue writeQueue;

    private RequestMetadata requestMetadata;
    private ClientStream stream;
    private ClientCall.Listener listener;
    // True once the call was cancelled (locally or by remote).
    private boolean canceled;
    // True once request headers were written; the stream exists only after that.
    private boolean headerSent;
    private boolean autoRequest = true;
    // True once the call has terminated (onComplete delivered).
    private boolean done;
    // Remembered flow-control failure, used to give sendMessage a precise error.
    private Http2Exception.StreamException streamException;

    public TripleClientCall(
            AbstractConnectionClient connectionClient,
            Executor executor,
            FrameworkModel frameworkModel,
            TripleWriteQueue writeQueue) {
        this.connectionClient = connectionClient;
        this.executor = executor;
        this.frameworkModel = frameworkModel;
        this.writeQueue = writeQueue;
    }

    // stream listener start

    /**
     * Deserializes one inbound message and forwards it to the listener.
     * A deserialization failure cancels the call and closes the listener with INTERNAL.
     */
    @Override
    public void onMessage(byte[] message, boolean isReturnTriException) {
        if (done) {
            LOGGER.warn(
                    PROTOCOL_STREAM_LISTENER,
                    "",
                    "",
                    "Received message from closed stream,connection=" + connectionClient + " service="
                            + requestMetadata.service + " method="
                            + requestMetadata.method.getMethodName());
            return;
        }
        try {
            final Object unpacked = requestMetadata.packableMethod.parseResponse(message, isReturnTriException);
            listener.onMessage(unpacked, message.length);
        } catch (Throwable t) {
            TriRpcStatus status = TriRpcStatus.INTERNAL
                    .withDescription("Deserialize response failed")
                    .withCause(t);
            cancelByLocal(status.asException());
            listener.onClose(status, null, false);
            LOGGER.error(
                    PROTOCOL_FAILED_RESPONSE,
                    "",
                    "",
                    String.format(
                            "Failed to deserialize triple response, service=%s, method=%s,connection=%s",
                            connectionClient, requestMetadata.service, requestMetadata.method.getMethodName()),
                    t);
        }
    }

    @Override
    public void onCancelByRemote(TriRpcStatus status) {
        if (canceled) {
            return;
        }
        canceled = true;
        if (requestMetadata.cancellationContext != null) {
            requestMetadata.cancellationContext.cancel(status.asException());
        }
        onComplete(status, null, null, false);
    }

    /**
     * Terminates the call exactly once: closes the listener with the final status
     * and releases the cancellation context.
     */
    @Override
    public void onComplete(
            TriRpcStatus status,
            Map<String, Object> attachments,
            Map<String, String> excludeHeaders,
            boolean isReturnTriException) {
        if (done) {
            return;
        }
        done = true;
        try {
            listener.onClose(status, StreamUtils.toAttachments(attachments), isReturnTriException);
        } catch (Throwable t) {
            cancelByLocal(TriRpcStatus.INTERNAL
                    .withDescription("Close stream error")
                    .withCause(t)
                    .asException());
        }
        if (requestMetadata.cancellationContext != null) {
            requestMetadata.cancellationContext.cancel(null);
        }
    }

    @Override
    public void onStart() {
        listener.onStart(TripleClientCall.this);
    }

    /**
     * Cancels the call locally. The stream is cancelled exactly once; flow-control
     * failures get a dedicated status and are remembered so that later
     * {@link #sendMessage(Object)} attempts report the precise cause.
     * (Previously the stream was cancelled a second time unconditionally,
     * overwriting the flow-control description.)
     */
    @Override
    public void cancelByLocal(Throwable t) {
        if (canceled) {
            return;
        }
        // did not create stream
        if (!headerSent) {
            return;
        }
        canceled = true;
        if (stream == null) {
            return;
        }
        final TriRpcStatus status;
        if (t instanceof Http2Exception.StreamException
                && ((Http2Exception.StreamException) t).error().equals(FLOW_CONTROL_ERROR)) {
            status = TriRpcStatus.CANCELLED
                    .withCause(t)
                    .withDescription("Due flowcontrol over pendingbytes, Cancelled by client");
            streamException = (Http2Exception.StreamException) t;
        } else {
            status = TriRpcStatus.CANCELLED.withCause(t).withDescription("Cancelled by client");
        }
        stream.cancelByLocal(status);
        if (requestMetadata.cancellationContext != null) {
            requestMetadata.cancellationContext.cancel(t);
        }
    }

    @Override
    public void request(int messageNumber) {
        stream.request(messageNumber);
    }

    /**
     * Serializes, compresses and writes one request message. Lazily sends the
     * request headers before the first message.
     */
    @Override
    public void sendMessage(Object message) {
        if (canceled && null != streamException) {
            throw new IllegalStateException("Due flowcontrol over pendingbytes, Call already canceled");
        } else if (canceled) {
            throw new IllegalStateException("Call already canceled");
        }
        if (!headerSent) {
            headerSent = true;
            stream.sendHeader(requestMetadata.toHeaders());
        }
        final byte[] data;
        try {
            data = requestMetadata.packableMethod.packRequest(message);
            // The compressed flag is 0 only for the identity (no-op) encoding.
            int compressed = Identity.MESSAGE_ENCODING.equals(requestMetadata.compressor.getMessageEncoding()) ? 0 : 1;
            final byte[] compress = requestMetadata.compressor.compress(data);
            stream.sendMessage(compress, compressed, false).addListener(f -> {
                if (!f.isSuccess()) {
                    cancelByLocal(f.cause());
                }
            });
        } catch (Throwable t) {
            LOGGER.error(
                    PROTOCOL_FAILED_SERIALIZE_TRIPLE,
                    "",
                    "",
                    String.format(
                            "Serialize triple request failed, service=%s method=%s",
                            requestMetadata.service, requestMetadata.method.getMethodName()),
                    t);
            cancelByLocal(t);
            listener.onClose(
                    TriRpcStatus.INTERNAL
                            .withDescription("Serialize request failed")
                            .withCause(t),
                    null,
                    false);
        }
    }
    // stream listener end

    @Override
    public void halfClose() {
        if (!headerSent) {
            return;
        }
        if (canceled) {
            return;
        }
        stream.halfClose().addListener(f -> {
            if (!f.isSuccess()) {
                cancelByLocal(new IllegalStateException("Half close failed", f.cause()));
            }
        });
    }

    @Override
    public void setCompression(String compression) {
        this.requestMetadata.compressor = Compressor.getCompressor(frameworkModel, compression);
    }

    @Override
    public StreamObserver<Object> start(RequestMetadata metadata, ClientCall.Listener responseListener) {
        this.requestMetadata = metadata;
        this.listener = responseListener;
        this.stream = new TripleClientStream(
                frameworkModel, executor, (Channel) connectionClient.getChannel(true), this, writeQueue);
        return new ClientCallToObserverAdapter<>(this);
    }

    @Override
    public boolean isAutoRequest() {
        return autoRequest;
    }

    @Override
    public void setAutoRequest(boolean autoRequest) {
        this.autoRequest = autoRequest;
    }
}
| 6,159 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/UnaryClientCallListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.remoting.Constants;
import org.apache.dubbo.rpc.AppResponse;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.DeadlineFuture;
import java.util.Map;
/**
 * {@link ClientCall.Listener} for unary calls: buffers the single response message and
 * completes the bound {@link DeadlineFuture} once the call is closed.
 */
public class UnaryClientCallListener implements ClientCall.Listener {

    /** Future the caller waits on; completed in {@link #onClose}. */
    private final DeadlineFuture future;

    /** The single response payload received from the server, if any. */
    private Object appResponse;

    /** Actual body length of the received response message. */
    private int actualContentLength;

    public UnaryClientCallListener(DeadlineFuture deadlineFuture) {
        this.future = deadlineFuture;
    }

    @Override
    public void onMessage(Object message, int actualContentLength) {
        this.appResponse = message;
        this.actualContentLength = actualContentLength;
    }

    @Override
    public void onClose(TriRpcStatus status, Map<String, Object> trailers, boolean isReturnTriException) {
        AppResponse result = new AppResponse();
        result.setObjectAttachments(trailers);
        if (!status.isOk()) {
            result.setException(status.asException());
        } else if (isReturnTriException) {
            // Server delivered a wrapped business exception as the payload itself.
            result.setException((Exception) appResponse);
        } else {
            result.setValue(appResponse);
        }
        result.setAttribute(Constants.CONTENT_LENGTH_KEY, actualContentLength);
        future.received(status, result);
    }

    @Override
    public void onStart(ClientCall call) {
        // Cancel the remote call when the local deadline fires.
        future.addTimeoutListener(() -> call.cancelByLocal(new IllegalStateException("client timeout")));
        // Requests 2 messages — presumably so an unexpected second response can be
        // detected for a unary call; confirm against the stream implementation.
        call.request(2);
    }
}
| 6,160 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/BiStreamServerCallListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.RpcInvocation;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.observer.ServerCallToObserverAdapter;
/**
 * Server-side listener for bidirectional streaming calls. The service implementation is
 * invoked immediately with the response observer, and the request observer it returns is
 * then fed every inbound message.
 */
public class BiStreamServerCallListener extends AbstractServerCallListener {

    /** Observer returned by the service implementation; receives inbound request messages. */
    private StreamObserver<Object> requestObserver;

    public BiStreamServerCallListener(
            RpcInvocation invocation, Invoker<?> invoker, ServerCallToObserverAdapter<Object> responseObserver) {
        super(invocation, invoker, responseObserver);
        // For bi-streaming the only argument is the response observer; the implementation
        // runs up-front and hands back its request observer via onReturn.
        invocation.setArguments(new Object[] {responseObserver});
        invoke();
    }

    @Override
    @SuppressWarnings("unchecked")
    public void onReturn(Object value) {
        this.requestObserver = (StreamObserver<Object>) value;
    }

    @Override
    public void onMessage(Object message, int actualContentLength) {
        // Wrapper mode delivers a single-element argument array; unwrap it first.
        Object payload = message instanceof Object[] ? ((Object[]) message)[0] : message;
        requestObserver.onNext(payload);
        if (responseObserver.isAutoRequestN()) {
            responseObserver.request(1);
        }
    }

    @Override
    public void onCancel(TriRpcStatus status) {
        requestObserver.onError(status.asException());
    }

    @Override
    public void onComplete() {
        requestObserver.onCompleted();
    }
}
| 6,161 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/ServerStreamServerCallListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.RpcInvocation;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.observer.ServerCallToObserverAdapter;
/**
 * Server-side listener for server-streaming calls: stores the single request message as
 * the invocation arguments and runs the service implementation once the client half-closes.
 */
public class ServerStreamServerCallListener extends AbstractServerCallListener {

    public ServerStreamServerCallListener(
            RpcInvocation invocation, Invoker<?> invoker, ServerCallToObserverAdapter<Object> responseObserver) {
        super(invocation, invoker, responseObserver);
    }

    @Override
    public void onReturn(Object value) {
        // Responses are emitted through the response observer; the return value is unused.
    }

    @Override
    public void onMessage(Object message, int actualContentLength) {
        // Wrapper mode delivers a single-element argument array; unwrap it first.
        Object payload = message instanceof Object[] ? ((Object[]) message)[0] : message;
        invocation.setArguments(new Object[] {payload, responseObserver});
    }

    @Override
    public void onCancel(TriRpcStatus status) {
        responseObserver.onError(status.asException());
    }

    @Override
    public void onComplete() {
        // Request fully received; perform the actual invocation now.
        invoke();
    }
}
| 6,162 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/ServerCall.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.rpc.TriRpcStatus;
import java.util.Map;
/**
* ServerCall manipulates server details of a RPC call. Request messages are acquired by {@link
* Listener}. Backpressure is supported by {@link #request(int)}.Response messages are sent by
* {@link ServerCall#sendMessage(Object)}.
*/
/**
 * ServerCall manipulates server details of an RPC call. Request messages are delivered to a
 * {@link Listener}. Backpressure is supported by {@link #request(int)}. Response messages are
 * sent via {@link ServerCall#sendMessage(Object)}.
 */
public interface ServerCall {

    /**
     * A listener to receive request messages and lifecycle events for one call.
     */
    interface Listener {

        /**
         * Callback when a request message is received.
         *
         * @param message message received
         * @param actualContentLength actual content length from body
         */
        void onMessage(Object message, int actualContentLength);

        /**
         * Callback when the call is canceled.
         *
         * @param status status describing why the call was canceled
         */
        void onCancel(TriRpcStatus status);

        /**
         * Request completed: the client has finished sending messages (half-close).
         */
        void onComplete();
    }

    /**
     * Send message to client
     *
     * @param message message to send
     */
    void sendMessage(Object message);

    /**
     * Request more request data from the client (flow-control credit).
     *
     * @param numMessages max number of messages
     */
    void request(int numMessages);

    /**
     * Close the call.
     *
     * @param status status of the call to send to the client
     * @param responseAttrs response attachments
     */
    void close(TriRpcStatus status, Map<String, Object> responseAttrs);
}
| 6,163 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/ReflectionAbstractServerCall.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.config.ConfigurationUtils;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.utils.CollectionUtils;
import org.apache.dubbo.rpc.HeaderFilter;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.RpcInvocation;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.model.MethodDescriptor;
import org.apache.dubbo.rpc.model.MethodDescriptor.RpcType;
import org.apache.dubbo.rpc.model.PackableMethod;
import org.apache.dubbo.rpc.model.PackableMethodFactory;
import org.apache.dubbo.rpc.model.ProviderModel;
import org.apache.dubbo.rpc.model.ServiceDescriptor;
import org.apache.dubbo.rpc.protocol.tri.ClassLoadUtil;
import org.apache.dubbo.rpc.protocol.tri.TripleCustomerProtocolWapper;
import org.apache.dubbo.rpc.protocol.tri.stream.ServerStream;
import org.apache.dubbo.rpc.service.ServiceDescriptorInternalCache;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import io.netty.handler.codec.http.HttpHeaderNames;
import static org.apache.dubbo.common.constants.CommonConstants.DEFAULT_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.DUBBO_PACKABLE_METHOD_FACTORY;
/**
 * Server call that resolves the target {@link MethodDescriptor} by reflection on the
 * provider's {@link ServiceDescriptor}. Handles the built-in generic/echo services,
 * lazy overload resolution in wrapper mode, and caches the {@link PackableMethod}
 * used for request/response (de)serialization.
 */
public class ReflectionAbstractServerCall extends AbstractServerCall {

    // Attribute-map key under which the per-service MethodDescriptor -> PackableMethod
    // cache is stored in the ServiceMetadata.
    private static final String PACKABLE_METHOD_CACHE = "PACKABLE_METHOD_CACHE";

    private final List<HeaderFilter> headerFilters;

    // All overloads matching the requested method name. Only populated for regular
    // (non-generic, non-echo) calls; in wrapper mode the concrete overload is resolved
    // lazily from the first request payload (see trySetMethodDescriptor).
    private List<MethodDescriptor> methodDescriptors;

    public ReflectionAbstractServerCall(
            Invoker<?> invoker,
            ServerStream serverStream,
            FrameworkModel frameworkModel,
            String acceptEncoding,
            String serviceName,
            String methodName,
            List<HeaderFilter> headerFilters,
            Executor executor) {
        super(
                invoker,
                serverStream,
                frameworkModel,
                getServiceDescriptor(invoker.getUrl()),
                acceptEncoding,
                serviceName,
                methodName,
                executor);
        this.headerFilters = headerFilters;
    }

    /** Looks up the service descriptor from the URL's provider model; null if unavailable. */
    private static ServiceDescriptor getServiceDescriptor(URL url) {
        ProviderModel providerModel = (ProviderModel) url.getServiceModel();
        if (providerModel == null || providerModel.getServiceModel() == null) {
            return null;
        }
        return providerModel.getServiceModel();
    }

    // True for the built-in $echo method.
    private boolean isEcho(String methodName) {
        return CommonConstants.$ECHO.equals(methodName);
    }

    // True for the generic-invocation methods ($invoke / $invokeAsync).
    private boolean isGeneric(String methodName) {
        return CommonConstants.$INVOKE.equals(methodName) || CommonConstants.$INVOKE_ASYNC.equals(methodName);
    }

    /**
     * Resolves the method descriptor for this call. If it cannot be resolved yet
     * (wrapper-mode overloads), defers resolution and requests one message so the
     * first payload can disambiguate.
     */
    @Override
    public void startCall() {
        if (isGeneric(methodName)) {
            // There should be one and only one
            methodDescriptor = ServiceDescriptorInternalCache.genericService()
                    .getMethods(methodName)
                    .get(0);
        } else if (isEcho(methodName)) {
            // There should be one and only one
            methodDescriptor = ServiceDescriptorInternalCache.echoService()
                    .getMethods(methodName)
                    .get(0);
        } else {
            methodDescriptors = serviceDescriptor.getMethods(methodName);
            // try lower-case method
            if (CollectionUtils.isEmpty(methodDescriptors)) {
                final String lowerMethod = Character.toLowerCase(methodName.charAt(0)) + methodName.substring(1);
                methodDescriptors = serviceDescriptor.getMethods(lowerMethod);
            }
            if (CollectionUtils.isEmpty(methodDescriptors)) {
                responseErr(TriRpcStatus.UNIMPLEMENTED.withDescription(
                        "Method : " + methodName + " not found of service:" + serviceName));
                return;
            }
            // In most cases there is only one method
            if (methodDescriptors.size() == 1) {
                methodDescriptor = methodDescriptors.get(0);
            }
            // generated unary method ,use unary type
            // Response foo(Request)
            // void foo(Request,StreamObserver<Response>)
            if (methodDescriptors.size() == 2) {
                // Prefer the plain unary overload over the generated server-stream variant.
                if (methodDescriptors.get(1).getRpcType() == RpcType.SERVER_STREAM) {
                    methodDescriptor = methodDescriptors.get(0);
                } else if (methodDescriptors.get(0).getRpcType() == RpcType.SERVER_STREAM) {
                    methodDescriptor = methodDescriptors.get(1);
                }
            }
        }
        if (methodDescriptor != null) {
            loadPackableMethod(invoker.getUrl());
        }
        trySetListener();
        if (listener == null) {
            // wrap request , need one message
            request(1);
        }
    }

    /**
     * Builds the invocation and starts the internal call once the method descriptor is
     * known. No-op when the listener already exists, the descriptor is still unresolved,
     * or the call was closed meanwhile.
     */
    private void trySetListener() {
        if (listener != null) {
            return;
        }
        if (methodDescriptor == null) {
            return;
        }
        if (isClosed()) {
            return;
        }
        RpcInvocation invocation = buildInvocation(methodDescriptor);
        if (isClosed()) {
            return;
        }
        // Header filters may reject the call; re-check the closed flag after each step.
        headerFilters.forEach(f -> f.invoke(invoker, invocation));
        if (isClosed()) {
            return;
        }
        listener = ReflectionAbstractServerCall.this.startInternalCall(invocation, methodDescriptor, invoker);
    }

    @Override
    protected Object parseSingleMessage(byte[] data) throws Exception {
        // In wrapper mode the overload is only known once the first payload arrives.
        trySetMethodDescriptor(data);
        trySetListener();
        if (isClosed()) {
            return null;
        }
        // Deserialize using the service's own class loader.
        ClassLoadUtil.switchContextLoader(invoker.getUrl().getServiceModel().getClassLoader());
        return packableMethod.getRequestUnpack().unpack(data);
    }

    /**
     * Wrapper-mode overload resolution: parses the argument-type signature out of the
     * request wrapper and matches it against the candidate descriptors. Closes the call
     * with UNIMPLEMENTED when no overload matches.
     */
    private void trySetMethodDescriptor(byte[] data) {
        if (methodDescriptor != null) {
            return;
        }
        final TripleCustomerProtocolWapper.TripleRequestWrapper request;
        request = TripleCustomerProtocolWapper.TripleRequestWrapper.parseFrom(data);
        final String[] paramTypes =
                request.getArgTypes().toArray(new String[request.getArgs().size()]);
        // wrapper mode the method can overload so maybe list
        for (MethodDescriptor descriptor : methodDescriptors) {
            // params type is array
            if (Arrays.equals(descriptor.getCompatibleParamSignatures(), paramTypes)) {
                methodDescriptor = descriptor;
                break;
            }
        }
        if (methodDescriptor == null) {
            ReflectionAbstractServerCall.this.close(
                    TriRpcStatus.UNIMPLEMENTED.withDescription(
                            "Method :" + methodName + "[" + Arrays.toString(paramTypes) + "] " + "not found of service:"
                                    + serviceDescriptor.getInterfaceName()),
                    null);
            return;
        }
        loadPackableMethod(invoker.getUrl());
    }

    /**
     * Resolves (and caches per-service) the PackableMethod for the resolved descriptor,
     * using the configured PackableMethodFactory extension and the request content type.
     */
    @SuppressWarnings("unchecked")
    private void loadPackableMethod(URL url) {
        Map<MethodDescriptor, PackableMethod> cacheMap = (Map<MethodDescriptor, PackableMethod>) url.getServiceModel()
                .getServiceMetadata()
                .getAttributeMap()
                .computeIfAbsent(PACKABLE_METHOD_CACHE, (k) -> new ConcurrentHashMap<>());
        packableMethod = cacheMap.computeIfAbsent(methodDescriptor, (md) -> frameworkModel
                .getExtensionLoader(PackableMethodFactory.class)
                .getExtension(ConfigurationUtils.getGlobalConfiguration(url.getApplicationModel())
                        .getString(DUBBO_PACKABLE_METHOD_FACTORY, DEFAULT_KEY))
                .create(methodDescriptor, url, (String) requestMetadata.get(HttpHeaderNames.CONTENT_TYPE.toString())));
    }
}
| 6,164 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/UnaryServerCallListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.call;
import org.apache.dubbo.remoting.Constants;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.RpcException;
import org.apache.dubbo.rpc.RpcInvocation;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.protocol.tri.observer.ServerCallToObserverAdapter;
/**
 * Server-side listener for unary calls: buffers the request arguments, invokes the service
 * on half-close, and — in wrapper mode — returns business exceptions in-band instead of as
 * a transport-level error.
 */
public class UnaryServerCallListener extends AbstractServerCallListener {

    /** Whether the call runs in wrapper mode and must return business exceptions in-band. */
    private final boolean needWrapper;

    public UnaryServerCallListener(
            RpcInvocation invocation,
            Invoker<?> invoker,
            ServerCallToObserverAdapter<Object> responseObserver,
            boolean needWrapper) {
        super(invocation, invoker, responseObserver);
        this.needWrapper = needWrapper;
    }

    @Override
    public void onReturn(Object value) {
        responseObserver.onNext(value);
        responseObserver.onCompleted();
    }

    @Override
    public void onMessage(Object message, int actualContentLength) {
        // A single payload becomes a one-element argument array; wrapper mode already
        // delivers the full argument array.
        Object[] arguments = message instanceof Object[] ? (Object[]) message : new Object[] {message};
        invocation.setArguments(arguments);
        invocation.put(Constants.CONTENT_LENGTH_KEY, actualContentLength);
    }

    @Override
    public void onCancel(TriRpcStatus status) {
        // Nothing to tear down for a unary call.
    }

    @Override
    protected void doOnResponseHasException(Throwable t) {
        if (!needWrapper) {
            super.doOnResponseHasException(t);
            return;
        }
        onReturnException((Exception) t);
    }

    /** Sends a business exception back as a normal (in-band) response in wrapper mode. */
    private void onReturnException(Exception value) {
        TriRpcStatus status = TriRpcStatus.getStatus(value);
        int exceptionCode = status.code.code;
        if (exceptionCode == TriRpcStatus.UNKNOWN.code.code) {
            // Unmapped exceptions are reported as generic business exceptions.
            exceptionCode = RpcException.BIZ_EXCEPTION;
        }
        responseObserver.setExceptionCode(exceptionCode);
        responseObserver.setNeedReturnException(true);
        onReturn(value);
    }

    @Override
    public void onComplete() {
        invoke();
    }
}
| 6,165 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/command/CancelQueueCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.command;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleStreamChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.DefaultHttp2ResetFrame;
import io.netty.handler.codec.http2.Http2Error;
public class CancelQueueCommand extends StreamQueueCommand {
private final Http2Error error;
public CancelQueueCommand(TripleStreamChannelFuture streamChannelFuture, Http2Error error) {
super(streamChannelFuture);
this.error = error;
}
public static CancelQueueCommand createCommand(TripleStreamChannelFuture streamChannelFuture, Http2Error error) {
return new CancelQueueCommand(streamChannelFuture, error);
}
@Override
public void doSend(ChannelHandlerContext ctx, ChannelPromise promise) {
ctx.write(new DefaultHttp2ResetFrame(error), promise);
}
}
| 6,166 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/command/CreateStreamQueueCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.command;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleStreamChannelFuture;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.Http2StreamChannel;
import io.netty.handler.codec.http2.Http2StreamChannelBootstrap;
import io.netty.util.concurrent.Future;
/**
 * Queued command that opens a new HTTP/2 stream channel on the parent connection and
 * completes the given {@link TripleStreamChannelFuture} with the result.
 */
public class CreateStreamQueueCommand extends QueuedCommand {

    private final Http2StreamChannelBootstrap bootstrap;

    /** Future exposed to the caller; completed once the stream channel is (not) created. */
    private final TripleStreamChannelFuture streamChannelFuture;

    private CreateStreamQueueCommand(
            Http2StreamChannelBootstrap bootstrap, TripleStreamChannelFuture streamChannelFuture) {
        this.bootstrap = bootstrap;
        this.streamChannelFuture = streamChannelFuture;
        this.promise(streamChannelFuture.getParentChannel().newPromise());
        this.channel(streamChannelFuture.getParentChannel());
    }

    public static CreateStreamQueueCommand create(
            Http2StreamChannelBootstrap bootstrap, TripleStreamChannelFuture streamChannelFuture) {
        return new CreateStreamQueueCommand(bootstrap, streamChannelFuture);
    }

    @Override
    public void doSend(ChannelHandlerContext ctx, ChannelPromise promise) {
        // NOOP — stream creation happens in run(), not via a pipeline write.
    }

    @Override
    public void run(Channel channel) {
        // Executed on the I/O thread. open() usually completes synchronously there, but it
        // is still an async Future: checking isSuccess() immediately would misreport a
        // merely-pending future as failed (with a null cause). Complete via a listener
        // when the future is not yet done.
        Future<Http2StreamChannel> future = bootstrap.open();
        if (future.isDone()) {
            notifyStreamChannel(future);
        } else {
            future.addListener(f -> notifyStreamChannel(future));
        }
    }

    /** Transfers the outcome of the open() future to the exposed stream-channel future. */
    private void notifyStreamChannel(Future<Http2StreamChannel> future) {
        if (future.isSuccess()) {
            streamChannelFuture.complete(future.getNow());
        } else {
            streamChannelFuture.completeExceptionally(future.cause());
        }
    }
}
| 6,167 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/command/TextDataQueueCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.command;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleStreamChannelFuture;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.DefaultHttp2DataFrame;
/**
 * Queued command that writes a UTF-8 text payload as an HTTP/2 DATA frame
 * (used for gRPC-web / text responses).
 */
public class TextDataQueueCommand extends StreamQueueCommand {

    /** Text payload to encode as UTF-8. */
    private final String data;

    /** Whether this DATA frame ends the stream. */
    private final boolean endStream;

    private TextDataQueueCommand(TripleStreamChannelFuture streamChannelFuture, String text, boolean endStream) {
        super(streamChannelFuture);
        this.data = text;
        this.endStream = endStream;
    }

    public static TextDataQueueCommand createCommand(
            TripleStreamChannelFuture streamChannelFuture, String data, boolean endStream) {
        return new TextDataQueueCommand(streamChannelFuture, data, endStream);
    }

    @Override
    public void doSend(ChannelHandlerContext ctx, ChannelPromise promise) {
        // Encode the text into a pooled buffer and emit it as a DATA frame.
        ByteBuf payload = ByteBufUtil.writeUtf8(ctx.alloc(), data);
        ctx.write(new DefaultHttp2DataFrame(payload, endStream), promise);
    }
}
| 6,168 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/command/DataQueueCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.command;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleStreamChannelFuture;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.DefaultHttp2DataFrame;
/**
 * Queued command that writes one length-prefixed gRPC message as an HTTP/2 DATA frame:
 * a 1-byte compression flag, a 4-byte big-endian length, then the payload bytes.
 * A null payload produces an empty (possibly end-of-stream) DATA frame.
 */
public class DataQueueCommand extends StreamQueueCommand {

    private final byte[] data;

    /** gRPC compressed-flag byte (0 = plain, 1 = compressed). */
    private final int compressFlag;

    private final boolean endStream;

    private DataQueueCommand(
            TripleStreamChannelFuture streamChannelFuture, byte[] data, int compressFlag, boolean endStream) {
        super(streamChannelFuture);
        this.data = data;
        this.compressFlag = compressFlag;
        this.endStream = endStream;
    }

    public static DataQueueCommand create(
            TripleStreamChannelFuture streamChannelFuture, byte[] data, boolean endStream, int compressFlag) {
        return new DataQueueCommand(streamChannelFuture, data, compressFlag, endStream);
    }

    @Override
    public void doSend(ChannelHandlerContext ctx, ChannelPromise promise) {
        if (data == null) {
            ctx.write(new DefaultHttp2DataFrame(endStream), promise);
            return;
        }
        ByteBuf payload = ctx.alloc().buffer();
        payload.writeByte(compressFlag);
        payload.writeInt(data.length);
        payload.writeBytes(data);
        ctx.write(new DefaultHttp2DataFrame(payload, endStream), promise);
    }

    // for test
    public byte[] getData() {
        return data;
    }

    // for test
    public boolean isEndStream() {
        return endStream;
    }
}
| 6,169 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/command/HeaderQueueCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.command;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleStreamChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame;
import io.netty.handler.codec.http2.Http2Headers;
/**
 * Queued command that writes an HTTP/2 HEADERS frame, optionally ending the stream
 * (i.e. trailers).
 */
public class HeaderQueueCommand extends StreamQueueCommand {

    private final Http2Headers headers;

    private final boolean endStream;

    private HeaderQueueCommand(TripleStreamChannelFuture streamChannelFuture, Http2Headers headers, boolean endStream) {
        super(streamChannelFuture);
        this.headers = headers;
        this.endStream = endStream;
    }

    public static HeaderQueueCommand createHeaders(
            TripleStreamChannelFuture streamChannelFuture, Http2Headers headers) {
        // Non-terminal headers by default.
        return createHeaders(streamChannelFuture, headers, false);
    }

    public static HeaderQueueCommand createHeaders(
            TripleStreamChannelFuture streamChannelFuture, Http2Headers headers, boolean endStream) {
        return new HeaderQueueCommand(streamChannelFuture, headers, endStream);
    }

    public Http2Headers getHeaders() {
        return headers;
    }

    public boolean isEndStream() {
        return endStream;
    }

    @Override
    public void doSend(ChannelHandlerContext ctx, ChannelPromise promise) {
        ctx.write(new DefaultHttp2HeadersFrame(headers, endStream), promise);
    }
}
| 6,170 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/command/QueuedCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.command;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
/**
 * Base class for commands placed on the connection's write queue. Each command carries a
 * target {@link Channel} and a {@link ChannelPromise} reflecting the outcome of the
 * eventual write.
 */
public abstract class QueuedCommand {

    protected Channel channel;

    private ChannelPromise promise;

    public ChannelPromise promise() {
        return promise;
    }

    public void promise(ChannelPromise promise) {
        this.promise = promise;
    }

    /** Fails the promise to signal that the command was dropped before being written. */
    public void cancel() {
        promise.tryFailure(new IllegalStateException("Canceled"));
    }

    /**
     * Writes this command object into the channel pipeline; it is translated into actual
     * frames by {@link #doSend} further downstream. The write outcome is mirrored onto
     * the command's promise.
     */
    public void run(Channel channel) {
        if (!channel.isActive()) {
            // Connection already gone; treat the write as a no-op success.
            promise.trySuccess();
            return;
        }
        channel.write(this).addListener(future -> {
            if (future.isSuccess()) {
                promise.setSuccess();
            } else {
                promise.setFailure(future.cause());
            }
        });
    }

    /** Emits the command's frames unless the channel has gone inactive meanwhile. */
    public final void send(ChannelHandlerContext ctx, ChannelPromise promise) {
        if (ctx.channel().isActive()) {
            doSend(ctx, promise);
        }
    }

    public QueuedCommand channel(Channel channel) {
        this.channel = channel;
        return this;
    }

    public Channel channel() {
        return channel;
    }

    public abstract void doSend(ChannelHandlerContext ctx, ChannelPromise promise);
}
| 6,171 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/command/StreamQueueCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.command;
import org.apache.dubbo.common.utils.Assert;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleStreamChannelFuture;
import io.netty.channel.Channel;
/**
 * Base class for queued commands bound to a Triple stream whose underlying HTTP/2
 * stream channel is created asynchronously via a {@link TripleStreamChannelFuture}.
 */
public abstract class StreamQueueCommand extends QueuedCommand {

    /** Future of the HTTP/2 stream channel this command will eventually be written to. */
    protected final TripleStreamChannelFuture streamChannelFuture;

    protected StreamQueueCommand(TripleStreamChannelFuture streamChannelFuture) {
        Assert.notNull(streamChannelFuture, "streamChannelFuture cannot be null.");
        this.streamChannelFuture = streamChannelFuture;
        this.promise(streamChannelFuture.getParentChannel().newPromise());
    }

    /**
     * Executes the command only when the stream channel was created successfully;
     * otherwise this command's promise is failed with the stream creation cause.
     */
    @Override
    public void run(Channel channel) {
        if (!streamChannelFuture.isSuccess()) {
            promise().setFailure(streamChannelFuture.cause());
            return;
        }
        super.run(channel);
    }

    /** Returns the resolved stream channel (the future's current value). */
    @Override
    public Channel channel() {
        return streamChannelFuture.getNow();
    }
}
| 6,172 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/command/EndStreamQueueCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.command;
import org.apache.dubbo.rpc.protocol.tri.stream.TripleStreamChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.DefaultHttp2DataFrame;
/**
 * Queued command that terminates an HTTP/2 stream by writing an empty DATA frame
 * with the END_STREAM flag set.
 */
public class EndStreamQueueCommand extends StreamQueueCommand {

    public EndStreamQueueCommand(TripleStreamChannelFuture streamChannelFuture) {
        super(streamChannelFuture);
    }

    /** Static factory mirroring the constructor, kept for call-site readability. */
    public static EndStreamQueueCommand create(TripleStreamChannelFuture streamChannelFuture) {
        return new EndStreamQueueCommand(streamChannelFuture);
    }

    @Override
    public void doSend(ChannelHandlerContext ctx, ChannelPromise promise) {
        // An empty DATA frame constructed with endStream=true closes the local side.
        DefaultHttp2DataFrame endOfStream = new DefaultHttp2DataFrame(true);
        ctx.write(endOfStream, promise);
    }
}
| 6,173 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/service/SchemaDescriptorRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.service;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import com.google.protobuf.Descriptors.Descriptor;
import com.google.protobuf.Descriptors.FieldDescriptor;
import com.google.protobuf.Descriptors.FileDescriptor;
/**
 * Static registry mapping protobuf symbols (service names and message full names) and
 * extension numbers to the {@link FileDescriptor}s that declare them. Backs the gRPC
 * server-reflection service.
 */
public class SchemaDescriptorRegistry {

    /** Service names and (recursively nested) message full names -> declaring file. */
    private static final Map<String, FileDescriptor> DESCRIPTORS_BY_SYMBOL = new ConcurrentHashMap<>();

    /** Extended message full name -> (extension field number -> declaring file). */
    private static final Map<String, Map<Integer, FileDescriptor>> EXTENSIONS = new ConcurrentHashMap<>();

    // Concurrent set: registrations may race with listServiceNames(), so a plain
    // HashSet would be unsafe here while the two maps above are already concurrent.
    private static final Set<String> SERVICES = ConcurrentHashMap.newKeySet();

    /** Registers a service and indexes every message type and extension of its file. */
    public static void addSchemaDescriptor(String serviceName, FileDescriptor fd) {
        SERVICES.add(serviceName);
        DESCRIPTORS_BY_SYMBOL.put(serviceName, fd);
        for (Descriptor messageType : fd.getMessageTypes()) {
            addType(messageType);
        }
        for (FieldDescriptor extension : fd.getExtensions()) {
            addExtension(extension, fd);
        }
    }

    /** Indexes a message type and all of its nested types by full name. */
    private static void addType(Descriptor descriptor) {
        DESCRIPTORS_BY_SYMBOL.put(descriptor.getFullName(), descriptor.getFile());
        for (Descriptor nestedType : descriptor.getNestedTypes()) {
            addType(nestedType);
        }
    }

    private static void addExtension(FieldDescriptor extension, FileDescriptor fd) {
        String name = extension.getContainingType().getFullName();
        // computeIfAbsent replaces the racy containsKey/put pair, and the inner map is
        // concurrent so parallel registrations of the same containing type stay safe.
        EXTENSIONS.computeIfAbsent(name, k -> new ConcurrentHashMap<>()).put(extension.getNumber(), fd);
    }

    /** Returns the file declaring the given extension number, or null when unknown. */
    public static FileDescriptor getFileDescriptorByExtensionAndNumber(String extension, int number) {
        return EXTENSIONS.getOrDefault(extension, Collections.emptyMap()).get(number);
    }

    /**
     * Returns all known extension numbers of the given type, or null when the type has
     * no registered extensions (null is part of the existing contract: callers
     * distinguish "unknown type" from "known type with numbers").
     */
    public static Set<Integer> getExtensionNumbers(String extension) {
        Map<Integer, FileDescriptor> ret = EXTENSIONS.get(extension);
        if (ret == null) {
            return null;
        } else {
            return ret.keySet();
        }
    }

    /** Returns the file declaring the given service or message symbol, or null. */
    public static FileDescriptor getSchemaDescriptor(String serviceName) {
        return DESCRIPTORS_BY_SYMBOL.get(serviceName);
    }

    /** Returns a snapshot copy of all registered service names. */
    public static List<String> listServiceNames() {
        return new ArrayList<>(SERVICES);
    }
}
| 6,174 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/service/HealthStatusManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.service;
import io.grpc.health.v1.Health;
import io.grpc.health.v1.HealthCheckResponse;
/**
 * Facade over {@link TriHealthImpl} that exposes the gRPC-style health service and
 * lets its owner update, clear, or terminally freeze per-service health statuses.
 */
public class HealthStatusManager {

    /**
     * The special "service name" that represent all services on a GRPC server. It is an empty
     * string.
     */
    public static final String SERVICE_NAME_ALL_SERVICES = "";

    /** Backing implementation every call is delegated to. */
    private final TriHealthImpl health;

    public HealthStatusManager(TriHealthImpl healthService) {
        this.health = healthService;
    }

    /** Returns the health service implementation to be exported as a Triple service. */
    public Health getHealthService() {
        return health;
    }

    /**
     * Records {@code status} for {@code service}.
     *
     * @param service name of some aspect of the server associated with a health status;
     *                it need not match any running gRPC service and may be the empty
     *                string {@code ""} per the gRPC health-checking specification
     * @param status  one of {@link HealthCheckResponse.ServingStatus#SERVING},
     *                {@link HealthCheckResponse.ServingStatus#NOT_SERVING} or
     *                {@link HealthCheckResponse.ServingStatus#UNKNOWN}
     */
    public void setStatus(String service, HealthCheckResponse.ServingStatus status) {
        health.setStatus(service, status);
    }

    /**
     * Removes the health record of {@code service}; subsequent checks for it are
     * answered with a NOT_FOUND error.
     *
     * @param service name of some aspect of the server associated with a health status;
     *                may be the empty string {@code ""} per the gRPC specification
     */
    public void clearStatus(String service) {
        health.clearStatus(service);
    }

    /**
     * Marks every known service as not serving and rejects all further status updates.
     * Intended to be called just before server shutdown so clients redirect traffic.
     */
    public void enterTerminalState() {
        health.enterTerminalState();
    }
}
| 6,175 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/service/TriBuiltinService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.service;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.config.Configuration;
import org.apache.dubbo.common.config.ConfigurationUtils;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.url.component.ServiceConfigURL;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.PathResolver;
import org.apache.dubbo.rpc.ProxyFactory;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.model.ModuleModel;
import java.util.concurrent.atomic.AtomicBoolean;
import io.grpc.health.v1.DubboHealthTriple;
import io.grpc.health.v1.Health;
import static org.apache.dubbo.common.constants.CommonConstants.ANYHOST_VALUE;
import static org.apache.dubbo.rpc.Constants.PROXY_KEY;
import static org.apache.dubbo.rpc.Constants.TRI_BUILTIN_SERVICE_INIT;
/**
 * Registers the Triple built-in services (health checking and server reflection),
 * analogous to gRPC's internal services.
 **/
public class TriBuiltinService {

    private ProxyFactory proxyFactory;

    private PathResolver pathResolver;

    private Health healthService;

    // Never reassigned after construction.
    private final FrameworkModel frameworkModel;

    private ReflectionV1AlphaService reflectionServiceV1Alpha;

    private HealthStatusManager healthStatusManager;

    // Global configuration switch; built-in services are exported only when enabled.
    private final Configuration config = ConfigurationUtils.getGlobalConfiguration(ApplicationModel.defaultModel());

    // Ensures init() registers the built-in services at most once.
    private final AtomicBoolean init = new AtomicBoolean();

    public TriBuiltinService(FrameworkModel frameworkModel) {
        this.frameworkModel = frameworkModel;
        if (enable()) {
            init();
        }
    }

    /** Idempotently creates and exports the health and reflection services. */
    public void init() {
        if (init.compareAndSet(false, true)) {
            healthStatusManager = new HealthStatusManager(new TriHealthImpl());
            healthService = healthStatusManager.getHealthService();
            reflectionServiceV1Alpha = new ReflectionV1AlphaService();
            proxyFactory = frameworkModel.getExtensionLoader(ProxyFactory.class).getAdaptiveExtension();
            pathResolver = frameworkModel.getExtensionLoader(PathResolver.class).getDefaultExtension();
            addSingleBuiltinService(DubboHealthTriple.SERVICE_NAME, healthService, Health.class);
            addSingleBuiltinService(
                    ReflectionV1AlphaService.SERVICE_NAME, reflectionServiceV1Alpha, ReflectionV1AlphaService.class);
        }
    }

    /** Whether built-in service registration is enabled by global configuration. */
    public boolean enable() {
        return config.getBoolean(TRI_BUILTIN_SERVICE_INIT, false);
    }

    /**
     * Exports one built-in service under the internal module and removes it from the
     * path resolver when that module is destroyed.
     */
    private <T> void addSingleBuiltinService(String serviceName, T impl, Class<T> interfaceClass) {
        ModuleModel internalModule = ApplicationModel.defaultModel().getInternalModule();
        URL url = new ServiceConfigURL(CommonConstants.TRIPLE, null, null, ANYHOST_VALUE, 0, serviceName)
                .addParameter(PROXY_KEY, CommonConstants.NATIVE_STUB)
                .setScopeModel(internalModule);
        Invoker<?> invoker = proxyFactory.getInvoker(impl, interfaceClass, url);
        pathResolver.add(serviceName, invoker);
        internalModule.addDestroyListener(scopeModel -> pathResolver.remove(serviceName));
    }

    public HealthStatusManager getHealthStatusManager() {
        return healthStatusManager;
    }
}
| 6,176 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/service/ReflectionV1AlphaService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.service;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.TriRpcStatus;
import java.util.ArrayDeque;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;
import com.google.protobuf.Descriptors.FileDescriptor;
import io.grpc.reflection.v1alpha.DubboServerReflectionTriple;
import io.grpc.reflection.v1alpha.ErrorResponse;
import io.grpc.reflection.v1alpha.ExtensionNumberResponse;
import io.grpc.reflection.v1alpha.ExtensionRequest;
import io.grpc.reflection.v1alpha.FileDescriptorResponse;
import io.grpc.reflection.v1alpha.ListServiceResponse;
import io.grpc.reflection.v1alpha.ServerReflectionRequest;
import io.grpc.reflection.v1alpha.ServerReflectionResponse;
import io.grpc.reflection.v1alpha.ServiceResponse;
/**
 * Implements the gRPC server reflection service (v1alpha) for Protobuf services,
 * used for service testing and dynamic gateways. Lookups are served from
 * {@link SchemaDescriptorRegistry}.
 *
 * @link https://github.com/grpc/grpc/blob/master/doc/server-reflection.md
 */
public class ReflectionV1AlphaService extends DubboServerReflectionTriple.ServerReflectionImplBase {
    @Override
    public StreamObserver<ServerReflectionRequest> serverReflectionInfo(
            StreamObserver<ServerReflectionResponse> responseObserver) {
        // Bidirectional stream: each incoming request is dispatched by its oneof case
        // and answered with exactly one response (or an embedded ErrorResponse).
        return new StreamObserver<ServerReflectionRequest>() {
            @Override
            public void onNext(ServerReflectionRequest request) {
                switch (request.getMessageRequestCase()) {
                    case FILE_BY_FILENAME:
                        getFileByName(request, responseObserver);
                        break;
                    case FILE_CONTAINING_SYMBOL:
                        getFileContainingSymbol(request, responseObserver);
                        break;
                    case FILE_CONTAINING_EXTENSION:
                        getFileByExtension(request, responseObserver);
                        break;
                    case ALL_EXTENSION_NUMBERS_OF_TYPE:
                        getAllExtensions(request, responseObserver);
                        break;
                    case LIST_SERVICES:
                        listServices(request, responseObserver);
                        break;
                    default:
                        // Unknown / unset oneof case: report UNIMPLEMENTED inline rather
                        // than failing the whole stream.
                        sendErrorResponse(
                                request,
                                TriRpcStatus.Code.UNIMPLEMENTED,
                                "not implemented " + request.getMessageRequestCase(),
                                responseObserver);
                }
            }

            @Override
            public void onError(Throwable throwable) {
                responseObserver.onError(throwable);
            }

            @Override
            public void onCompleted() {
                responseObserver.onCompleted();
            }
        };
    }

    // Handles FILE_BY_FILENAME requests.
    // NOTE(review): SchemaDescriptorRegistry indexes by service/message symbol, not by
    // proto file name, so this lookup likely only matches when the two coincide —
    // confirm intended behavior.
    private void getFileByName(
            ServerReflectionRequest request, StreamObserver<ServerReflectionResponse> responseObserver) {
        String name = request.getFileByFilename();
        FileDescriptor fd = SchemaDescriptorRegistry.getSchemaDescriptor(name);
        if (fd != null) {
            responseObserver.onNext(createServerReflectionResponse(request, fd));
        } else {
            sendErrorResponse(request, TriRpcStatus.Code.NOT_FOUND, "File not found.", responseObserver);
        }
    }

    // Handles FILE_CONTAINING_SYMBOL: resolves a fully-qualified symbol to its file.
    private void getFileContainingSymbol(
            ServerReflectionRequest request, StreamObserver<ServerReflectionResponse> responseObserver) {
        String symbol = request.getFileContainingSymbol();
        FileDescriptor fd = SchemaDescriptorRegistry.getSchemaDescriptor(symbol);
        if (fd != null) {
            responseObserver.onNext(createServerReflectionResponse(request, fd));
        } else {
            sendErrorResponse(request, TriRpcStatus.Code.NOT_FOUND, "Symbol not found.", responseObserver);
        }
    }

    // Handles FILE_CONTAINING_EXTENSION: resolves (containing type, field number)
    // to the file declaring that extension.
    private void getFileByExtension(
            ServerReflectionRequest request, StreamObserver<ServerReflectionResponse> responseObserver) {
        ExtensionRequest extensionRequest = request.getFileContainingExtension();
        String type = extensionRequest.getContainingType();
        int extension = extensionRequest.getExtensionNumber();
        FileDescriptor fd = SchemaDescriptorRegistry.getFileDescriptorByExtensionAndNumber(type, extension);
        if (fd != null) {
            responseObserver.onNext(createServerReflectionResponse(request, fd));
        } else {
            sendErrorResponse(request, TriRpcStatus.Code.NOT_FOUND, "Extension not found.", responseObserver);
        }
    }

    // Handles ALL_EXTENSION_NUMBERS_OF_TYPE: lists every registered extension number
    // of the given type; null from the registry means the type itself is unknown.
    private void getAllExtensions(
            ServerReflectionRequest request, StreamObserver<ServerReflectionResponse> responseObserver) {
        String type = request.getAllExtensionNumbersOfType();
        Set<Integer> extensions = SchemaDescriptorRegistry.getExtensionNumbers(type);
        if (extensions != null) {
            ExtensionNumberResponse.Builder builder =
                    ExtensionNumberResponse.newBuilder().setBaseTypeName(type).addAllExtensionNumber(extensions);
            responseObserver.onNext(ServerReflectionResponse.newBuilder()
                    .setValidHost(request.getHost())
                    .setOriginalRequest(request)
                    .setAllExtensionNumbersResponse(builder)
                    .build());
        } else {
            sendErrorResponse(request, TriRpcStatus.Code.NOT_FOUND, "Type not found.", responseObserver);
        }
    }

    // Handles LIST_SERVICES: returns the names of all registered services.
    private void listServices(
            ServerReflectionRequest request, StreamObserver<ServerReflectionResponse> responseObserver) {
        ListServiceResponse.Builder builder = ListServiceResponse.newBuilder();
        for (String serviceName : SchemaDescriptorRegistry.listServiceNames()) {
            builder.addService(ServiceResponse.newBuilder().setName(serviceName));
        }
        responseObserver.onNext(ServerReflectionResponse.newBuilder()
                .setValidHost(request.getHost())
                .setOriginalRequest(request)
                .setListServicesResponse(builder)
                .build());
    }

    // Emits an ErrorResponse payload for the given request; the stream itself stays open.
    private void sendErrorResponse(
            ServerReflectionRequest request,
            TriRpcStatus.Code code,
            String message,
            StreamObserver<ServerReflectionResponse> responseObserver) {
        ServerReflectionResponse response = ServerReflectionResponse.newBuilder()
                .setValidHost(request.getHost())
                .setOriginalRequest(request)
                .setErrorResponse(
                        ErrorResponse.newBuilder().setErrorCode(code.code).setErrorMessage(message))
                .build();
        responseObserver.onNext(response);
    }

    // Builds a FileDescriptorResponse containing the given file plus its transitive
    // dependencies, deduplicated by file name via a breadth-first walk.
    private ServerReflectionResponse createServerReflectionResponse(
            ServerReflectionRequest request, FileDescriptor fd) {
        FileDescriptorResponse.Builder fdRBuilder = FileDescriptorResponse.newBuilder();
        Set<String> seenFiles = new HashSet<>();
        Queue<FileDescriptor> frontier = new ArrayDeque<>();
        seenFiles.add(fd.getName());
        frontier.add(fd);
        while (!frontier.isEmpty()) {
            FileDescriptor nextFd = frontier.remove();
            fdRBuilder.addFileDescriptorProto(nextFd.toProto().toByteString());
            for (FileDescriptor dependencyFd : nextFd.getDependencies()) {
                if (!seenFiles.contains(dependencyFd.getName())) {
                    seenFiles.add(dependencyFd.getName());
                    frontier.add(dependencyFd);
                }
            }
        }
        return ServerReflectionResponse.newBuilder()
                .setValidHost(request.getHost())
                .setOriginalRequest(request)
                .setFileDescriptorResponse(fdRBuilder)
                .build();
    }
}
| 6,177 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/service/TriHealthImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri.service;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.RpcContext;
import org.apache.dubbo.rpc.TriRpcStatus;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import io.grpc.health.v1.DubboHealthTriple;
import io.grpc.health.v1.HealthCheckRequest;
import io.grpc.health.v1.HealthCheckResponse;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_CLOSED_SERVER;
/**
 * Health-checking service implementation (gRPC health protocol) backing
 * {@link HealthStatusManager}. Status reads are lock-free; watcher bookkeeping and
 * status mutations are serialized on {@code watchLock}.
 */
public class TriHealthImpl extends DubboHealthTriple.HealthImplBase {
    private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(TriHealthImpl.class);
    // Due to the latency of rpc calls, synchronizing the map does not help with consistency.
    // However, we need a ConcurrentHashMap to allow concurrent reading by check().
    private final Map<String, HealthCheckResponse.ServingStatus> statusMap = new ConcurrentHashMap<>();
    // Guards `watchers` and `terminal`, and serializes status changes with watcher notification.
    private final Object watchLock = new Object();
    // Technically a Multimap<String, StreamObserver<HealthCheckResponse>>. The Boolean value is not
    // used. The StreamObservers need to be kept in an identity-equality set, to make sure
    // user-defined equals() doesn't confuse our book-keeping of the StreamObservers. Constructing
    // such Multimap would require extra lines and the end result is not significantly simpler, thus I
    // would rather not have the Guava collections dependency.
    private final HashMap<String, IdentityHashMap<StreamObserver<HealthCheckResponse>, Boolean>> watchers =
            new HashMap<>();
    // Indicates if future status changes should be ignored (set by enterTerminalState()).
    private boolean terminal;

    public TriHealthImpl() {
        // Copy of what Go and C++ do: the empty "all services" name starts out SERVING.
        statusMap.put(HealthStatusManager.SERVICE_NAME_ALL_SERVICES, HealthCheckResponse.ServingStatus.SERVING);
    }

    // Maps a recorded status to the response sent to watchers; a missing record
    // (null) is reported as SERVICE_UNKNOWN per the health protocol.
    private static HealthCheckResponse getResponseForWatch(HealthCheckResponse.ServingStatus recordedStatus) {
        return HealthCheckResponse.newBuilder()
                .setStatus(recordedStatus == null ? HealthCheckResponse.ServingStatus.SERVICE_UNKNOWN : recordedStatus)
                .build();
    }

    /**
     * Unary check: returns the recorded status for the requested service, or throws
     * NOT_FOUND when no status has ever been recorded for it.
     */
    @Override
    public HealthCheckResponse check(HealthCheckRequest request) {
        HealthCheckResponse.ServingStatus status = statusMap.get(request.getService());
        if (status != null) {
            return HealthCheckResponse.newBuilder().setStatus(status).build();
        }
        throw TriRpcStatus.NOT_FOUND
                .withDescription("unknown service " + request.getService())
                .asException();
    }

    /**
     * Streaming watch: immediately emits the current status, then registers the
     * observer to receive every subsequent change. The observer is unregistered when
     * the call's cancellation context fires.
     */
    @Override
    public void watch(HealthCheckRequest request, StreamObserver<HealthCheckResponse> responseObserver) {
        final String service = request.getService();
        synchronized (watchLock) {
            // Initial snapshot is sent under the lock so no concurrent status change is missed.
            HealthCheckResponse.ServingStatus status = statusMap.get(service);
            responseObserver.onNext(getResponseForWatch(status));
            IdentityHashMap<StreamObserver<HealthCheckResponse>, Boolean> serviceWatchers = watchers.get(service);
            if (serviceWatchers == null) {
                serviceWatchers = new IdentityHashMap<>();
                watchers.put(service, serviceWatchers);
            }
            serviceWatchers.put(responseObserver, Boolean.TRUE);
        }
        // Clean up this watcher when the client cancels or the call ends.
        RpcContext.getCancellationContext().addListener(context -> {
            synchronized (watchLock) {
                IdentityHashMap<StreamObserver<HealthCheckResponse>, Boolean> serviceWatchers = watchers.get(service);
                if (serviceWatchers != null) {
                    serviceWatchers.remove(responseObserver);
                    if (serviceWatchers.isEmpty()) {
                        watchers.remove(service);
                    }
                }
            }
        });
    }

    // Records a new status for the service; ignored once terminal.
    void setStatus(String service, HealthCheckResponse.ServingStatus status) {
        synchronized (watchLock) {
            if (terminal) {
                logger.info("Ignoring status " + status + " for " + service);
                return;
            }
            setStatusInternal(service, status);
        }
    }

    // Caller must hold watchLock. Watchers are notified only on an actual change.
    private void setStatusInternal(String service, HealthCheckResponse.ServingStatus status) {
        HealthCheckResponse.ServingStatus prevStatus = statusMap.put(service, status);
        if (prevStatus != status) {
            notifyWatchers(service, status);
        }
    }

    // Removes the status record; watchers then see SERVICE_UNKNOWN. Ignored once terminal.
    void clearStatus(String service) {
        synchronized (watchLock) {
            if (terminal) {
                logger.info("Ignoring status clearing for " + service);
                return;
            }
            HealthCheckResponse.ServingStatus prevStatus = statusMap.remove(service);
            if (prevStatus != null) {
                notifyWatchers(service, null);
            }
        }
    }

    // Marks every service NOT_SERVING and freezes the map against further updates;
    // intended to be called once during shutdown.
    void enterTerminalState() {
        synchronized (watchLock) {
            if (terminal) {
                logger.warn(PROTOCOL_CLOSED_SERVER, "", "", "Already terminating", new RuntimeException());
                return;
            }
            terminal = true;
            for (String service : statusMap.keySet()) {
                setStatusInternal(service, HealthCheckResponse.ServingStatus.NOT_SERVING);
            }
        }
    }

    // Caller must hold watchLock.
    // NOTE(review): responseObserver.onNext() runs while holding watchLock, i.e. alien
    // code is invoked under the lock — presumably safe because onNext only enqueues a
    // message, but worth confirming it cannot block or re-enter this class.
    private void notifyWatchers(String service, HealthCheckResponse.ServingStatus status) {
        HealthCheckResponse response = getResponseForWatch(status);
        IdentityHashMap<StreamObserver<HealthCheckResponse>, Boolean> serviceWatchers = watchers.get(service);
        if (serviceWatchers != null) {
            for (StreamObserver<HealthCheckResponse> responseObserver : serviceWatchers.keySet()) {
                responseObserver.onNext(response);
            }
        }
    }
}
| 6,178 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/stub/StubInvocationUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.stub;
import org.apache.dubbo.common.stream.StreamObserver;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.RpcInvocation;
import org.apache.dubbo.rpc.TriRpcStatus;
import org.apache.dubbo.rpc.model.MethodDescriptor;
import org.apache.dubbo.rpc.proxy.InvocationUtil;
/**
 * Invocation helpers used by generated Triple stubs to bridge the gRPC call shapes
 * (unary, server-streaming, client-streaming/bidirectional) onto a Dubbo {@link Invoker}.
 */
public class StubInvocationUtil {

    /**
     * Performs a blocking unary call.
     *
     * @param request the single request message
     * @return the response produced by the remote method
     */
    @SuppressWarnings("unchecked")
    public static <T, R> R unaryCall(Invoker<?> invoker, MethodDescriptor methodDescriptor, T request) {
        return (R) call(invoker, methodDescriptor, new Object[] {request});
    }

    /**
     * Performs a unary call and reports the outcome through {@code responseObserver}.
     * Exactly one terminal signal is emitted: onCompleted after a successful onNext,
     * or onError on failure.
     */
    @SuppressWarnings("unchecked")
    public static <T, R> void unaryCall(
            Invoker<?> invoker, MethodDescriptor method, T request, StreamObserver<R> responseObserver) {
        try {
            Object res = unaryCall(invoker, method, request);
            responseObserver.onNext((R) res);
            // Fix: onCompleted() previously ran even after onError(), producing two
            // terminal events and violating the StreamObserver contract.
            responseObserver.onCompleted();
        } catch (Exception e) {
            responseObserver.onError(e);
        }
    }

    /** Opens a client-streaming or bidirectional-streaming call. */
    @SuppressWarnings("unchecked")
    public static <T, R> StreamObserver<T> biOrClientStreamCall(
            Invoker<?> invoker, MethodDescriptor method, StreamObserver<R> responseObserver) {
        return (StreamObserver<T>) call(invoker, method, new Object[] {responseObserver});
    }

    /** Opens a server-streaming call; responses arrive on {@code responseObserver}. */
    public static <T, R> void serverStreamCall(
            Invoker<?> invoker, MethodDescriptor method, T request, StreamObserver<R> responseObserver) {
        call(invoker, method, new Object[] {request, responseObserver});
    }

    /**
     * Builds the {@link RpcInvocation} and performs the actual invoke. RuntimeExceptions
     * propagate unchanged; any other throwable is wrapped in a triple INTERNAL status.
     */
    private static Object call(Invoker<?> invoker, MethodDescriptor methodDescriptor, Object[] arguments) {
        RpcInvocation rpcInvocation = new RpcInvocation(
                invoker.getUrl().getServiceModel(),
                methodDescriptor.getMethodName(),
                invoker.getInterface().getName(),
                invoker.getUrl().getProtocolServiceKey(),
                methodDescriptor.getParameterClasses(),
                arguments);
        // When there are multiple MethodDescriptors with the same method name, the return type will be wrong
        rpcInvocation.setReturnType(methodDescriptor.getReturnClass());
        try {
            return InvocationUtil.invoke(invoker, rpcInvocation);
        } catch (Throwable e) {
            if (e instanceof RuntimeException) {
                throw (RuntimeException) e;
            } else {
                throw TriRpcStatus.INTERNAL.withCause(e).asException();
            }
        }
    }
}
| 6,179 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/demo/MultiClassLoaderService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package demo;
/**
 * Test-fixture service used to exercise invocation across multiple class loaders.
 * Declares {@code Object} as the return type — presumably so the result can cross
 * class-loader boundaries without the interface referencing a concrete result class;
 * implementations in this package return {@code MultiClassLoaderServiceResult}.
 * NOTE(review): confirm the Object return is intentional for the classloader tests.
 */
public interface MultiClassLoaderService {
    // innerRequest is captured by the implementation so tests can inspect its class loader.
    Object call(MultiClassLoaderServiceRequest innerRequest);
}
| 6,180 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/demo/MultiClassLoaderServiceRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package demo;
import java.io.Serializable;
/** Empty serializable request payload used by the multi-classloader injvm tests. */
public class MultiClassLoaderServiceRequest implements Serializable {
    // Pin the stream format explicitly so recompilation does not alter the computed UID.
    private static final long serialVersionUID = 1L;
}
| 6,181 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/demo/MultiClassLoaderServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package demo;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Test implementation that records the incoming request into a shared reference and
 * replies with a pre-seeded result, so callers can observe which class loader
 * produced each object.
 */
public class MultiClassLoaderServiceImpl implements MultiClassLoaderService {

    // Captures the most recent request passed to call(); final: assigned once in the constructor.
    private final AtomicReference<MultiClassLoaderServiceRequest> innerRequestReference;

    // Supplies the canned result returned from call(); final: assigned once in the constructor.
    private final AtomicReference<MultiClassLoaderServiceResult> innerResultReference;

    public MultiClassLoaderServiceImpl(
            AtomicReference<MultiClassLoaderServiceRequest> innerRequestReference,
            AtomicReference<MultiClassLoaderServiceResult> innerResultReference) {
        this.innerRequestReference = innerRequestReference;
        this.innerResultReference = innerResultReference;
    }

    @Override
    public MultiClassLoaderServiceResult call(MultiClassLoaderServiceRequest innerRequest) {
        innerRequestReference.set(innerRequest);
        return innerResultReference.get();
    }
}
| 6,182 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/demo/Empty.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package demo;
public class Empty {}
| 6,183 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/demo/MultiClassLoaderServiceResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package demo;
import java.io.Serializable;
public class MultiClassLoaderServiceResult implements Serializable {}
| 6,184 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/InjvmProtocolTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.extension.ExtensionLoader;
import org.apache.dubbo.common.utils.StringUtils;
import org.apache.dubbo.rpc.Exporter;
import org.apache.dubbo.rpc.FutureContext;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.Protocol;
import org.apache.dubbo.rpc.ProxyFactory;
import org.apache.dubbo.rpc.RpcContext;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.FrameworkModel;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import static org.apache.dubbo.common.constants.CommonConstants.APPLICATION_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.GROUP_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.INTERFACE_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.VERSION_KEY;
import static org.apache.dubbo.rpc.Constants.ASYNC_KEY;
import static org.apache.dubbo.rpc.Constants.GENERIC_KEY;
import static org.apache.dubbo.rpc.Constants.LOCAL_PROTOCOL;
import static org.apache.dubbo.rpc.Constants.SCOPE_KEY;
import static org.apache.dubbo.rpc.Constants.SCOPE_LOCAL;
import static org.apache.dubbo.rpc.Constants.SCOPE_REMOTE;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* <code>ProxiesTest</code>
*/
class InjvmProtocolTest {
private final Protocol protocol =
ExtensionLoader.getExtensionLoader(Protocol.class).getAdaptiveExtension();
private final ProxyFactory proxy =
ExtensionLoader.getExtensionLoader(ProxyFactory.class).getAdaptiveExtension();
private final List<Exporter<?>> exporters = new ArrayList<>();
@AfterEach
public void after() throws Exception {
for (Exporter<?> exporter : exporters) {
exporter.unexport();
}
exporters.clear();
}
@Test
void testLocalProtocol() throws Exception {
DemoService service = new DemoServiceImpl();
Invoker<?> invoker = proxy.getInvoker(
service,
DemoService.class,
URL.valueOf("injvm://127.0.0.1/TestService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule()));
assertTrue(invoker.isAvailable());
Exporter<?> exporter = protocol.export(invoker);
exporters.add(exporter);
service = proxy.getProxy(protocol.refer(
DemoService.class,
URL.valueOf("injvm://127.0.0.1/TestService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule())));
assertEquals(service.getSize(new String[] {"", "", ""}), 3);
service.invoke("injvm://127.0.0.1/TestService", "invoke");
InjvmInvoker<?> injvmInvoker = new InjvmInvoker<>(
DemoService.class, URL.valueOf("injvm://127.0.0.1/TestService"), null, new HashMap<>());
assertFalse(injvmInvoker.isAvailable());
}
@Test
void testLocalProtocolWithToken() {
DemoService service = new DemoServiceImpl();
Invoker<?> invoker = proxy.getInvoker(
service,
DemoService.class,
URL.valueOf("injvm://127.0.0.1/TestService?token=abc")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule()));
assertTrue(invoker.isAvailable());
Exporter<?> exporter = protocol.export(invoker);
exporters.add(exporter);
service = proxy.getProxy(protocol.refer(
DemoService.class,
URL.valueOf("injvm://127.0.0.1/TestService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule())));
assertEquals(service.getSize(new String[] {"", "", ""}), 3);
}
@Test
void testIsInjvmRefer() {
DemoService service = new DemoServiceImpl();
URL url = URL.valueOf("injvm://127.0.0.1/TestService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule());
Exporter<?> exporter = protocol.export(proxy.getInvoker(service, DemoService.class, url));
exporters.add(exporter);
url = url.setProtocol("dubbo");
assertTrue(InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
url = url.addParameter(GROUP_KEY, "*").addParameter(VERSION_KEY, "*");
assertTrue(InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
url = URL.valueOf("fake://127.0.0.1/TestService").addParameter(SCOPE_KEY, SCOPE_LOCAL);
assertTrue(InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
url = URL.valueOf("fake://127.0.0.1/TestService").addParameter(LOCAL_PROTOCOL, true);
assertTrue(InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
url = URL.valueOf("fake://127.0.0.1/TestService").addParameter(SCOPE_KEY, SCOPE_REMOTE);
assertFalse(
InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
url = URL.valueOf("fake://127.0.0.1/TestService").addParameter(GENERIC_KEY, true);
assertFalse(
InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
url = URL.valueOf("fake://127.0.0.1/TestService").addParameter("cluster", "broadcast");
assertFalse(
InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
}
@Test
void testLocalProtocolAsync() throws ExecutionException, InterruptedException {
DemoService service = new DemoServiceImpl();
URL url = URL.valueOf("injvm://127.0.0.1/TestService")
.addParameter(ASYNC_KEY, true)
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.addParameter("application", "consumer")
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule());
Invoker<?> invoker = proxy.getInvoker(service, DemoService.class, url);
assertTrue(invoker.isAvailable());
Exporter<?> exporter = protocol.export(invoker);
exporters.add(exporter);
service = proxy.getProxy(protocol.refer(DemoService.class, url));
assertNull(service.getAsyncResult());
assertEquals("DONE", FutureContext.getContext().getCompletableFuture().get());
}
@Test
void testApplication() {
DemoService service = new DemoServiceImpl();
URL url = URL.valueOf("injvm://127.0.0.1/TestService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.addParameter("application", "consumer")
.addParameter(APPLICATION_KEY, "test-app")
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule());
Invoker<?> invoker = proxy.getInvoker(service, DemoService.class, url);
assertTrue(invoker.isAvailable());
Exporter<?> exporter = protocol.export(invoker);
exporters.add(exporter);
service = proxy.getProxy(protocol.refer(DemoService.class, url));
assertEquals("test-app", service.getApplication());
assertTrue(StringUtils.isEmpty(RpcContext.getServiceContext().getRemoteApplicationName()));
}
@Test
void testRemoteAddress() {
DemoService service = new DemoServiceImpl();
URL url = URL.valueOf("injvm://127.0.0.1/TestService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.addParameter("application", "consumer")
.addParameter(APPLICATION_KEY, "test-app")
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule());
Invoker<?> invoker = proxy.getInvoker(service, DemoService.class, url);
assertTrue(invoker.isAvailable());
Exporter<?> exporter = protocol.export(invoker);
exporters.add(exporter);
service = proxy.getProxy(protocol.refer(DemoService.class, url));
assertEquals("127.0.0.1:0", service.getRemoteAddress());
assertNull(RpcContext.getServiceContext().getRemoteAddress());
}
}
| 6,185 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/Type.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
public enum Type {
High,
Normal,
Lower
}
| 6,186 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/InjvmClassLoaderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.compiler.support.CtClassBuilder;
import org.apache.dubbo.common.compiler.support.JavassistCompiler;
import org.apache.dubbo.common.utils.ClassUtils;
import org.apache.dubbo.config.ApplicationConfig;
import org.apache.dubbo.rpc.Exporter;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.Protocol;
import org.apache.dubbo.rpc.ProxyFactory;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.ConsumerModel;
import org.apache.dubbo.rpc.model.ModuleModel;
import org.apache.dubbo.rpc.model.ProviderModel;
import org.apache.dubbo.rpc.model.ServiceDescriptor;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import javassist.CannotCompileException;
import javassist.ClassPool;
import javassist.CtClass;
import javassist.NotFoundException;
import demo.Empty;
import demo.MultiClassLoaderService;
import demo.MultiClassLoaderServiceImpl;
import demo.MultiClassLoaderServiceRequest;
import demo.MultiClassLoaderServiceResult;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
class InjvmClassLoaderTest {
@Test
void testDifferentClassLoaderRequest() throws Exception {
String basePath = DemoService.class
.getProtectionDomain()
.getCodeSource()
.getLocation()
.getFile();
basePath = java.net.URLDecoder.decode(basePath, "UTF-8");
TestClassLoader1 classLoader1 = new TestClassLoader1(basePath);
TestClassLoader1 classLoader2 = new TestClassLoader1(basePath);
TestClassLoader2 classLoader3 = new TestClassLoader2(classLoader2, basePath);
ApplicationConfig applicationConfig = new ApplicationConfig("TestApp");
ApplicationModel applicationModel = ApplicationModel.defaultModel();
applicationModel.getApplicationConfigManager().setApplication(applicationConfig);
ModuleModel moduleModel = applicationModel.newModule();
Class clazz1 = classLoader1.loadClass(MultiClassLoaderService.class.getName(), false);
Class<?> clazz1impl = classLoader1.loadClass(MultiClassLoaderServiceImpl.class.getName(), false);
Class<?> requestClazzCustom1 = compileCustomRequest(classLoader1);
Class<?> resultClazzCustom1 = compileCustomResult(classLoader1);
classLoader1.loadedClass.put(requestClazzCustom1.getName(), requestClazzCustom1);
classLoader1.loadedClass.put(resultClazzCustom1.getName(), resultClazzCustom1);
// AtomicReference to cache request/response of provider
AtomicReference innerRequestReference = new AtomicReference();
AtomicReference innerResultReference = new AtomicReference();
innerResultReference.set(resultClazzCustom1.getDeclaredConstructor().newInstance());
Constructor<?> declaredConstructor =
clazz1impl.getDeclaredConstructor(AtomicReference.class, AtomicReference.class);
// export provider
ProxyFactory proxyFactory =
moduleModel.getExtensionLoader(ProxyFactory.class).getExtension("javassist");
Protocol protocol = moduleModel.getExtensionLoader(Protocol.class).getAdaptiveExtension();
Object providerInstance = declaredConstructor.newInstance(innerRequestReference, innerResultReference);
URL url = URL.valueOf("injvm://localhost:0/" + MultiClassLoaderServiceImpl.class.getName() + "?interface="
+ MultiClassLoaderServiceImpl.class.getName());
ServiceDescriptor providerServiceDescriptor =
moduleModel.getServiceRepository().registerService(clazz1);
ProviderModel providerModel =
new ProviderModel(url.getServiceKey(), providerInstance, providerServiceDescriptor, null, null);
providerModel.setClassLoader(classLoader1);
URL providerUrl = url.setScopeModel(moduleModel).setServiceModel(providerModel);
Invoker invoker = proxyFactory.getInvoker(providerInstance, clazz1, providerUrl);
Exporter<?> exporter = protocol.export(invoker);
Class<?> clazz2 = classLoader2.loadClass(MultiClassLoaderService.class.getName(), false);
Class<?> requestClazzOrigin = classLoader2.loadClass(MultiClassLoaderServiceRequest.class.getName(), false);
Class<?> requestClazzCustom2 = compileCustomRequest(classLoader2);
Class<?> resultClazzCustom3 = compileCustomResult(classLoader3);
classLoader2.loadedClass.put(requestClazzCustom2.getName(), requestClazzCustom2);
classLoader3.loadedClass.put(resultClazzCustom3.getName(), resultClazzCustom3);
// refer consumer
ServiceDescriptor consumerServiceDescriptor =
moduleModel.getServiceRepository().registerService(clazz2);
ConsumerModel consumerModel = new ConsumerModel(
clazz2.getName(),
null,
consumerServiceDescriptor,
ApplicationModel.defaultModel().getDefaultModule(),
null,
null,
ClassUtils.getClassLoader(clazz2));
consumerModel.setClassLoader(classLoader3);
URL consumerUrl = url.setScopeModel(moduleModel).setServiceModel(consumerModel);
Object object1 = proxyFactory.getProxy(protocol.refer(clazz2, consumerUrl));
java.lang.reflect.Method callBean1 = object1.getClass().getDeclaredMethod("call", requestClazzOrigin);
callBean1.setAccessible(true);
Object result1 = callBean1.invoke(
object1, requestClazzCustom2.getDeclaredConstructor().newInstance());
// invoke result should load from classLoader3 ( sub classLoader of classLoader2 --> consumer side classLoader)
Assertions.assertEquals(resultClazzCustom3, result1.getClass());
Assertions.assertNotEquals(classLoader2, result1.getClass().getClassLoader());
// invoke reqeust param should load from classLoader1 ( provider side classLoader )
Assertions.assertEquals(
classLoader1, innerRequestReference.get().getClass().getClassLoader());
exporter.unexport();
applicationModel.destroy();
}
private Class<?> compileCustomRequest(ClassLoader classLoader)
throws NotFoundException, CannotCompileException, ClassNotFoundException {
CtClassBuilder builder = new CtClassBuilder();
builder.setClassName(MultiClassLoaderServiceRequest.class.getName() + "A");
builder.setSuperClassName(MultiClassLoaderServiceRequest.class.getName());
CtClass cls = builder.build(classLoader);
ClassPool cp = cls.getClassPool();
if (classLoader == null) {
classLoader = cp.getClassLoader();
}
return cp.toClass(
cls,
classLoader.loadClass(Empty.class.getName()),
classLoader,
JavassistCompiler.class.getProtectionDomain());
}
private Class<?> compileCustomResult(ClassLoader classLoader)
throws NotFoundException, CannotCompileException, ClassNotFoundException {
CtClassBuilder builder = new CtClassBuilder();
builder.setClassName(MultiClassLoaderServiceResult.class.getName() + "A");
builder.setSuperClassName(MultiClassLoaderServiceResult.class.getName());
CtClass cls = builder.build(classLoader);
ClassPool cp = cls.getClassPool();
if (classLoader == null) {
classLoader = cp.getClassLoader();
}
return cp.toClass(
cls,
classLoader.loadClass(Empty.class.getName()),
classLoader,
JavassistCompiler.class.getProtectionDomain());
}
private static class TestClassLoader1 extends ClassLoader {
private String basePath;
public TestClassLoader1(String basePath) {
this.basePath = basePath;
}
Map<String, Class<?>> loadedClass = new ConcurrentHashMap<>();
@Override
protected Class<?> findClass(String name) throws ClassNotFoundException {
try {
byte[] bytes = loadClassData(name);
return defineClass(name, bytes, 0, bytes.length);
} catch (Exception e) {
throw new ClassNotFoundException();
}
}
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
if (loadedClass.containsKey(name)) {
return loadedClass.get(name);
}
if (name.startsWith("demo")) {
if (name.equals(MultiClassLoaderServiceRequest.class.getName())
|| name.equals(MultiClassLoaderServiceResult.class.getName())) {
return super.loadClass(name, resolve);
}
Class<?> aClass = this.findClass(name);
this.loadedClass.put(name, aClass);
if (resolve) {
this.resolveClass(aClass);
}
return aClass;
} else {
Class<?> loadedClass = this.findLoadedClass(name);
if (loadedClass != null) {
return loadedClass;
} else {
return super.loadClass(name, resolve);
}
}
}
public byte[] loadClassData(String className) throws IOException {
className = className.replaceAll("\\.", "/");
String path = basePath + File.separator + className + ".class";
FileInputStream fileInputStream;
byte[] classBytes;
fileInputStream = new FileInputStream(path);
int length = fileInputStream.available();
classBytes = new byte[length];
fileInputStream.read(classBytes);
fileInputStream.close();
return classBytes;
}
}
private static class TestClassLoader2 extends ClassLoader {
private String basePath;
private TestClassLoader1 testClassLoader;
Map<String, Class<?>> loadedClass = new ConcurrentHashMap<>();
public TestClassLoader2(TestClassLoader1 testClassLoader, String basePath) {
this.testClassLoader = testClassLoader;
this.basePath = basePath;
}
@Override
protected Class<?> findClass(String name) throws ClassNotFoundException {
try {
byte[] bytes = loadClassData(name);
return defineClass(name, bytes, 0, bytes.length);
} catch (Exception e) {
return testClassLoader.loadClass(name, false);
}
}
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
if (loadedClass.containsKey(name)) {
return loadedClass.get(name);
}
if (name.startsWith("demo.MultiClassLoaderServiceRe") || name.startsWith("demo.Empty")) {
Class<?> aClass = this.findClass(name);
this.loadedClass.put(name, aClass);
if (resolve) {
this.resolveClass(aClass);
}
return aClass;
} else {
return testClassLoader.loadClass(name, resolve);
}
}
public byte[] loadClassData(String className) throws IOException {
className = className.replaceAll("\\.", "/");
String path = basePath + File.separator + className + ".class";
FileInputStream fileInputStream;
byte[] classBytes;
fileInputStream = new FileInputStream(path);
int length = fileInputStream.available();
classBytes = new byte[length];
fileInputStream.read(classBytes);
fileInputStream.close();
return classBytes;
}
}
}
| 6,187 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/ProtocolTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.extension.ExtensionLoader;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.Protocol;
import org.apache.dubbo.rpc.ProxyFactory;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.junit.jupiter.api.Test;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
class ProtocolTest {
IEcho echo = new IEcho() {
public String echo(String e) {
return e;
}
};
static {
InjvmProtocol injvm = InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel());
}
ProxyFactory proxyFactory =
ExtensionLoader.getExtensionLoader(ProxyFactory.class).getExtension("javassist");
URL url = URL.valueOf(
"injvm://localhost:0/org.apache.dubbo.rpc.support.IEcho?interface=org.apache.dubbo.rpc.support.IEcho")
.setScopeModel(ApplicationModel.defaultModel().getDefaultModule());
Invoker<IEcho> invoker = proxyFactory.getInvoker(echo, IEcho.class, url);
@Test
void test_destroyWontCloseAllProtocol() throws Exception {
Protocol autowireProtocol =
ExtensionLoader.getExtensionLoader(Protocol.class).getAdaptiveExtension();
Protocol InjvmProtocol =
ExtensionLoader.getExtensionLoader(Protocol.class).getExtension("injvm");
assertEquals(0, InjvmProtocol.getDefaultPort());
InjvmProtocol.export(invoker);
Invoker<IEcho> refer = InjvmProtocol.refer(IEcho.class, url);
IEcho echoProxy = proxyFactory.getProxy(refer);
assertEquals("ok", echoProxy.echo("ok"));
try {
autowireProtocol.destroy();
} catch (UnsupportedOperationException expected) {
assertThat(
expected.getMessage(),
containsString("of interface org.apache.dubbo.rpc.Protocol is not adaptive method!"));
}
assertEquals("ok2", echoProxy.echo("ok2"));
}
}
| 6,188 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/DemoServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.rpc.RpcContext;
/**
* DemoServiceImpl
*/
public class DemoServiceImpl implements DemoService {
public DemoServiceImpl() {
super();
}
public void sayHello(String name) {
System.out.println("hello " + name);
}
public String echo(String text) {
return text;
}
public long timestamp() {
return System.currentTimeMillis();
}
public String getThreadName() {
return Thread.currentThread().getName();
}
public int getSize(String[] strs) {
if (strs == null) return -1;
return strs.length;
}
public int getSize(Object[] os) {
if (os == null) return -1;
return os.length;
}
public Object invoke(String service, String method) throws Exception {
System.out.println("RpcContext.getServerAttachment().getRemoteHost()="
+ RpcContext.getServiceContext().getRemoteHost());
return service + ":" + method;
}
public Type enumlength(Type... types) {
if (types.length == 0) return Type.Lower;
return types[0];
}
public int stringLength(String str) {
return str.length();
}
@Override
public String getAsyncResult() {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
System.out.println("getAsyncResult() Interrupted");
}
return "DONE";
}
@Override
public String getApplication() {
return RpcContext.getServiceContext().getRemoteApplicationName();
}
@Override
public String getRemoteAddress() {
return RpcContext.getServiceContext().getRemoteAddressString();
}
}
| 6,189 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/DemoRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import java.io.Serializable;
/**
* TestRequest.
*/
class DemoRequest implements Serializable {
private static final long serialVersionUID = -2579095288792344869L;
private String mServiceName;
private String mMethodName;
private Class<?>[] mParameterTypes;
private Object[] mArguments;
public DemoRequest(String serviceName, String methodName, Class<?>[] parameterTypes, Object[] args) {
mServiceName = serviceName;
mMethodName = methodName;
mParameterTypes = parameterTypes;
mArguments = args;
}
public String getServiceName() {
return mServiceName;
}
public String getMethodName() {
return mMethodName;
}
public Class<?>[] getParameterTypes() {
return mParameterTypes;
}
public Object[] getArguments() {
return mArguments;
}
}
| 6,190 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/InjvmDeepCopyTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.utils.ClassUtils;
import org.apache.dubbo.config.ApplicationConfig;
import org.apache.dubbo.rpc.Exporter;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.Protocol;
import org.apache.dubbo.rpc.ProxyFactory;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.ConsumerModel;
import org.apache.dubbo.rpc.model.ModuleModel;
import org.apache.dubbo.rpc.model.ProviderModel;
import org.apache.dubbo.rpc.model.ServiceDescriptor;
import java.io.Serializable;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
/**
 * Verifies that the injvm protocol deep-copies request and response objects across the
 * local call boundary — the provider must never observe the consumer's instances (and
 * vice versa) — and that {@code null} arguments/results pass through unchanged.
 */
class InjvmDeepCopyTest {
    @Test
    void testDeepCopy() {
        ApplicationModel applicationModel = ApplicationModel.defaultModel();
        applicationModel.getApplicationConfigManager().setApplication(new ApplicationConfig("TestInjvm"));
        ModuleModel moduleModel = applicationModel.newModule();

        // Capture exactly which instances the provider receives and returns.
        AtomicReference<Data> requestReference = new AtomicReference<>();
        AtomicReference<Data> responseReference = new AtomicReference<>();
        Demo demo = new Demo(requestReference, responseReference);

        // export provider
        ProxyFactory proxyFactory =
                moduleModel.getExtensionLoader(ProxyFactory.class).getExtension("javassist");
        Protocol protocol = moduleModel.getExtensionLoader(Protocol.class).getAdaptiveExtension();
        URL url = URL.valueOf(
                "injvm://localhost:0/" + DemoInterface.class.getName() + "?interface=" + DemoInterface.class.getName());
        ServiceDescriptor providerServiceDescriptor =
                moduleModel.getServiceRepository().registerService(DemoInterface.class);
        ProviderModel providerModel =
                new ProviderModel(url.getServiceKey(), demo, providerServiceDescriptor, null, null);
        URL providerUrl = url.setScopeModel(moduleModel).setServiceModel(providerModel);
        Invoker<DemoInterface> invoker = proxyFactory.getInvoker(demo, DemoInterface.class, providerUrl);
        Exporter<?> exporter = protocol.export(invoker);

        // refer consumer
        ServiceDescriptor consumerServiceDescriptor =
                moduleModel.getServiceRepository().registerService(DemoInterface.class);
        ConsumerModel consumerModel = new ConsumerModel(
                DemoInterface.class.getName(),
                null,
                consumerServiceDescriptor,
                ApplicationModel.defaultModel().getDefaultModule(),
                null,
                null,
                ClassUtils.getClassLoader(DemoInterface.class));
        URL consumerUrl = url.setScopeModel(moduleModel).setServiceModel(consumerModel);
        DemoInterface stub = proxyFactory.getProxy(protocol.refer(DemoInterface.class, consumerUrl));

        Data request = new Data();
        Data response = stub.call(request);
        // Deep copy is an identity property: the provider must see a *different instance*
        // than the one sent, and the consumer must receive a different instance than the
        // one the provider produced. assertNotSame checks identity directly instead of
        // relying on Data not overriding equals() (as assertNotEquals would).
        Assertions.assertNotSame(requestReference.get(), request);
        Assertions.assertNotSame(responseReference.get(), response);

        // null must pass through as null — no copy is attempted.
        Data response1 = stub.call(null);
        Assertions.assertNull(requestReference.get());
        Assertions.assertNull(responseReference.get());
        Assertions.assertNull(response1);

        exporter.unexport();
        applicationModel.destroy();
    }

    interface DemoInterface {
        Data call(Data obj);
    }

    /** Provider implementation that records the exact instances it sees. */
    private static class Demo implements DemoInterface {
        private AtomicReference<Data> requestReference;
        private AtomicReference<Data> responseReference;

        public Demo(AtomicReference<Data> requestReference, AtomicReference<Data> responseReference) {
            this.requestReference = requestReference;
            this.responseReference = responseReference;
        }

        @Override
        public Data call(Data obj) {
            requestReference.set(obj);
            Data result = null;
            if (obj != null) {
                result = new Data();
            }
            responseReference.set(result);
            return result;
        }
    }

    /** Serializable marker payload; identity (not equality) is what the test compares. */
    private static class Data implements Serializable {}
}
| 6,191 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/DemoService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
/**
 * <code>DemoService</code> — sample service contract for the injvm protocol tests.
 *
 * <p>NOTE(review): method semantics are defined by test implementations that are not
 * visible in this file.
 */
public interface DemoService {

    void sayHello(String name);

    String echo(String text);

    long timestamp();

    String getThreadName();

    int getSize(String[] strs);

    int getSize(Object[] os);

    Object invoke(String service, String method) throws Exception;

    int stringLength(String str);

    // Varargs over the package-local Type enum (declared elsewhere in this package).
    Type enumlength(Type... types);

    String getAsyncResult();

    String getApplication();

    String getRemoteAddress();
}
| 6,192 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/IEcho.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
/**
 * Minimal echo contract used in the injvm protocol test package; implementations are
 * expected to return the input string.
 */
public interface IEcho {
    String echo(String e);
}
| 6,193 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol/injvm/InjvmProtocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.utils.CollectionUtils;
import org.apache.dubbo.common.utils.UrlUtils;
import org.apache.dubbo.rpc.Exporter;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.Protocol;
import org.apache.dubbo.rpc.RpcException;
import org.apache.dubbo.rpc.model.ScopeModel;
import org.apache.dubbo.rpc.protocol.AbstractProtocol;
import java.util.Map;
import static org.apache.dubbo.common.constants.CommonConstants.BROADCAST_CLUSTER;
import static org.apache.dubbo.common.constants.CommonConstants.CLUSTER_KEY;
import static org.apache.dubbo.rpc.Constants.GENERIC_KEY;
import static org.apache.dubbo.rpc.Constants.LOCAL_PROTOCOL;
import static org.apache.dubbo.rpc.Constants.SCOPE_KEY;
import static org.apache.dubbo.rpc.Constants.SCOPE_LOCAL;
import static org.apache.dubbo.rpc.Constants.SCOPE_REMOTE;
/**
 * InjvmProtocol — exposes and references services inside the current JVM, bypassing the
 * network entirely. Exported services are tracked in the shared exporter map inherited
 * from {@link AbstractProtocol}, keyed by service key.
 */
public class InjvmProtocol extends AbstractProtocol {

    public static final String NAME = LOCAL_PROTOCOL;

    public static final int DEFAULT_PORT = 0;

    /** Looks up the injvm protocol extension registered under the given scope model. */
    public static InjvmProtocol getInjvmProtocol(ScopeModel scopeModel) {
        return (InjvmProtocol) scopeModel.getExtensionLoader(Protocol.class).getExtension(InjvmProtocol.NAME, false);
    }

    /**
     * Finds the exporter matching {@code key}: an exact service-key lookup when the key
     * contains no wildcard, otherwise a linear scan using wildcard matching.
     *
     * @return the matching exporter, or {@code null} if none is exported locally
     */
    static Exporter<?> getExporter(Map<String, Exporter<?>> map, URL key) {
        String serviceKey = key.getServiceKey();
        if (!serviceKey.contains("*")) {
            return map.get(serviceKey);
        }
        if (CollectionUtils.isNotEmptyMap(map)) {
            for (Exporter<?> candidate : map.values()) {
                if (UrlUtils.isServiceKeyMatch(key, candidate.getInvoker().getUrl())) {
                    return candidate;
                }
            }
        }
        return null;
    }

    @Override
    public int getDefaultPort() {
        return DEFAULT_PORT;
    }

    /** Exports the invoker locally by registering it into the shared exporter map. */
    @Override
    public <T> Exporter<T> export(Invoker<T> invoker) throws RpcException {
        return new InjvmExporter<>(invoker, invoker.getUrl().getServiceKey(), exporterMap);
    }

    /** Creates a local invoker that resolves its target through the shared exporter map. */
    @Override
    public <T> Invoker<T> protocolBindingRefer(Class<T> serviceType, URL url) throws RpcException {
        return new InjvmInvoker<>(serviceType, url, url.getServiceKey(), exporterMap);
    }

    /**
     * Decides whether the given reference should be resolved in-JVM. Explicit local scope
     * wins, then explicit remote scope, then generic invocation (never local); otherwise
     * the call goes local only when the service is exported here and the cluster is not
     * broadcast.
     */
    public boolean isInjvmRefer(URL url) {
        String scope = url.getParameter(SCOPE_KEY);
        // 'scope=local' is equivalent to 'injvm=true'; injvm will be deprecated in a future release.
        if (SCOPE_LOCAL.equals(scope) || url.getParameter(LOCAL_PROTOCOL, false)) {
            return true;
        }
        // Explicitly declared as a remote reference.
        if (SCOPE_REMOTE.equals(scope)) {
            return false;
        }
        // Generic invocation is never treated as a local reference.
        if (url.getParameter(GENERIC_KEY, false)) {
            return false;
        }
        // Not exported in this JVM: must go remote.
        if (getExporter(exporterMap, url) == null) {
            return false;
        }
        // Broadcast cluster targets multiple machines, so it cannot collapse to injvm;
        // otherwise prefer the local exposure by default.
        return !BROADCAST_CLUSTER.equalsIgnoreCase(url.getParameter(CLUSTER_KEY));
    }
}
| 6,194 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol/injvm/DefaultParamDeepCopyUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.serialize.ObjectInput;
import org.apache.dubbo.common.serialize.ObjectOutput;
import org.apache.dubbo.common.serialize.Serialization;
import org.apache.dubbo.remoting.utils.UrlUtils;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROTOCOL_ERROR_DESERIALIZE;
/**
 * Default {@link ParamDeepCopyUtil}: deep-copies a parameter by round-tripping it through
 * the serialization extension configured on the URL (serialize to bytes, then deserialize
 * as {@code targetClass}).
 */
public class DefaultParamDeepCopyUtil implements ParamDeepCopyUtil {
    private static final ErrorTypeAwareLogger logger =
            LoggerFactory.getErrorTypeAwareLogger(DefaultParamDeepCopyUtil.class);
    public static final String NAME = "default";

    /**
     * Deep copies {@code src} into an instance of {@code targetClass}.
     *
     * @param url         carries the serialization extension name used for the round trip
     * @param src         object to copy; {@code null} yields {@code null}
     * @param targetClass expected type of the copy
     * @return the deep copy; on failure falls back to the original instance when its type
     *         already matches {@code targetClass} (no isolation, but the call still works),
     *         otherwise {@code null}
     */
    @Override
    @SuppressWarnings({"unchecked"})
    public <T> T copy(URL url, Object src, Class<T> targetClass) {
        if (src == null) {
            // Round-tripping null would just read back null; short-circuit and also avoid
            // the NPE that src.getClass() in the fallback below would throw if the
            // serialization attempt failed.
            return null;
        }
        Serialization serialization = url.getOrDefaultFrameworkModel()
                .getExtensionLoader(Serialization.class)
                .getExtension(UrlUtils.serializationOrDefault(url));
        try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
            ObjectOutput objectOutput = serialization.serialize(url, outputStream);
            objectOutput.writeObject(src);
            objectOutput.flushBuffer();
            try (ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray())) {
                ObjectInput objectInput = serialization.deserialize(url, inputStream);
                return objectInput.readObject(targetClass);
            } catch (ClassNotFoundException | IOException e) {
                logger.error(PROTOCOL_ERROR_DESERIALIZE, "", "", "Unable to deep copy parameter to target class.", e);
            }
        } catch (Throwable e) {
            logger.error(PROTOCOL_ERROR_DESERIALIZE, "", "", "Unable to deep copy parameter to target class.", e);
        }
        // Copy failed: reuse the original instance when the type already matches.
        if (src.getClass().equals(targetClass)) {
            return (T) src;
        } else {
            return null;
        }
    }
}
| 6,195 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol/injvm/InjvmInvoker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.threadlocal.InternalThreadLocalMap;
import org.apache.dubbo.common.threadpool.manager.ExecutorRepository;
import org.apache.dubbo.common.url.component.DubboServiceAddressURL;
import org.apache.dubbo.common.utils.ArrayUtils;
import org.apache.dubbo.common.utils.ExecutorUtil;
import org.apache.dubbo.common.utils.ReflectUtils;
import org.apache.dubbo.rpc.AppResponse;
import org.apache.dubbo.rpc.AsyncRpcResult;
import org.apache.dubbo.rpc.Constants;
import org.apache.dubbo.rpc.Exporter;
import org.apache.dubbo.rpc.FutureContext;
import org.apache.dubbo.rpc.Invocation;
import org.apache.dubbo.rpc.InvokeMode;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.Result;
import org.apache.dubbo.rpc.RpcContext;
import org.apache.dubbo.rpc.RpcException;
import org.apache.dubbo.rpc.RpcInvocation;
import org.apache.dubbo.rpc.model.MethodDescriptor;
import org.apache.dubbo.rpc.model.ServiceModel;
import org.apache.dubbo.rpc.protocol.AbstractInvoker;
import org.apache.dubbo.rpc.support.RpcUtils;
import java.lang.reflect.Type;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import static org.apache.dubbo.common.constants.CommonConstants.DEFAULT_TIMEOUT;
import static org.apache.dubbo.common.constants.CommonConstants.LOCALHOST_VALUE;
import static org.apache.dubbo.common.constants.CommonConstants.TIMEOUT_KEY;
import static org.apache.dubbo.config.Constants.SERVER_THREAD_POOL_NAME;
import static org.apache.dubbo.rpc.Constants.ASYNC_KEY;
/**
 * InjvmInvoker
 *
 * <p>Client-side invoker for the injvm (in-JVM) protocol. It resolves the locally exported
 * {@link Exporter} by service key and calls the provider invoker directly, deep-copying
 * arguments and return values between consumer and provider so that the two sides do not
 * share mutable instances (mimicking the isolation of a remote call).
 */
public class InjvmInvoker<T> extends AbstractInvoker<T> {

    // Service key used to locate the local exporter in exporterMap.
    private final String key;

    // Shared registry of all locally exported services, keyed by service key.
    private final Map<String, Exporter<?>> exporterMap;

    // Lazily resolved on first invocation; volatile for safe publication across threads.
    private volatile Exporter<?> exporter = null;

    // Consumer-side view of the provider URL; built lazily (duplicate builds are harmless).
    private volatile URL consumerUrl = null;

    private final ExecutorRepository executorRepository;

    // Strategy used to deep-copy arguments/results across the local call boundary.
    private final ParamDeepCopyUtil paramDeepCopyUtil;

    // When true, skip argument copying if consumer and provider live in the same module.
    private final boolean shouldIgnoreSameModule;

    // 2.6.x compatibility switch: expose the CompletableFuture even in sync mode (e.g. for
    // com.alibaba.xxx.FutureAdapter users such as Zipkin's TraceFilter).
    private static final boolean setFutureWhenSync =
            Boolean.parseBoolean(System.getProperty(CommonConstants.SET_FUTURE_IN_SYNC_MODE, "true"));

    InjvmInvoker(Class<T> type, URL url, String key, Map<String, Exporter<?>> exporterMap) {
        super(type, url);
        this.key = key;
        this.exporterMap = exporterMap;
        this.executorRepository = ExecutorRepository.getInstance(url.getOrDefaultApplicationModel());
        this.paramDeepCopyUtil = url.getOrDefaultFrameworkModel()
                .getExtensionLoader(ParamDeepCopyUtil.class)
                .getExtension(url.getParameter(CommonConstants.INJVM_COPY_UTIL_KEY, DefaultParamDeepCopyUtil.NAME));
        this.shouldIgnoreSameModule = url.getParameter(CommonConstants.INJVM_IGNORE_SAME_MODULE_KEY, false);
    }

    /** Available only while the target service is still exported in this JVM. */
    @Override
    public boolean isAvailable() {
        InjvmExporter<?> exporter = (InjvmExporter<?>) exporterMap.get(key);
        if (exporter == null) {
            return false;
        } else {
            return super.isAvailable();
        }
    }

    @Override
    public Result doInvoke(Invocation invocation) throws Throwable {
        // Resolve the local exporter lazily; fail fast if the service is not exported here.
        if (exporter == null) {
            exporter = InjvmProtocol.getExporter(exporterMap, getUrl());
            if (exporter == null) {
                throw new RpcException("Service [" + key + "] not found.");
            }
        }
        // Solve local exposure, the server opens the token, and the client call fails.
        Invoker<?> invoker = exporter.getInvoker();
        URL serverURL = invoker.getUrl();
        boolean serverHasToken = serverURL.hasParameter(Constants.TOKEN_KEY);
        if (serverHasToken) {
            invocation.setAttachment(Constants.TOKEN_KEY, serverURL.getParameter(Constants.TOKEN_KEY));
        }

        if (consumerUrl == null) {
            // no need to sync, multi-objects is acceptable and will be gc-ed.
            consumerUrl =
                    new DubboServiceAddressURL(serverURL.getUrlAddress(), serverURL.getUrlParam(), getUrl(), null);
        }
        // Enforce the remaining timeout budget even for local calls.
        int timeout =
                RpcUtils.calculateTimeout(consumerUrl, invocation, RpcUtils.getMethodName(invocation), DEFAULT_TIMEOUT);
        if (timeout <= 0) {
            return AsyncRpcResult.newDefaultAsyncResult(
                    new RpcException(
                            RpcException.TIMEOUT_TERMINATE,
                            "No time left for making the following call: " + invocation.getServiceName() + "."
                                    + RpcUtils.getMethodName(invocation) + ", terminate directly."),
                    invocation);
        }
        invocation.setAttachment(TIMEOUT_KEY, String.valueOf(timeout));

        // Method descriptor (parameter-type signature) used to look up provider/consumer methods.
        String desc = ReflectUtils.getDesc(invocation.getParameterTypes());

        // recreate invocation ---> deep copy parameters
        Invocation copiedInvocation = recreateInvocation(invocation, invoker, desc);

        if (isAsync(invoker.getUrl(), getUrl())) {
            ((RpcInvocation) copiedInvocation).setInvokeMode(InvokeMode.ASYNC);
            // use consumer executor
            ExecutorService executor = executorRepository.createExecutorIfAbsent(
                    ExecutorUtil.setThreadName(getUrl(), SERVER_THREAD_POOL_NAME));
            CompletableFuture<AppResponse> appResponseFuture = CompletableFuture.supplyAsync(
                    () -> {
                        // clear thread local before child invocation, prevent context pollution
                        InternalThreadLocalMap originTL = InternalThreadLocalMap.getAndRemove();
                        try {
                            RpcContext.getServiceContext().setRemoteAddress(LOCALHOST_VALUE, 0);
                            RpcContext.getServiceContext().setRemoteApplicationName(getUrl().getApplication());
                            Result result = invoker.invoke(copiedInvocation);
                            if (result.hasException()) {
                                AppResponse appResponse = new AppResponse(result.getException());
                                appResponse.setObjectAttachments(new HashMap<>(result.getObjectAttachments()));
                                return appResponse;
                            } else {
                                // BUGFIX: copy the actual return value back into the
                                // consumer's class loader, mirroring the sync path below.
                                // Previously the whole Result object was passed in and the
                                // copied value was discarded, so async return values were
                                // never deep-copied.
                                Object rebuiltValue = rebuildValue(invocation, desc, result.getValue());
                                AppResponse appResponse = new AppResponse(rebuiltValue);
                                appResponse.setObjectAttachments(new HashMap<>(result.getObjectAttachments()));
                                return appResponse;
                            }
                        } finally {
                            InternalThreadLocalMap.set(originTL);
                        }
                    },
                    executor);
            // save for 2.6.x compatibility, for example, TraceFilter in Zipkin uses com.alibaba.xxx.FutureAdapter
            if (setFutureWhenSync || ((RpcInvocation) invocation).getInvokeMode() != InvokeMode.SYNC) {
                FutureContext.getContext().setCompatibleFuture(appResponseFuture);
            }
            AsyncRpcResult result = new AsyncRpcResult(appResponseFuture, copiedInvocation);
            result.setExecutor(executor);
            return result;
        } else {
            Result result;
            // clear thread local before child invocation, prevent context pollution
            InternalThreadLocalMap originTL = InternalThreadLocalMap.getAndRemove();
            try {
                RpcContext.getServiceContext().setRemoteAddress(LOCALHOST_VALUE, 0);
                RpcContext.getServiceContext().setRemoteApplicationName(getUrl().getApplication());
                result = invoker.invoke(copiedInvocation);
            } finally {
                InternalThreadLocalMap.set(originTL);
            }
            CompletableFuture<AppResponse> future = new CompletableFuture<>();
            AppResponse rpcResult = new AppResponse(copiedInvocation);
            if (result instanceof AsyncRpcResult) {
                // Provider returned asynchronously: rebuild the value when it completes.
                result.whenCompleteWithContext((r, t) -> {
                    if (t != null) {
                        rpcResult.setException(t);
                    } else {
                        if (r.hasException()) {
                            rpcResult.setException(r.getException());
                        } else {
                            Object rebuildValue = rebuildValue(invocation, desc, r.getValue());
                            rpcResult.setValue(rebuildValue);
                        }
                    }
                    rpcResult.setObjectAttachments(new HashMap<>(r.getObjectAttachments()));
                    future.complete(rpcResult);
                });
            } else {
                if (result.hasException()) {
                    rpcResult.setException(result.getException());
                } else {
                    Object rebuildValue = rebuildValue(invocation, desc, result.getValue());
                    rpcResult.setValue(rebuildValue);
                }
                rpcResult.setObjectAttachments(new HashMap<>(result.getObjectAttachments()));
                future.complete(rpcResult);
            }
            return new AsyncRpcResult(future, invocation);
        }
    }

    /**
     * Resolves the consumer-side declared return type for {@code methodName}/{@code desc},
     * or {@code null} when the method or its return types cannot be found.
     */
    private Class<?> getReturnType(ServiceModel consumerServiceModel, String methodName, String desc) {
        MethodDescriptor consumerMethod = consumerServiceModel.getServiceModel().getMethod(methodName, desc);
        if (consumerMethod != null) {
            Type[] returnTypes = consumerMethod.getReturnTypes();
            if (ArrayUtils.isNotEmpty(returnTypes)) {
                return (Class<?>) returnTypes[0];
            }
        }
        return null;
    }

    /**
     * Rebuilds the invocation against the provider's service model, deep-copying each
     * argument into the provider's class loader. Generic invocations, same-module calls
     * (when configured to be ignored), and unknown provider methods skip the copy and
     * reuse the original arguments.
     */
    private Invocation recreateInvocation(Invocation invocation, Invoker<?> invoker, String desc) {
        ClassLoader originClassLoader = Thread.currentThread().getContextClassLoader();
        ServiceModel providerServiceModel = invoker.getUrl().getServiceModel();
        if (providerServiceModel == null) {
            return invocation;
        }
        String methodName = invocation.getMethodName();

        ServiceModel consumerServiceModel = invocation.getServiceModel();
        boolean shouldSkip = shouldIgnoreSameModule
                && consumerServiceModel != null
                && Objects.equals(providerServiceModel.getModuleModel(), consumerServiceModel.getModuleModel());
        if (CommonConstants.$INVOKE.equals(methodName) || shouldSkip) {
            // generic invoke, skip copy arguments
            RpcInvocation copiedInvocation = new RpcInvocation(
                    invocation.getTargetServiceUniqueName(),
                    providerServiceModel,
                    methodName,
                    invocation.getServiceName(),
                    invocation.getProtocolServiceKey(),
                    invocation.getParameterTypes(),
                    invocation.getArguments(),
                    invocation.copyObjectAttachments(),
                    invocation.getInvoker(),
                    new HashMap<>(),
                    invocation instanceof RpcInvocation ? ((RpcInvocation) invocation).getInvokeMode() : null);
            copiedInvocation.setInvoker(invoker);
            return copiedInvocation;
        }
        MethodDescriptor providerMethod = providerServiceModel.getServiceModel().getMethod(methodName, desc);
        Object[] realArgument = null;
        if (providerMethod != null) {
            Class<?>[] pts = providerMethod.getParameterClasses();
            Object[] args = invocation.getArguments();

            // switch ClassLoader so copies are materialized with the provider's classes
            Thread.currentThread().setContextClassLoader(providerServiceModel.getClassLoader());
            try {
                // copy parameters
                if (pts != null && args != null && pts.length == args.length) {
                    realArgument = new Object[pts.length];
                    for (int i = 0; i < pts.length; i++) {
                        realArgument[i] = paramDeepCopyUtil.copy(consumerUrl, args[i], pts[i]);
                    }
                }
                if (realArgument == null) {
                    realArgument = args;
                }

                RpcInvocation copiedInvocation = new RpcInvocation(
                        invocation.getTargetServiceUniqueName(),
                        providerServiceModel,
                        methodName,
                        invocation.getServiceName(),
                        invocation.getProtocolServiceKey(),
                        pts,
                        realArgument,
                        invocation.copyObjectAttachments(),
                        invocation.getInvoker(),
                        new HashMap<>(),
                        invocation instanceof RpcInvocation ? ((RpcInvocation) invocation).getInvokeMode() : null);
                copiedInvocation.setInvoker(invoker);
                return copiedInvocation;
            } finally {
                Thread.currentThread().setContextClassLoader(originClassLoader);
            }
        } else {
            return invocation;
        }
    }

    /**
     * Deep-copies the provider's return value back into the consumer's class loader; when
     * the consumer-side return type cannot be determined, the original value is returned.
     */
    private Object rebuildValue(Invocation invocation, String desc, Object originValue) {
        Object value = originValue;
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        try {
            ServiceModel consumerServiceModel = getUrl().getServiceModel();
            if (consumerServiceModel != null) {
                Class<?> returnType = getReturnType(consumerServiceModel, invocation.getMethodName(), desc);
                if (returnType != null) {
                    Thread.currentThread().setContextClassLoader(consumerServiceModel.getClassLoader());
                    value = paramDeepCopyUtil.copy(consumerUrl, originValue, returnType);
                }
            }
            return value;
        } finally {
            Thread.currentThread().setContextClassLoader(cl);
        }
    }

    /** The consumer-side (local) async flag, when present, overrides the provider's. */
    private boolean isAsync(URL remoteUrl, URL localUrl) {
        if (localUrl.hasParameter(ASYNC_KEY)) {
            return localUrl.getParameter(ASYNC_KEY, false);
        }
        return remoteUrl.getParameter(ASYNC_KEY, false);
    }
}
| 6,196 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol/injvm/InjvmExporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.rpc.Exporter;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.protocol.AbstractExporter;
import java.util.Map;
/**
 * InjvmExporter
 *
 * <p>Exporter for services exposed over the in-JVM protocol. On construction it publishes
 * itself into the shared {@code exporterMap} under the service key — which is how local
 * consumers find the service — and removes that entry again when unexported.
 */
public class InjvmExporter<T> extends AbstractExporter<T> {

    // Service key under which this exporter is registered.
    private final String key;

    // Shared registry of locally exported services (owned by the protocol instance).
    private final Map<String, Exporter<?>> exporterMap;

    InjvmExporter(Invoker<T> invoker, String key, Map<String, Exporter<?>> exporterMap) {
        super(invoker);
        this.key = key;
        this.exporterMap = exporterMap;
        // Register eagerly: the service is discoverable as soon as it is constructed.
        exporterMap.put(key, this);
    }

    /** Deregisters this exporter from the shared map after the service is unexported. */
    @Override
    public void afterUnExport() {
        exporterMap.remove(key);
    }
}
| 6,197 |
0 | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol | Create_ds/dubbo/dubbo-rpc/dubbo-rpc-injvm/src/main/java/org/apache/dubbo/rpc/protocol/injvm/ParamDeepCopyUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.injvm;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.extension.ExtensionScope;
import org.apache.dubbo.common.extension.SPI;
/**
 * Framework-scoped SPI for deep-copying invocation parameters and return values across
 * the in-JVM call boundary.
 */
@SPI(scope = ExtensionScope.FRAMEWORK)
public interface ParamDeepCopyUtil {

    /**
     * Produces a copy of {@code src} as an instance of {@code targetClass}.
     *
     * @param url         url carrying copy configuration — NOTE(review): exact usage
     *                    (e.g. serialization choice) is implementation-defined
     * @param src         the object to copy
     * @param targetClass the desired type of the copy
     * @param <T>         target type
     * @return the copied instance
     */
    <T> T copy(URL url, Object src, Class<T> targetClass);
}
| 6,198 |
0 | Create_ds/dubbo/dubbo-xds/src/test/java/org/apache/dubbo/registry/xds/util | Create_ds/dubbo/dubbo-xds/src/test/java/org/apache/dubbo/registry/xds/util/bootstrap/BootstrapperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.registry.xds.util.bootstrap;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.url.component.URLAddress;
import org.apache.dubbo.registry.xds.XdsInitializationException;
import java.util.List;
import io.grpc.netty.shaded.io.netty.channel.unix.DomainSocketAddress;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
class BootstrapperTest {
@Test
void testParse() throws XdsInitializationException {
String rawData = "{\n" + " \"xds_servers\": [\n"
+ " {\n"
+ " \"server_uri\": \"unix:///etc/istio/proxy/XDS\",\n"
+ " \"channel_creds\": [\n"
+ " {\n"
+ " \"type\": \"insecure\"\n"
+ " }\n"
+ " ],\n"
+ " \"server_features\": [\n"
+ " \"xds_v3\"\n"
+ " ]\n"
+ " }\n"
+ " ],\n"
+ " \"node\": {\n"
+ " \"id\": \"sidecar~172.17.0.4~dubbo-demo-consumer-deployment-grpc-agent-58585cb9cd-gp79p.dubbo-demo~dubbo-demo.svc.cluster.local\",\n"
+ " \"metadata\": {\n"
+ " \"ANNOTATIONS\": {\n"
+ " \"inject.istio.io/templates\": \"grpc-agent\",\n"
+ " \"kubernetes.io/config.seen\": \"2022-07-19T12:53:29.742565722Z\",\n"
+ " \"kubernetes.io/config.source\": \"api\",\n"
+ " \"prometheus.io/path\": \"/stats/prometheus\",\n"
+ " \"prometheus.io/port\": \"15020\",\n"
+ " \"prometheus.io/scrape\": \"true\",\n"
+ " \"proxy.istio.io/config\": \"{\\\"holdApplicationUntilProxyStarts\\\": true}\",\n"
+ " \"proxy.istio.io/overrides\": \"{\\\"containers\\\":[{\\\"name\\\":\\\"app\\\",\\\"image\\\":\\\"gcr.io/istio-testing/app:latest\\\",\\\"args\\\":[\\\"--metrics=15014\\\",\\\"--port\\\",\\\"18080\\\",\\\"--tcp\\\",\\\"19090\\\",\\\"--xds-grpc-server=17070\\\",\\\"--grpc\\\",\\\"17070\\\",\\\"--grpc\\\",\\\"17171\\\",\\\"--port\\\",\\\"3333\\\",\\\"--port\\\",\\\"8080\\\",\\\"--version\\\",\\\"v1\\\",\\\"--crt=/cert.crt\\\",\\\"--key=/cert.key\\\"],\\\"ports\\\":[{\\\"containerPort\\\":17070,\\\"protocol\\\":\\\"TCP\\\"},{\\\"containerPort\\\":17171,\\\"protocol\\\":\\\"TCP\\\"},{\\\"containerPort\\\":8080,\\\"protocol\\\":\\\"TCP\\\"},{\\\"name\\\":\\\"tcp-health-port\\\",\\\"containerPort\\\":3333,\\\"protocol\\\":\\\"TCP\\\"}],\\\"env\\\":[{\\\"name\\\":\\\"INSTANCE_IP\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"apiVersion\\\":\\\"v1\\\",\\\"fieldPath\\\":\\\"status.podIP\\\"}}}],\\\"resources\\\":{},\\\"volumeMounts\\\":[{\\\"name\\\":\\\"kube-api-access-2tknx\\\",\\\"readOnly\\\":true,\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\"}],\\\"livenessProbe\\\":{\\\"tcpSocket\\\":{\\\"port\\\":\\\"tcp-health-port\\\"},\\\"initialDelaySeconds\\\":10,\\\"timeoutSeconds\\\":1,\\\"periodSeconds\\\":10,\\\"successThreshold\\\":1,\\\"failureThreshold\\\":10},\\\"readinessProbe\\\":{\\\"httpGet\\\":{\\\"path\\\":\\\"/\\\",\\\"port\\\":8080,\\\"scheme\\\":\\\"HTTP\\\"},\\\"initialDelaySeconds\\\":1,\\\"timeoutSeconds\\\":1,\\\"periodSeconds\\\":2,\\\"successThreshold\\\":1,\\\"failureThreshold\\\":10},\\\"startupProbe\\\":{\\\"tcpSocket\\\":{\\\"port\\\":\\\"tcp-health-port\\\"},\\\"timeoutSeconds\\\":1,\\\"periodSeconds\\\":10,\\\"successThreshold\\\":1,\\\"failureThreshold\\\":10},\\\"terminationMessagePath\\\":\\\"/dev/termination-log\\\",\\\"terminationMessagePolicy\\\":\\\"File\\\",\\\"imagePullPolicy\\\":\\\"Always\\\",\\\"securityContext\\\":{\\\"runAsUser\\\":1338,\\\"runAsGroup\\\":1338}},{\\\"name\\\":\\\"dubbo-demo-consumer\\\",\\\"imag
e\\\":\\\"dockeddocking/dubbo:consumer.v1.0\\\",\\\"command\\\":[\\\"sh\\\",\\\"-c\\\",\\\"java $JAVA_OPTS -jar dubbo-demo-consumer.jar \\\"],\\\"resources\\\":{},\\\"volumeMounts\\\":[{\\\"name\\\":\\\"kube-api-access-2tknx\\\",\\\"readOnly\\\":true,\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\"}],\\\"terminationMessagePath\\\":\\\"/dev/termination-log\\\",\\\"terminationMessagePolicy\\\":\\\"File\\\",\\\"imagePullPolicy\\\":\\\"Always\\\"}]}\",\n"
+ " \"sidecar.istio.io/rewriteAppHTTPProbers\": \"false\",\n"
+ " \"sidecar.istio.io/status\": \"{\\\"initContainers\\\":null,\\\"containers\\\":[\\\"app\\\",\\\"dubbo-demo-consumer\\\",\\\"istio-proxy\\\"],\\\"volumes\\\":[\\\"workload-socket\\\",\\\"workload-certs\\\",\\\"istio-xds\\\",\\\"istio-data\\\",\\\"istio-podinfo\\\",\\\"istio-token\\\",\\\"istiod-ca-cert\\\"],\\\"imagePullSecrets\\\":null,\\\"revision\\\":\\\"default\\\"}\"\n"
+ " },\n"
+ " \"APP_CONTAINERS\": \"app,dubbo-demo-consumer\",\n"
+ " \"CLUSTER_ID\": \"Kubernetes\",\n"
+ " \"ENVOY_PROMETHEUS_PORT\": 15090,\n"
+ " \"ENVOY_STATUS_PORT\": 15021,\n"
+ " \"GENERATOR\": \"grpc\",\n"
+ " \"INSTANCE_IPS\": \"172.17.0.4\",\n"
+ " \"INTERCEPTION_MODE\": \"REDIRECT\",\n"
+ " \"ISTIO_PROXY_SHA\": \"2b6009118109b480e1d5abf3188fd7d9c0c0acf0\",\n"
+ " \"ISTIO_VERSION\": \"1.14.1\",\n"
+ " \"LABELS\": {\n"
+ " \"app\": \"dubbo-demo-consumer-dev\",\n"
+ " \"pod-template-hash\": \"58585cb9cd\",\n"
+ " \"service.istio.io/canonical-name\": \"dubbo-demo-consumer-dev\",\n"
+ " \"service.istio.io/canonical-revision\": \"v1\",\n"
+ " \"version\": \"v1\"\n"
+ " },\n"
+ " \"MESH_ID\": \"cluster.local\",\n"
+ " \"NAME\": \"dubbo-demo-consumer-deployment-grpc-agent-58585cb9cd-gp79p\",\n"
+ " \"NAMESPACE\": \"dubbo-demo\",\n"
+ " \"OWNER\": \"kubernetes://apis/apps/v1/namespaces/dubbo-demo/deployments/dubbo-demo-consumer-deployment-grpc-agent\",\n"
+ " \"PILOT_SAN\": [\n"
+ " \"istiod.istio-system.svc\"\n"
+ " ],\n"
+ " \"POD_PORTS\": \"[{\\\"containerPort\\\":17070,\\\"protocol\\\":\\\"TCP\\\"},{\\\"containerPort\\\":17171,\\\"protocol\\\":\\\"TCP\\\"},{\\\"containerPort\\\":8080,\\\"protocol\\\":\\\"TCP\\\"},{\\\"name\\\":\\\"tcp-health-port\\\",\\\"containerPort\\\":3333,\\\"protocol\\\":\\\"TCP\\\"}]\",\n"
+ " \"PROV_CERT\": \"var/run/secrets/istio/root-cert.pem\",\n"
+ " \"PROXY_CONFIG\": {\n"
+ " \"binaryPath\": \"/usr/local/bin/envoy\",\n"
+ " \"concurrency\": 2,\n"
+ " \"configPath\": \"./etc/istio/proxy\",\n"
+ " \"controlPlaneAuthPolicy\": \"MUTUAL_TLS\",\n"
+ " \"discoveryAddress\": \"istiod.istio-system.svc:15012\",\n"
+ " \"drainDuration\": \"45s\",\n"
+ " \"holdApplicationUntilProxyStarts\": true,\n"
+ " \"parentShutdownDuration\": \"60s\",\n"
+ " \"proxyAdminPort\": 15000,\n"
+ " \"serviceCluster\": \"istio-proxy\",\n"
+ " \"statNameLength\": 189,\n"
+ " \"statusPort\": 15020,\n"
+ " \"terminationDrainDuration\": \"5s\",\n"
+ " \"tracing\": {\n"
+ " \"zipkin\": {\n"
+ " \"address\": \"zipkin.istio-system:9411\"\n"
+ " }\n"
+ " }\n"
+ " },\n"
+ " \"SERVICE_ACCOUNT\": \"default\",\n"
+ " \"WORKLOAD_NAME\": \"dubbo-demo-consumer-deployment-grpc-agent\"\n"
+ " },\n"
+ " \"locality\": {},\n"
+ " \"UserAgentVersionType\": null\n"
+ " },\n"
+ " \"certificate_providers\": {\n"
+ " \"default\": {\n"
+ " \"plugin_name\": \"file_watcher\",\n"
+ " \"config\": {\n"
+ " \"certificate_file\": \"/var/lib/istio/data/cert-chain.pem\",\n"
+ " \"private_key_file\": \"/var/lib/istio/data/key.pem\",\n"
+ " \"ca_certificate_file\": \"/var/lib/istio/data/root-cert.pem\",\n"
+ " \"refresh_interval\": \"900s\"\n"
+ " }\n"
+ " }\n"
+ " },\n"
+ " \"server_listener_resource_name_template\": \"xds.istio.io/grpc/lds/inbound/%s\"\n"
+ "}";
BootstrapperImpl.bootstrapPathFromEnvVar = "";
BootstrapperImpl bootstrapper = new BootstrapperImpl();
bootstrapper.setFileReader(createFileReader(rawData));
Bootstrapper.BootstrapInfo info = bootstrapper.bootstrap();
List<Bootstrapper.ServerInfo> serverInfoList = info.servers();
Assertions.assertEquals(serverInfoList.get(0).target(), "unix:///etc/istio/proxy/XDS");
URLAddress address = URLAddress.parse(serverInfoList.get(0).target(), null, false);
Assertions.assertEquals(new DomainSocketAddress(address.getPath()).path(), "etc/istio/proxy/XDS");
}
@Test
void testUrl() {
URL url = URL.valueOf("dubbo://127.0.0.1:23456/TestService?useAgent=true");
Assertions.assertTrue(url.getParameter("useAgent", false));
}
private static BootstrapperImpl.FileReader createFileReader(final String rawData) {
return new BootstrapperImpl.FileReader() {
@Override
public String readFile(String path) {
return rawData;
}
};
}
}
| 6,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.