index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/common/MailboxExecutorFacade.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.common;
import java.util.Objects;
import java.util.concurrent.Executor;
import org.apache.flink.api.common.operators.MailboxExecutor;
/**
 * Adapts a Flink {@link MailboxExecutor} to the standard {@link Executor} interface.
 *
 * <p>Submitted {@link Runnable}s are forwarded to the mailbox executor together with a fixed,
 * human readable name used to describe them.
 */
public final class MailboxExecutorFacade implements Executor {
  private final MailboxExecutor mailbox;
  private final String taskDescription;

  public MailboxExecutorFacade(MailboxExecutor executor, String name) {
    this.mailbox = Objects.requireNonNull(executor);
    this.taskDescription = Objects.requireNonNull(name);
  }

  @Override
  public void execute(Runnable command) {
    // command::run adapts the plain Runnable to the (throwing) runnable type
    // expected by MailboxExecutor.
    mailbox.execute(command::run, taskDescription);
  }
}
| 6,200 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/common/Maps.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.common;
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiFunction;
import java.util.function.Function;
/** Small map/collection transformation helpers. */
public final class Maps {
  private Maps() {}

  /**
   * Returns a new map with the same keys as {@code map} and values transformed by {@code fn}.
   *
   * @param map the input map (not modified)
   * @param fn applied to each value
   * @return a new {@link HashMap} with transformed values
   */
  public static <K, V, U> Map<K, U> transformValues(Map<K, V> map, Function<V, U> fn) {
    // presize to the input's size, consistently with transformKeys, to avoid rehashing
    Map<K, U> result = new HashMap<>(map.size());
    for (Map.Entry<K, V> entry : map.entrySet()) {
      U u = fn.apply(entry.getValue());
      result.put(entry.getKey(), u);
    }
    return result;
  }

  /**
   * Returns a new map with keys transformed by {@code fn} and the original values.
   *
   * <p>If {@code fn} maps two keys to the same result, one of the entries silently wins.
   *
   * @param map the input map (not modified)
   * @param fn applied to each key
   * @return a new {@link HashMap} with transformed keys
   */
  public static <K, V, U> Map<U, V> transformKeys(Map<K, V> map, Function<K, U> fn) {
    Map<U, V> result = new HashMap<>(map.size());
    for (Map.Entry<K, V> entry : map.entrySet()) {
      U u = fn.apply(entry.getKey());
      result.put(u, entry.getValue());
    }
    return result;
  }

  /**
   * Returns a new map with the same keys as {@code map} and values computed from each
   * (key, value) pair by {@code fn}.
   *
   * @param map the input map (not modified)
   * @param fn applied to each entry's key and value
   * @return a new {@link HashMap} with transformed values
   */
  public static <K, V, U> Map<K, U> transformValues(Map<K, V> map, BiFunction<K, V, U> fn) {
    // presize to the input's size, consistently with transformKeys, to avoid rehashing
    Map<K, U> result = new HashMap<>(map.size());
    for (Map.Entry<K, V> entry : map.entrySet()) {
      U u = fn.apply(entry.getKey(), entry.getValue());
      result.put(entry.getKey(), u);
    }
    return result;
  }

  /**
   * Indexes {@code elements} by the key computed via {@code indexBy}.
   *
   * <p>If two elements map to the same key, the later element wins.
   *
   * @param elements the elements to index
   * @param indexBy computes the index key of each element
   * @return a new {@link HashMap} from key to element
   */
  public static <K, T> Map<K, T> index(Iterable<T> elements, Function<T, K> indexBy) {
    Map<K, T> index = new HashMap<>();
    for (T element : elements) {
      index.put(indexBy.apply(element), element);
    }
    return index;
  }
}
| 6,201 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/common/KeyBy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.common;
import org.apache.flink.statefun.sdk.Address;
/** Computes the key used to partition messages addressed to a given {@link Address}. */
public final class KeyBy {
  private KeyBy() {}

  /** Returns the routing key for the given address, which is the address id. */
  public static String apply(Address address) {
    final String routingKey = address.id();
    return routingKey;
  }
}
| 6,202 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/common/SerializableFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.common;
import java.io.Serializable;
import java.util.function.Function;
/**
 * A {@link Function} that is also {@link Serializable}.
 *
 * <p>Assigning a lambda or method reference to this type makes it serializable, which allows it
 * to be shipped across process boundaries.
 */
public interface SerializableFunction<T, R> extends Function<T, R>, Serializable {}
| 6,203 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/common/PolyglotUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.common;
import com.google.protobuf.Message;
import com.google.protobuf.Parser;
import java.io.IOException;
import java.io.InputStream;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.reqreply.generated.Address;
public final class PolyglotUtil {
private PolyglotUtil() {}
public static <M extends Message> M parseProtobufOrThrow(Parser<M> parser, InputStream input) {
try {
return parser.parseFrom(input);
} catch (IOException e) {
throw new IllegalStateException("Unable to parse a Protobuf message", e);
}
}
public static Address sdkAddressToPolyglotAddress(
org.apache.flink.statefun.sdk.Address sdkAddress) {
return Address.newBuilder()
.setNamespace(sdkAddress.type().namespace())
.setType(sdkAddress.type().name())
.setId(sdkAddress.id())
.build();
}
public static org.apache.flink.statefun.sdk.Address polyglotAddressToSdkAddress(Address address) {
return new org.apache.flink.statefun.sdk.Address(
new FunctionType(address.getNamespace(), address.getType()), address.getId());
}
}
| 6,204 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/spi/Modules.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.spi;
import java.util.*;
import org.apache.flink.statefun.extensions.ExtensionModule;
import org.apache.flink.statefun.flink.common.SetContextClassLoader;
import org.apache.flink.statefun.flink.core.StatefulFunctionsConfig;
import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverse;
import org.apache.flink.statefun.flink.core.jsonmodule.JsonServiceLoader;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.io.spi.FlinkIoModule;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
/** Discovers and holds the modules (extension, IO, function) that make up an application. */
public final class Modules {
  private final List<ExtensionModule> extensionModules;
  private final List<FlinkIoModule> ioModules;
  private final List<StatefulFunctionModule> statefulFunctionModules;
  private final StatefulFunctionsConfig configuration;

  private Modules(
      StatefulFunctionsConfig configuration,
      List<FlinkIoModule> ioModules,
      List<StatefulFunctionModule> statefulFunctionModules,
      List<ExtensionModule> extensionModules) {
    this.configuration = Objects.requireNonNull(configuration);
    this.ioModules = ioModules;
    this.statefulFunctionModules = statefulFunctionModules;
    this.extensionModules = extensionModules;
  }

  /**
   * Discovers all modules available on the classpath via {@link ServiceLoader}, plus the remote
   * module loaded from the configured module YAML name.
   */
  public static Modules loadFromClassPath(StatefulFunctionsConfig configuration) {
    final List<ExtensionModule> extensions = new ArrayList<>();
    ServiceLoader.load(ExtensionModule.class).forEach(extensions::add);

    final List<StatefulFunctionModule> functions = new ArrayList<>();
    ServiceLoader.load(StatefulFunctionModule.class).forEach(functions::add);
    JsonServiceLoader.load(configuration.getRemoteModuleName()).forEach(functions::add);

    final List<FlinkIoModule> io = new ArrayList<>();
    ServiceLoader.load(FlinkIoModule.class).forEach(io::add);

    return new Modules(configuration, io, functions, extensions);
  }

  /**
   * Creates a {@link StatefulFunctionsUniverse} and lets every discovered module configure it.
   *
   * <p>Each module is configured with its own class loader set as the thread context class
   * loader for the duration of the call.
   */
  public StatefulFunctionsUniverse createStatefulFunctionsUniverse() {
    final MessageFactoryKey factoryKey = configuration.getFactoryKey();
    final StatefulFunctionsUniverse universe = new StatefulFunctionsUniverse(factoryKey);
    final Map<String, String> globalConfiguration = configuration.getGlobalConfigurations();
    // extension modules must be bound and configured before any other module kind,
    // since the IO and function modules may already rely on bound extensions.
    for (ExtensionModule extensionModule : extensionModules) {
      try (SetContextClassLoader ignored = new SetContextClassLoader(extensionModule)) {
        extensionModule.configure(globalConfiguration, universe);
      }
    }
    for (FlinkIoModule ioModule : ioModules) {
      try (SetContextClassLoader ignored = new SetContextClassLoader(ioModule)) {
        ioModule.configure(globalConfiguration, universe);
      }
    }
    for (StatefulFunctionModule functionModule : statefulFunctionModules) {
      try (SetContextClassLoader ignored = new SetContextClassLoader(functionModule)) {
        functionModule.configure(globalConfiguration, universe);
      }
    }
    return universe;
  }
}
| 6,205 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/spi/ExtensionResolver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.spi;
import org.apache.flink.statefun.extensions.ExtensionModule;
import org.apache.flink.statefun.sdk.TypeName;
/**
* Resolves a bound extension (bound by {@link ExtensionModule}s) given specified {@link TypeName}s.
*/
public interface ExtensionResolver {

  /**
   * Resolves the extension bound under the given {@link TypeName}.
   *
   * @param typeName the type name the extension was bound under
   * @param extensionClass the expected class of the extension instance
   * @param <T> the extension's type
   * @return the resolved extension instance
   */
  <T> T resolveExtension(TypeName typeName, Class<T> extensionClass);
}
| 6,206 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/spi/ExtensionResolverAccessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.spi;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
/**
* TODO This is a temporary workaround for accessing the {@link ExtensionResolver}. TODO We should
* expose the resolver properly once we have more usages.
*/
public final class ExtensionResolverAccessor {
  private ExtensionResolverAccessor() {}

  /**
   * Obtains the {@link ExtensionResolver} behind the given module binder.
   *
   * <p>The binder handed to modules is always the StatefulFunctionsUniverse, which implements
   * {@link ExtensionResolver}, so the downcast is safe.
   */
  public static ExtensionResolver getExtensionResolver(StatefulFunctionModule.Binder moduleBinder) {
    final ExtensionResolver resolver = (ExtensionResolver) moduleBinder;
    return resolver;
  }
}
| 6,207 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/spi/Constants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.spi;
/** Well-known paths and names used by the StateFun runtime. */
// final: utility class with only static constants, consistent with the other
// utility classes in this package (Maps, KeyBy, PolyglotUtil).
public final class Constants {
  private Constants() {}

  /** Directory scanned for user-provided module artifacts. */
  public static final String MODULE_DIRECTORY = "/opt/statefun/modules";
  /** Location of the StateFun core jar inside the Flink image. */
  public static final String FLINK_JOB_JAR_PATH = "/opt/flink/lib/statefun-flink-core.jar";
  /** Package prefix of StateFun classes. */
  public static final String STATEFUL_FUNCTIONS_PACKAGE = "org.apache.flink.statefun.";
  /** File name of a remote module definition. */
  public static final String STATEFUL_FUNCTIONS_MODULE_NAME = "module.yaml";
}
| 6,208 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/spi/ModuleSpecs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.spi;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
import java.util.*;
import org.apache.flink.statefun.flink.core.spi.ModuleSpecs.ModuleSpec;
/**
 * A serializable description of module artifacts discovered under a modules root directory.
 *
 * <p>Each {@link ModuleSpec} corresponds to one sub directory of the root and lists the URIs of
 * the artifacts (jar files and/or the module YAML directory) found in it.
 */
public final class ModuleSpecs implements Iterable<ModuleSpec>, Serializable {
  private static final long serialVersionUID = 1L;
  private final List<ModuleSpec> specs;

  private ModuleSpecs(List<ModuleSpec> specs) {
    this.specs = Objects.requireNonNull(specs);
  }

  /**
   * Scans {@code rootDirectory} and creates a {@link ModuleSpec} per sub directory.
   *
   * @param rootDirectory the directory to scan
   * @throws IllegalArgumentException if the path does not exist or is not a directory
   * @throws IOException if resolving artifact files fails
   */
  public static ModuleSpecs fromPath(String rootDirectory) throws IOException {
    Objects.requireNonNull(rootDirectory);
    List<ModuleSpec> loadableModules = discoverLoadableArtifacts(rootDirectory);
    return new ModuleSpecs(loadableModules);
  }

  /** Creates a {@link ModuleSpecs} directly from the supplied specs. */
  public static ModuleSpecs fromCollection(ModuleSpec... moduleSpecs) {
    List<ModuleSpec> loadableModules = Arrays.asList(moduleSpecs);
    return new ModuleSpecs(loadableModules);
  }

  /** Scans the given directory and builds one {@link ModuleSpec} per sub directory. */
  private static List<ModuleSpec> discoverLoadableArtifacts(String rootDirectory)
      throws IOException {
    File parent = new File(rootDirectory);
    if (!parent.exists()) {
      throw new IllegalArgumentException(rootDirectory + " does not exist.");
    }
    if (!parent.isDirectory()) {
      // IllegalArgumentException, consistent with the existence check above
      throw new IllegalArgumentException(rootDirectory + " is not a directory.");
    }
    List<ModuleSpec> loadableFunctions = new ArrayList<>();
    for (File subDirectory : nullToEmpty(parent.listFiles())) {
      if (subDirectory.isDirectory()) {
        ModuleSpec loadableFunction = findLoadableModuleArtifacts(subDirectory.getAbsoluteFile());
        loadableFunctions.add(loadableFunction);
      }
    }
    return loadableFunctions;
  }

  /** Collects the loadable artifacts (jars and/or the module YAML) of a single sub directory. */
  private static ModuleSpec findLoadableModuleArtifacts(File subDirectory) throws IOException {
    ModuleSpec.Builder builder = ModuleSpec.builder();
    for (File file : nullToEmpty(subDirectory.listFiles())) {
      if (!file.isFile()) {
        continue;
      }
      if (file.getName().endsWith(".jar")) {
        builder.withJarFile(file.getAbsoluteFile());
      } else if (file.getName().equals(Constants.STATEFUL_FUNCTIONS_MODULE_NAME)) {
        // for module YAMLs we have to add the entire module directory as a
        // URL path. ClassLoader#findResource("module.yaml").
        builder.withYamlModuleFile(subDirectory.getAbsoluteFile());
      }
    }
    return builder.build();
  }

  /** File#listFiles may return null (e.g. on I/O error); normalize that to an empty array. */
  private static File[] nullToEmpty(File[] elements) {
    return elements == null ? new File[0] : elements;
  }

  /** Returns the discovered module specs. */
  public List<ModuleSpec> modules() {
    return specs;
  }

  @Override
  public Iterator<ModuleSpec> iterator() {
    return specs.iterator();
  }

  /** The artifacts (as URIs) that make up a single module. */
  public static final class ModuleSpec implements Serializable {
    private static final long serialVersionUID = 1;
    private final List<URI> artifactUrls;

    private ModuleSpec(List<URI> artifacts) {
      this.artifactUrls = Collections.unmodifiableList(artifacts);
    }

    static Builder builder() {
      return new Builder();
    }

    /** Returns an unmodifiable, sorted list of artifact URIs. */
    public List<URI> artifactUris() {
      return artifactUrls;
    }

    static final class Builder {
      // TreeSet: deduplicates and keeps the artifact URIs in a deterministic (sorted) order
      private final TreeSet<URI> artifacts = new TreeSet<>();

      Builder withYamlModuleFile(File file) throws IOException {
        Objects.requireNonNull(file);
        artifacts.add(file.getCanonicalFile().toURI());
        return this;
      }

      Builder withJarFile(File file) throws IOException {
        Objects.requireNonNull(file);
        artifacts.add(file.getCanonicalFile().toURI());
        return this;
      }

      Builder withUri(URI uri) {
        Objects.requireNonNull(uri);
        artifacts.add(uri);
        return this;
      }

      ModuleSpec build() {
        List<URI> sortedCopy = new ArrayList<>(artifacts);
        return new ModuleSpec(sortedCopy);
      }
    }
  }
}
| 6,209 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static org.apache.flink.shaded.netty4.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Supplier;
import org.apache.commons.io.IOUtils;
import org.apache.flink.shaded.netty4.io.netty.bootstrap.Bootstrap;
import org.apache.flink.shaded.netty4.io.netty.channel.Channel;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelDuplexHandler;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelOption;
import org.apache.flink.shaded.netty4.io.netty.channel.EventLoop;
import org.apache.flink.shaded.netty4.io.netty.channel.pool.ChannelHealthChecker;
import org.apache.flink.shaded.netty4.io.netty.channel.pool.ChannelPoolHandler;
import org.apache.flink.shaded.netty4.io.netty.channel.pool.FixedChannelPool;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.ReadOnlyHttpHeaders;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.SslContext;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.SslContextBuilder;
import org.apache.flink.shaded.netty4.io.netty.util.concurrent.ScheduledFuture;
import org.apache.flink.statefun.flink.common.ResourceLocator;
import org.apache.flink.statefun.flink.core.metrics.RemoteInvocationMetrics;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClient;
import org.apache.flink.statefun.flink.core.reqreply.ToFunctionRequestSummary;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction;
import org.apache.flink.util.Preconditions;
final class NettyClient implements RequestReplyClient, NettyClientService {
private final NettySharedResources shared;
private final FixedChannelPool pool;
private final Endpoint endpoint;
private final ReadOnlyHttpHeaders headers;
private final long totalRequestBudgetInNanos;
private final EventLoop eventLoop;
public static NettyClient from(
NettySharedResources shared, NettyRequestReplySpec spec, URI endpointUrl) {
return from(shared, spec, endpointUrl, NettyRequestReplyHandler::new);
}
static NettyClient from(
NettySharedResources shared,
NettyRequestReplySpec spec,
URI endpointUrl,
Supplier<ChannelDuplexHandler> nettyRequestReplyHandlerSupplier) {
Endpoint endpoint = new Endpoint(endpointUrl);
long totalRequestBudgetInNanos = spec.callTimeout.toNanos();
ReadOnlyHttpHeaders headers = NettyHeaders.defaultHeadersFor(endpoint.serviceAddress());
// prepare a customized bootstrap for this specific spec.
// this bootstrap reuses the select loop and io threads as other endpoints.
Bootstrap bootstrap = shared.bootstrap().clone();
bootstrap.option(CONNECT_TIMEOUT_MILLIS, (int) spec.connectTimeout.toMillis());
bootstrap.option(ChannelOption.SO_KEEPALIVE, true);
bootstrap.remoteAddress(endpoint.serviceAddress());
// setup tls
final SslContext sslContext = endpoint.useTls() ? getSslContext(spec) : null;
// setup a channel pool handler
ChannelPoolHandler poolHandler =
new HttpConnectionPoolManager(
sslContext,
spec,
endpoint.serviceAddress().getHostString(),
endpoint.serviceAddress().getPort(),
nettyRequestReplyHandlerSupplier);
// setup a fixed capacity channel pool
FixedChannelPool pool =
new FixedChannelPool(
bootstrap,
poolHandler,
ChannelHealthChecker.ACTIVE,
FixedChannelPool.AcquireTimeoutAction.FAIL,
spec.connectTimeout.toMillis(),
spec.connectionPoolMaxSize,
2147483647,
true,
true);
shared.registerClosable(pool::closeAsync);
// use a dedicated, event loop to execute timers and tasks. An event loop is backed by a single
// thread.
EventLoop eventLoop = bootstrap.config().group().next();
return new NettyClient(shared, eventLoop, pool, endpoint, headers, totalRequestBudgetInNanos);
}
private NettyClient(
NettySharedResources shared,
EventLoop anEventLoop,
FixedChannelPool pool,
Endpoint endpoint,
ReadOnlyHttpHeaders defaultHttpHeaders,
long totalRequestBudgetInNanos) {
this.shared = Objects.requireNonNull(shared);
this.eventLoop = Objects.requireNonNull(anEventLoop);
this.pool = Objects.requireNonNull(pool);
this.endpoint = Objects.requireNonNull(endpoint);
this.headers = Objects.requireNonNull(defaultHttpHeaders);
this.totalRequestBudgetInNanos = totalRequestBudgetInNanos;
}
@Override
public CompletableFuture<FromFunction> call(
ToFunctionRequestSummary requestSummary,
RemoteInvocationMetrics metrics,
ToFunction toFunction) {
NettyRequest request = new NettyRequest(this, metrics, requestSummary, toFunction);
return request.start();
}
// -------------------------------------------------------------------------------------
// The following methods are used by NettyRequest during the various attempts
// -------------------------------------------------------------------------------------
@Override
public void acquireChannel(BiConsumer<Channel, Throwable> consumer) {
pool.acquire()
.addListener(
future -> {
Throwable cause = future.cause();
if (cause != null) {
consumer.accept(null, cause);
} else {
Channel ch = (Channel) future.getNow();
consumer.accept(ch, null);
}
});
}
@Override
public void releaseChannel(Channel channel) {
EventLoop chEventLoop = channel.eventLoop();
if (chEventLoop.inEventLoop()) {
releaseChannel0(channel);
} else {
chEventLoop.execute(() -> releaseChannel0(channel));
}
}
@Override
public String queryPath() {
return endpoint.queryPath();
}
@Override
public ReadOnlyHttpHeaders headers() {
return headers;
}
@Override
public long totalRequestBudgetInNanos() {
return totalRequestBudgetInNanos;
}
@Override
public Closeable newTimeout(Runnable client, long delayInNanos) {
ScheduledFuture<?> future = eventLoop.schedule(client, delayInNanos, TimeUnit.NANOSECONDS);
return () -> future.cancel(false);
}
@Override
public void runOnEventLoop(Runnable task) {
Objects.requireNonNull(task);
if (eventLoop.inEventLoop()) {
task.run();
} else {
eventLoop.execute(task);
}
}
@Override
public boolean isShutdown() {
return shared.isShutdown();
}
@Override
public long systemNanoTime() {
return System.nanoTime();
}
@Override
public <T> void writeAndFlush(T what, Channel where, BiConsumer<Void, Throwable> andThen) {
where
.writeAndFlush(what)
.addListener(
future -> {
Throwable cause = future.cause();
andThen.accept(null, cause);
});
}
private void releaseChannel0(Channel channel) {
if (!channel.isActive()) {
// We still need to return this channel to the pool, because the connection pool
// keeps track of the number of acquired channel counts, however the pool will first consult
// the health check, and then kick that connection away.
pool.release(channel);
return;
}
if (channel.attr(ChannelAttributes.EXPIRED).get() != Boolean.TRUE) {
pool.release(channel);
return;
}
channel.close().addListener(ignored -> pool.release(channel));
}
private static SslContext getSslContext(NettyRequestReplySpec spec) {
final Optional<String> maybeTrustCaCerts = spec.getTrustedCaCerts();
final Optional<String> maybeClientCerts = spec.getClientCerts();
final Optional<String> maybeClientKey = spec.getClientKey();
final Optional<String> maybeKeyPassword = spec.getClientKeyPassword();
boolean onlyOneOfEitherCertOrKeyPresent =
maybeClientCerts.isPresent() ^ maybeClientKey.isPresent();
if (onlyOneOfEitherCertOrKeyPresent) {
throw new IllegalStateException(
"You need to provide both the cert and they key if you want to use mutual TLS.");
}
final Optional<InputStream> maybeTrustCaCertsInputStream =
maybeTrustCaCerts.map(
trustedCaCertsLocation ->
openStreamIfExistsOrThrow(
ResourceLocator.findNamedResource(trustedCaCertsLocation)));
final Optional<InputStream> maybeCertInputStream =
maybeClientCerts.map(
clientCertLocation ->
openStreamIfExistsOrThrow(ResourceLocator.findNamedResource(clientCertLocation)));
final Optional<InputStream> maybeKeyInputStream =
maybeClientKey.map(
clientKeyLocation ->
openStreamIfExistsOrThrow(ResourceLocator.findNamedResource(clientKeyLocation)));
final SslContextBuilder sslContextBuilder = SslContextBuilder.forClient();
maybeTrustCaCertsInputStream.ifPresent(sslContextBuilder::trustManager);
maybeCertInputStream.ifPresent(
certInputStream -> {
final InputStream keyInputStream =
maybeKeyInputStream.orElseThrow(
() -> new IllegalStateException("The key is required"));
if (maybeKeyPassword.isPresent()) {
try {
final String keyPasswordString =
IOUtils.toString(
ResourceLocator.findNamedResource(maybeKeyPassword.get()),
StandardCharsets.UTF_8);
sslContextBuilder.keyManager(certInputStream, keyInputStream, keyPasswordString);
} catch (IOException e) {
throw new IllegalStateException(
String.format(
"Could not read the key password from the file %s. Examples of the correct usage: 'classpath:file.txt' or '/tmp/pass', etc.",
maybeKeyPassword.get()),
e);
}
} else {
sslContextBuilder.keyManager(certInputStream, keyInputStream);
}
});
try {
return sslContextBuilder.build();
} catch (IOException e) {
throw new IllegalStateException("Could not build the ssl context.", e);
} finally {
maybeTrustCaCertsInputStream.ifPresent(NettyClient::closeWithBestEffort);
maybeCertInputStream.ifPresent(NettyClient::closeWithBestEffort);
maybeKeyInputStream.ifPresent(NettyClient::closeWithBestEffort);
}
}
/** Closes the given stream, swallowing any {@link IOException} (best-effort cleanup). */
private static void closeWithBestEffort(InputStream stream) {
  try {
    stream.close();
  } catch (IOException ignored) {
    // best-effort close; a failure here is not actionable.
  }
}
/**
 * Opens an input stream for the given resource URL.
 *
 * @throws IllegalStateException if the resource was not found ({@code null} URL) or could not be
 *     opened.
 */
private static InputStream openStreamIfExistsOrThrow(URL resource) {
  Preconditions.checkState(resource != null, "The requested resource does not exist.");
  try {
    return resource.openStream();
  } catch (IOException cause) {
    throw new IllegalStateException("Could not open " + resource.getPath(), cause);
  }
}
}
| 6,210 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyHeaders.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpHeaderNames;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpHeaderValues;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.ReadOnlyHttpHeaders;
import org.apache.flink.shaded.netty4.io.netty.util.AsciiString;
/** Pre-built HTTP header sets used by the netty request-reply client. */
final class NettyHeaders {

  private static final AsciiString USER_AGENT = AsciiString.cached("statefun");

  /** An immutable, empty set of trailing headers. */
  static final ReadOnlyHttpHeaders EMPTY = new ReadOnlyHttpHeaders(false);

  /**
   * Builds the (immutable) default request headers for the given remote function endpoint. These
   * include content negotiation, keep-alive, the user agent, the {@code Host} header and a
   * placeholder {@code Content-Length} of zero.
   */
  static ReadOnlyHttpHeaders defaultHeadersFor(InetSocketAddress service) {
    final int port = service.getPort();
    final AsciiString serviceHost =
        (port == 443 || port == 80)
            // we omit well known ports from the hostname header, as it is not common
            // to include them.
            ? AsciiString.cached(service.getHostString())
            : AsciiString.cached(service.getHostString() + ":" + port);
    final AsciiString[] kvPairs = {
      HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_OCTET_STREAM,
      HttpHeaderNames.ACCEPT, HttpHeaderValues.APPLICATION_OCTET_STREAM,
      HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP_DEFLATE,
      HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE,
      HttpHeaderNames.USER_AGENT, USER_AGENT,
      HttpHeaderNames.HOST, serviceHost,
      HttpHeaderNames.CONTENT_LENGTH, AsciiString.cached("0")
    };
    return new ReadOnlyHttpHeaders(false, kvPairs);
  }
}
| 6,211 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyRequestReplyHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static org.apache.flink.statefun.flink.core.nettyclient.NettyProtobuf.serializeProtobuf;
import static org.apache.flink.util.Preconditions.checkState;
import java.util.concurrent.ThreadLocalRandom;
import javax.annotation.Nullable;
import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelDuplexHandler;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandlerContext;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelPromise;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.DefaultFullHttpRequest;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.DefaultHttpHeaders;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.FullHttpResponse;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpHeaderNames;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpHeaderValues;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpMethod;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpVersion;
import org.apache.flink.shaded.netty4.io.netty.util.ReferenceCountUtil;
import org.apache.flink.statefun.flink.core.nettyclient.exceptions.DisconnectedException;
import org.apache.flink.statefun.flink.core.nettyclient.exceptions.WrongHttpResponse;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction;
/**
 * Drives a single HTTP request/response round-trip against a remote function endpoint.
 *
 * <p>A {@link NettyRequest} written into the pipeline is serialized as a protobuf-bodied HTTP POST
 * and sent upstream; the {@link FullHttpResponse} read back is deserialized into a {@link
 * FromFunction} which completes the request. At most one request may be in flight per channel at
 * any given time; writing a second request while one is in progress fails both.
 */
public final class NettyRequestReplyHandler extends ChannelDuplexHandler {
  // fails the in-flight request once its remaining time budget has elapsed.
  private final NettyRequestTimeoutTask requestDurationTracker = new NettyRequestTimeoutTask(this);

  // it is set on write.
  @Nullable private NettyRequest inflightRequest;

  // cache the request headers. profiling shows that creating request headers takes around 6% of
  // allocations, so it is very beneficial to cache and reuse the headers.
  @Nullable private DefaultHttpHeaders cachedHeaders;

  // ---------------------------------------------------------------------------------------------------------
  // Netty API
  // ---------------------------------------------------------------------------------------------------------

  /**
   * Intercepts {@link NettyRequest} writes: serializes the request, forwards the resulting HTTP
   * request upstream and arms the request timeout. Any other message type is passed through
   * unchanged.
   */
  @Override
  public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
      throws Exception {
    if (!(msg instanceof NettyRequest)) {
      super.write(ctx, msg, promise);
      return;
    }
    final NettyRequest request = (NettyRequest) msg;
    if (inflightRequest != null) {
      // this is a BUG: sending new request while an old request is in progress.
      // we fail both of these requests.
      IllegalStateException cause =
          new IllegalStateException("A Channel has not finished the previous request.");
      request.completeAttemptExceptionally(cause);
      exceptionCaught(ctx, cause);
      return;
    }
    this.inflightRequest = request;
    // a new NettyRequestReply was introduced into the pipeline.
    // we remember that request and forward an HTTP request on its behalf upstream.
    // from now on, every exception thrown during the processing of this pipeline, either during the
    // following section or
    // during read(), will be caught and delivered to the @inFlightRequest via #exceptionCaught().
    ByteBuf content = null;
    try {
      content = serializeProtobuf(ctx.channel().alloc()::buffer, request.toFunction());
      writeHttpRequest(ctx, content, request);
      scheduleRequestTimeout(ctx, request.remainingRequestBudgetNanos());
    } catch (Throwable t) {
      // ownership of the body buffer was not transferred downstream, so release it here to avoid
      // leaking it.
      ReferenceCountUtil.safeRelease(content);
      exceptionCaught(ctx, t);
    }
  }

  /**
   * Handles the HTTP response for the in-flight request. A message that is not a {@link
   * FullHttpResponse} becomes {@code null} here and fails validation inside {@link
   * #readHttpMessage}, which routes the error to {@link #exceptionCaught}. The response is always
   * released (reference counted) in the {@code finally} block.
   */
  @Override
  public void channelRead(ChannelHandlerContext ctx, Object message) {
    final FullHttpResponse response =
        (message instanceof FullHttpResponse) ? (FullHttpResponse) message : null;
    try {
      readHttpMessage(response);
    } catch (Throwable t) {
      exceptionCaught(ctx, t);
    } finally {
      ReferenceCountUtil.release(response);
    }
  }

  /**
   * Cancels the request timeout, closes the channel if it is still active, and then fails the
   * in-flight request (if any) with the given cause.
   */
  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    requestDurationTracker.cancel();
    if (!ctx.channel().isActive()) {
      tryComplete(null, cause);
    } else {
      ctx.channel().close().addListener(ignored -> tryComplete(null, cause));
    }
  }

  // ---------------------------------------------------------------------------------------------------------
  // HTTP Request Response
  // ---------------------------------------------------------------------------------------------------------

  /** Wraps the serialized body into an HTTP/1.1 POST request and flushes it upstream. */
  private void writeHttpRequest(ChannelHandlerContext ctx, ByteBuf bodyBuf, NettyRequest req) {
    DefaultFullHttpRequest http =
        new DefaultFullHttpRequest(
            HttpVersion.HTTP_1_1,
            HttpMethod.POST,
            req.uri(),
            bodyBuf,
            headers(req, bodyBuf),
            NettyHeaders.EMPTY);
    ctx.writeAndFlush(http);
  }

  /**
   * Returns the (cached) request headers with {@code Content-Length} set for this body. The cached
   * instance is mutated in place — only the content length differs between requests — which is
   * safe because this channel handles at most one request at a time.
   */
  private DefaultHttpHeaders headers(NettyRequest req, ByteBuf bodyBuf) {
    final DefaultHttpHeaders headers;
    if (cachedHeaders != null) {
      headers = cachedHeaders;
    } else {
      headers = new DefaultHttpHeaders();
      headers.add(req.headers());
      this.cachedHeaders = headers;
    }
    headers.remove(HttpHeaderNames.CONTENT_LENGTH);
    headers.add(HttpHeaderNames.CONTENT_LENGTH, bodyBuf.readableBytes());
    return headers;
  }

  /**
   * Validates and deserializes the HTTP response, then completes the in-flight request with the
   * resulting {@link FromFunction}.
   */
  private void readHttpMessage(FullHttpResponse response) {
    NettyRequest current = inflightRequest;
    checkState(current != null, "A read without a request");
    requestDurationTracker.cancel();
    checkState(response != null, "Unexpected message type");
    validateFullHttpResponse(response);
    FromFunction fromFn =
        NettyProtobuf.deserializeProtobuf(response.content(), FromFunction.parser());
    tryComplete(fromFn, null);
  }

  /** Resets per-request state; invoked when the channel is returned to the connection pool. */
  public void onReleaseToPool() {
    requestDurationTracker.cancel();
    inflightRequest = null;
  }

  /** Fails the in-flight request (if any) when the underlying connection goes down. */
  @Override
  public void channelInactive(ChannelHandlerContext ctx) throws Exception {
    requestDurationTracker.cancel();
    tryComplete(null, DisconnectedException.INSTANCE);
    super.channelInactive(ctx);
  }

  @Override
  public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
    requestDurationTracker.cancel();
    super.channelUnregistered(ctx);
  }

  /**
   * Checks that the response is a 2xx, carries the expected protobuf content type, and has a body.
   *
   * @throws WrongHttpResponse on a non-2xx status code.
   * @throws IllegalStateException on an unexpected content type or a missing body.
   */
  private void validateFullHttpResponse(FullHttpResponse response) {
    //
    // check the return code
    //
    final int code = response.status().code();
    if (code < 200 || code >= 300) {
      String message =
          "Unexpected response code " + code + " (" + response.status().reasonPhrase() + ") ";
      throw new WrongHttpResponse(message);
    }
    //
    // check for the correct content type
    //
    final boolean correctContentType =
        response
            .headers()
            .containsValue(
                HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_OCTET_STREAM, true);
    if (!correctContentType) {
      String gotContentType = response.headers().get(HttpHeaderNames.CONTENT_TYPE);
      throw new IllegalStateException("Unexpected content type " + gotContentType);
    }
    //
    // a present HTTP body is expected.
    //
    checkState(response.content() != null, "Unexpected empty HTTP response (no body)");
  }

  /**
   * Arms the request-timeout watchdog for the remainder of the request's time budget, but never
   * for less than a small jittered minimum.
   */
  private void scheduleRequestTimeout(
      ChannelHandlerContext ctx, final long remainingRequestBudgetNanos) {
    // compute the minimum request duration with an additional random jitter. The jitter is
    // uniformly distributed in the range
    // of [7ms, 13ms).
    long minRequestDurationJitteredNanos =
        ThreadLocalRandom.current().nextLong(7_000_000, 13_000_000);
    long remainingRequestBudget =
        Math.max(minRequestDurationJitteredNanos, remainingRequestBudgetNanos);
    requestDurationTracker.schedule(ctx, remainingRequestBudget);
  }

  /**
   * Completes the in-flight request at most once — either successfully with {@code response} or
   * exceptionally with {@code cause}. Subsequent calls are no-ops.
   */
  private void tryComplete(FromFunction response, Throwable cause) {
    final NettyRequest current = inflightRequest;
    if (current == null) {
      return;
    }
    inflightRequest = null;
    if (cause != null) {
      current.completeAttemptExceptionally(cause);
    } else {
      current.complete(response);
    }
  }
}
| 6,212 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/ChannelAttributes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import org.apache.flink.shaded.netty4.io.netty.util.AttributeKey;
/** Channel-local attribute keys used by the netty request-reply client's connection pool. */
final class ChannelAttributes {

  /** Set to {@code TRUE} once a pooled channel has outlived its time-to-live. */
  static final AttributeKey<Boolean> EXPIRED =
      AttributeKey.valueOf("org.apache.flink.statefun.flink.core.nettyclient.ExpiredKey");

  /** Tracks whether the channel is currently checked out of (acquired from) the pool. */
  static final AttributeKey<Boolean> ACQUIRED =
      AttributeKey.valueOf("org.apache.flink.statefun.flink.core.nettyclient.AcquiredKey");

  private ChannelAttributes() {}
}
| 6,213 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettySharedResources.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.io.Closeable;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.flink.core.fs.CloseableRegistry;
import org.apache.flink.shaded.netty4.io.netty.bootstrap.Bootstrap;
import org.apache.flink.shaded.netty4.io.netty.channel.Channel;
import org.apache.flink.shaded.netty4.io.netty.channel.EventLoopGroup;
import org.apache.flink.shaded.netty4.io.netty.channel.epoll.Epoll;
import org.apache.flink.shaded.netty4.io.netty.channel.epoll.EpollEventLoopGroup;
import org.apache.flink.shaded.netty4.io.netty.channel.epoll.EpollSocketChannel;
import org.apache.flink.shaded.netty4.io.netty.channel.kqueue.KQueue;
import org.apache.flink.shaded.netty4.io.netty.channel.kqueue.KQueueEventLoopGroup;
import org.apache.flink.shaded.netty4.io.netty.channel.kqueue.KQueueSocketChannel;
import org.apache.flink.shaded.netty4.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.flink.shaded.netty4.io.netty.channel.socket.nio.NioSocketChannel;
import org.apache.flink.util.IOUtils;
/**
 * Netty resources (event loop group, bootstrap, and other closeables) shared by all remote
 * function endpoints of a task.
 *
 * <p>Picks the best available transport (epoll on Linux, kqueue on macOS/BSD, NIO otherwise) and
 * registers the event loop group for graceful shutdown. All registered closeables are closed once
 * by {@link #shutdownGracefully()}.
 */
final class NettySharedResources {
  private final AtomicBoolean shutdown = new AtomicBoolean();
  private final Bootstrap bootstrap;
  // fixed spelling: was "mangedResources".
  private final CloseableRegistry managedResources = new CloseableRegistry();

  public NettySharedResources() {
    // TODO: configure DNS resolving
    final EventLoopGroup workerGroup;
    final Class<? extends Channel> channelClass;
    if (Epoll.isAvailable()) {
      workerGroup = new EpollEventLoopGroup(daemonThreadFactory("netty-http-worker"));
      channelClass = EpollSocketChannel.class;
    } else if (KQueue.isAvailable()) {
      workerGroup = new KQueueEventLoopGroup(daemonThreadFactory("http-netty-worker"));
      channelClass = KQueueSocketChannel.class;
    } else {
      workerGroup = new NioEventLoopGroup(daemonThreadFactory("netty-http-client"));
      channelClass = NioSocketChannel.class;
    }
    registerClosable(workerGroup::shutdownGracefully);
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(workerGroup);
    bootstrap.channel(channelClass);
    this.bootstrap = bootstrap;
  }

  /** Returns the shared client bootstrap (pre-configured with the event loop and channel type). */
  public Bootstrap bootstrap() {
    return bootstrap;
  }

  /**
   * Registers a closeable to be closed on {@link #shutdownGracefully()}.
   *
   * <p>(Note: the method name keeps the original "Closable" spelling to preserve the external
   * interface for existing callers.)
   */
  public void registerClosable(Closeable closeable) {
    try {
      managedResources.registerCloseable(closeable);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  public boolean isShutdown() {
    return shutdown.get();
  }

  /** Closes all registered resources exactly once; subsequent calls are no-ops. */
  public void shutdownGracefully() {
    if (shutdown.compareAndSet(false, true)) {
      IOUtils.closeQuietly(managedResources);
    }
  }

  /** Creates a factory for named daemon threads (so they do not block JVM shutdown). */
  private static ThreadFactory daemonThreadFactory(String name) {
    return runnable -> {
      Thread t = new Thread(runnable);
      t.setDaemon(true);
      t.setName(name);
      return t;
    };
  }
}
| 6,214 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyRequestReplySpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static java.util.Optional.ofNullable;
import java.time.Duration;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Supplier;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonSetter;
/**
 * JSON-deserializable configuration for the netty-based request-reply remote function client.
 *
 * <p>All properties are optional; absent values fall back to the defaults declared below. For the
 * call/connect timeouts, a value supplied via the nested {@code timeouts} object takes precedence
 * over the equivalent top-level property (see the constructor).
 */
public final class NettyRequestReplySpec {
  // property names in the spec
  public static final String CALL_TIMEOUT_PROPERTY = "call";
  public static final String CONNECT_TIMEOUT_PROPERTY = "connect";
  public static final String POOLED_CONNECTION_TTL_PROPERTY = "pool_ttl";
  public static final String CONNECTION_POOL_MAX_SIZE_PROPERTY = "pool_size";
  public static final String MAX_REQUEST_OR_RESPONSE_SIZE_IN_BYTES_PROPERTY = "payload_max_bytes";
  public static final String TRUST_CA_CERTS_PROPERTY = "trust_cacerts";
  public static final String CLIENT_CERT_PROPERTY = "client_cert";
  public static final String CLIENT_KEY_PROPERTY = "client_key";
  public static final String CLIENT_KEY_PASSWORD_PROPERTY = "client_key_password";
  public static final String TIMEOUTS_PROPERTY = "timeouts";

  // spec default values
  @VisibleForTesting public static final Duration DEFAULT_CALL_TIMEOUT = Duration.ofMinutes(2);

  @VisibleForTesting public static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(20);

  @VisibleForTesting
  public static final Duration DEFAULT_POOLED_CONNECTION_TTL = Duration.ofSeconds(15);

  @VisibleForTesting public static final int DEFAULT_CONNECTION_POOL_MAX_SIZE = 1024;

  // 32 MiB default cap on request/response payload size.
  @VisibleForTesting
  public static final int DEFAULT_MAX_REQUEST_OR_RESPONSE_SIZE_IN_BYTES = 32 * 1048576;

  // spec values
  public final Duration callTimeout;
  public final Duration connectTimeout;
  public final Duration pooledConnectionTTL;
  public final int connectionPoolMaxSize;
  public final int maxRequestOrResponseSizeInBytes;
  // TLS material locations; any of these may be null (exposed via Optional getters below).
  private final String trustedCaCerts;
  private final String clientCerts;
  private final String clientKey;
  private final String clientKeyPassword;

  /**
   * Jackson-bound constructor.
   *
   * <p>Call/connect timeout resolution order: (1) nested {@code timeouts} object, (2) top-level
   * property, (3) built-in default. Note that when only the nested {@code timeouts} object is
   * given, its own defaults apply (see {@link Timeouts}).
   */
  public NettyRequestReplySpec(
      @JsonProperty(CALL_TIMEOUT_PROPERTY) Duration callTimeout,
      @JsonProperty(CONNECT_TIMEOUT_PROPERTY) Duration connectTimeout,
      @JsonProperty(POOLED_CONNECTION_TTL_PROPERTY) Duration pooledConnectionTTL,
      @JsonProperty(CONNECTION_POOL_MAX_SIZE_PROPERTY) Integer connectionPoolMaxSize,
      @JsonProperty(MAX_REQUEST_OR_RESPONSE_SIZE_IN_BYTES_PROPERTY)
          Integer maxRequestOrResponseSizeInBytes,
      @JsonProperty(TRUST_CA_CERTS_PROPERTY) String trustedCaCerts,
      @JsonProperty(CLIENT_CERT_PROPERTY) String clientCerts,
      @JsonProperty(CLIENT_KEY_PROPERTY) String clientKey,
      @JsonProperty(CLIENT_KEY_PASSWORD_PROPERTY) String clientKeyPassword,
      @JsonProperty(TIMEOUTS_PROPERTY) Timeouts timeouts) {
    this.trustedCaCerts = trustedCaCerts;
    this.clientCerts = clientCerts;
    this.clientKey = clientKey;
    this.clientKeyPassword = clientKeyPassword;
    this.callTimeout =
        firstPresentOrDefault(
            ofNullable(timeouts).map(Timeouts::getCallTimeout),
            ofNullable(callTimeout),
            () -> DEFAULT_CALL_TIMEOUT);
    this.connectTimeout =
        firstPresentOrDefault(
            ofNullable(timeouts).map(Timeouts::getConnectTimeout),
            ofNullable(connectTimeout),
            () -> DEFAULT_CONNECT_TIMEOUT);
    this.pooledConnectionTTL =
        ofNullable(pooledConnectionTTL).orElse(DEFAULT_POOLED_CONNECTION_TTL);
    this.connectionPoolMaxSize =
        ofNullable(connectionPoolMaxSize).orElse(DEFAULT_CONNECTION_POOL_MAX_SIZE);
    this.maxRequestOrResponseSizeInBytes =
        ofNullable(maxRequestOrResponseSizeInBytes)
            .orElse(DEFAULT_MAX_REQUEST_OR_RESPONSE_SIZE_IN_BYTES);
  }

  public Optional<String> getTrustedCaCerts() {
    return Optional.ofNullable(trustedCaCerts);
  }

  public Optional<String> getClientCerts() {
    return Optional.ofNullable(clientCerts);
  }

  public Optional<String> getClientKey() {
    return Optional.ofNullable(clientKey);
  }

  public Optional<String> getClientKeyPassword() {
    return Optional.ofNullable(clientKeyPassword);
  }

  /**
   * This is a copy of {@linkplain
   * org.apache.flink.statefun.flink.core.httpfn.DefaultHttpRequestReplyClientSpec.Timeouts}, to
   * ease the migration from the {@code DefaultHttpRequestReplyClientFactory}.
   */
  public static final class Timeouts {
    // NOTE: these defaults intentionally differ from the outer class' defaults — presumably kept
    // in sync with DefaultHttpRequestReplyClientSpec.Timeouts for migration compatibility.
    private static final Duration DEFAULT_HTTP_TIMEOUT = Duration.ofMinutes(1);
    private static final Duration DEFAULT_HTTP_CONNECT_TIMEOUT = Duration.ofSeconds(10);

    private Duration callTimeout = DEFAULT_HTTP_TIMEOUT;
    private Duration connectTimeout = DEFAULT_HTTP_CONNECT_TIMEOUT;

    @JsonSetter("call")
    public void setCallTimeout(Duration callTimeout) {
      this.callTimeout = requireNonZeroDuration(callTimeout);
    }

    @JsonSetter("connect")
    public void setConnectTimeout(Duration connectTimeout) {
      this.connectTimeout = requireNonZeroDuration(connectTimeout);
    }

    public Duration getCallTimeout() {
      return callTimeout;
    }

    public Duration getConnectTimeout() {
      return connectTimeout;
    }

    /** Validates that a configured timeout is non-null and strictly positive (non-zero). */
    private static Duration requireNonZeroDuration(Duration duration) {
      Objects.requireNonNull(duration);
      if (duration.equals(Duration.ZERO)) {
        throw new IllegalArgumentException("Timeout durations must be larger than 0.");
      }
      return duration;
    }
  }

  /** Returns the first present optional's value, otherwise the supplied default. */
  @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
  private static <T> T firstPresentOrDefault(Optional<T> a, Optional<T> b, Supplier<T> orElse) {
    return a.orElseGet(() -> b.orElseGet(orElse));
  }
}
| 6,215 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/OnFlinkThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marker annotation: the annotated method or constructor is meant to be invoked on a Thread that
 * executes Flink's operator (it carries no behavior of its own, but is retained at runtime).
 */
@Documented
@Target({ElementType.METHOD, ElementType.CONSTRUCTOR})
@Retention(RetentionPolicy.RUNTIME)
public @interface OnFlinkThread {}
| 6,216 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/HttpConnectionPoolManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.util.Objects;
import java.util.function.Supplier;
import javax.annotation.Nullable;
import org.apache.flink.shaded.netty4.io.netty.channel.Channel;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelDuplexHandler;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelPipeline;
import org.apache.flink.shaded.netty4.io.netty.channel.pool.ChannelPoolHandler;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpClientCodec;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpContentDecompressor;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpObjectAggregator;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.SslContext;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.SslHandler;
/**
 * A {@link ChannelPoolHandler} that configures the pipeline of every newly created pooled channel
 * (optional TLS, HTTP codec, decompression, aggregation, request-reply handling, TTL management)
 * and tracks acquire/release state via the {@link ChannelAttributes#ACQUIRED} attribute.
 */
final class HttpConnectionPoolManager implements ChannelPoolHandler {
  private final NettyRequestReplySpec spec;
  // null means plain HTTP (no TLS handler is installed).
  @Nullable private final SslContext sslContext;
  private final String peerHost;
  private final int peerPort;
  private final Supplier<ChannelDuplexHandler> requestReplyHandlerSupplier;

  public HttpConnectionPoolManager(
      @Nullable SslContext sslContext,
      NettyRequestReplySpec spec,
      String peerHost,
      int peerPort,
      Supplier<ChannelDuplexHandler> requestReplyHandlerSupplier) {
    this.spec = Objects.requireNonNull(spec);
    this.peerHost = Objects.requireNonNull(peerHost);
    this.sslContext = sslContext;
    this.peerPort = peerPort;
    // previously unchecked, unlike the other reference arguments; fail fast on null.
    this.requestReplyHandlerSupplier = Objects.requireNonNull(requestReplyHandlerSupplier);
  }

  @Override
  public void channelAcquired(Channel channel) {
    channel.attr(ChannelAttributes.ACQUIRED).set(Boolean.TRUE);
  }

  @Override
  public void channelReleased(Channel channel) {
    channel.attr(ChannelAttributes.ACQUIRED).set(Boolean.FALSE);
    // let the request-reply handler reset its per-request state before the channel is reused.
    NettyRequestReplyHandler handler = channel.pipeline().get(NettyRequestReplyHandler.class);
    handler.onReleaseToPool();
  }

  /** Builds the pipeline for a freshly created channel; invoked once per connection. */
  @Override
  public void channelCreated(Channel channel) {
    ChannelPipeline p = channel.pipeline();
    if (sslContext != null) {
      // peer host/port are needed for SNI and certificate hostname verification.
      SslHandler sslHandler = sslContext.newHandler(channel.alloc(), peerHost, peerPort);
      p.addLast(sslHandler);
    }
    p.addLast(new HttpClientCodec());
    p.addLast(new HttpContentDecompressor(true));
    // aggregate the response into a single FullHttpResponse, bounded by the configured max size.
    p.addLast(new HttpObjectAggregator(spec.maxRequestOrResponseSizeInBytes, true));
    p.addLast(requestReplyHandlerSupplier.get());
    long channelTimeToLiveMillis = spec.pooledConnectionTTL.toMillis();
    p.addLast(new HttpConnectionPoolHandler(channelTimeToLiveMillis));
  }
}
| 6,217 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/HttpConnectionPoolHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static java.lang.Boolean.FALSE;
import static java.lang.Boolean.TRUE;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import org.apache.flink.shaded.netty4.io.netty.channel.Channel;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelDuplexHandler;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandlerContext;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.SslCloseCompletionEvent;
import org.apache.flink.shaded.netty4.io.netty.util.concurrent.ScheduledFuture;
/**
* An Handler that we add to the channel pipeline that makes sure that the channel is: 1) does not
* stick a round for a long time (if {@code connectionTtlMs > 0}. 2) if this channel uses TLS, and a
* {@code SslCloseCompletionEvent} event recieved, this channel will be closed.
*/
final class HttpConnectionPoolHandler extends ChannelDuplexHandler {
private final long ttlMs;
@Nullable private ScheduledFuture<?> timer;
HttpConnectionPoolHandler(long connectionTtlMs) {
this.ttlMs = connectionTtlMs;
}
public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
this.initialize(ctx);
super.channelRegistered(ctx);
}
public void channelActive(ChannelHandlerContext ctx) throws Exception {
initialize(ctx);
super.channelActive(ctx);
}
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
initialize(ctx);
super.handlerAdded(ctx);
}
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
destroy();
super.handlerRemoved(ctx);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
destroy();
super.channelInactive(ctx);
}
private void initialize(ChannelHandlerContext ctx) {
if (ttlMs <= 0) {
return;
}
if (timer != null) {
return;
}
long channelTimeToLive = ttlMs + positiveRandomJitterMillis();
timer =
ctx.channel()
.eventLoop()
.schedule(() -> tryExpire(ctx, false), channelTimeToLive, TimeUnit.MILLISECONDS);
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (!(evt instanceof SslCloseCompletionEvent)) {
return;
}
tryExpire(ctx, true);
}
private void destroy() {
if (timer != null) {
timer.cancel(false);
timer = null;
}
}
private void tryExpire(ChannelHandlerContext ctx, boolean shouldCancelTimer) {
if (shouldCancelTimer && timer != null) {
timer.cancel(false);
}
timer = null;
Channel channel = ctx.channel();
channel.attr(ChannelAttributes.EXPIRED).set(TRUE);
if (channel.isActive() && channel.attr(ChannelAttributes.ACQUIRED).get() == FALSE) {
// this channel is sitting all idly in the connection pool, unsuspecting of whats to come.
// we close it, but leave it in the pool, as the pool doesn't offer
// an API to remove an arbitrary connection. Eventually an health check will detect this, and
// remove it.
channel.close();
}
}
/** Compute a random delay between 1 and 3 seconds. */
private static int positiveRandomJitterMillis() {
return ThreadLocalRandom.current().nextInt(1_000, 3_000);
}
}
| 6,218 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.io.Closeable;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import javax.annotation.Nullable;
import org.apache.flink.shaded.netty4.io.netty.channel.Channel;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.ReadOnlyHttpHeaders;
import org.apache.flink.statefun.flink.core.metrics.RemoteInvocationMetrics;
import org.apache.flink.statefun.flink.core.nettyclient.exceptions.RequestTimeoutException;
import org.apache.flink.statefun.flink.core.nettyclient.exceptions.ShutdownException;
import org.apache.flink.statefun.flink.core.reqreply.ToFunctionRequestSummary;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction;
import org.apache.flink.util.IOUtils;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A single logical request to a remote function, driven to completion over one or more HTTP
 * attempts. Failed attempts are retried with exponential backoff (see {@link
 * #delayUntilNextAttempt()}), bounded by the client's total request budget.
 *
 * <p>Thread model: methods are annotated with {@code @OnFlinkThread}, {@code @OnClientThread} and
 * {@code @OnChannelThread} to document which thread invokes them. {@code attemptChannel} is the
 * only field accessed from more than one of these threads, and it is guarded by a CAS field
 * updater.
 */
final class NettyRequest {
  private static final Logger LOG = LoggerFactory.getLogger(NettyRequest.class);

  // CAS access to the single channel held by the in-flight attempt (field: attemptChannel).
  private static final AtomicReferenceFieldUpdater<NettyRequest, Channel> ATTEMPT_CHANNEL_CAS =
      AtomicReferenceFieldUpdater.newUpdater(NettyRequest.class, Channel.class, "attemptChannel");

  // immutable setup
  private final NettyClientService client;

  // request specific immutable input
  private final RemoteInvocationMetrics metrics;
  private final ToFunctionRequestSummary reqSummary;
  private final ToFunction toFunction;
  private final long requestCreatedNanos; // when this logical request was created (budget anchor)

  // holder of the result
  private final CompletableFuture<FromFunction> result = new CompletableFuture<>();

  // request runtime (mutated only from the client/channel event loop)
  private long attemptStartedNanos; // start time of the current attempt
  private int numberOfAttempts; // number of completed attempts so far
  @Nullable private Closeable retryTask; // pending backoff timer, if any
  @Nullable private volatile Channel attemptChannel; // channel owned by the current attempt

  @OnFlinkThread
  NettyRequest(
      NettyClientService client,
      RemoteInvocationMetrics metrics,
      ToFunctionRequestSummary requestSummary,
      ToFunction toFunction) {
    this.client = Objects.requireNonNull(client);
    this.reqSummary = Objects.requireNonNull(requestSummary);
    this.metrics = Objects.requireNonNull(metrics);
    this.toFunction = Objects.requireNonNull(toFunction);
    this.requestCreatedNanos = client.systemNanoTime();
  }

  // --------------------------------------------------------------------------------------------
  // Actions
  // --------------------------------------------------------------------------------------------

  /** Starts the first attempt on the client's event loop and returns the overall result future. */
  @OnFlinkThread
  CompletableFuture<FromFunction> start() {
    client.runOnEventLoop(this::startAttempt);
    return result;
  }

  /**
   * Completes this request successfully with the given response; called by the pipeline handler
   * after a well-formed response has been decoded.
   */
  @OnChannelThread
  void complete(FromFunction fromFn) {
    try {
      onAttemptCompleted();
    } catch (Throwable t) {
      LOG.warn("Attempt cleanup failed", t);
    }
    onFinalCompleted(fromFn, null);
  }

  /**
   * Handles a failed attempt: cleans up attempt state, then either schedules a retry (via {@link
   * #onAttemptCompletedExceptionally}) or fails the request permanently if that method throws.
   */
  @OnClientThread
  @OnChannelThread
  void completeAttemptExceptionally(Throwable cause) {
    try {
      onAttemptCompleted();
    } catch (Throwable t) {
      LOG.warn("Attempt cleanup failed", t);
    }
    try {
      onAttemptCompletedExceptionally(cause);
    } catch (Throwable t) {
      // non-retryable / out of budget: fail the request permanently.
      onFinalCompleted(null, t);
    }
  }

  /** Begins a new attempt by asking the client for a pooled channel. */
  @OnClientThread
  private void startAttempt() {
    try {
      attemptStartedNanos = client.systemNanoTime();
      client.acquireChannel(this::onChannelAcquisitionComplete);
    } catch (Throwable throwable) {
      completeAttemptExceptionally(throwable);
    }
  }

  // --------------------------------------------------------------------------------------------
  // Events
  // --------------------------------------------------------------------------------------------

  /** Called when channel acquisition finishes; exactly one of (ch, cause) is meaningful. */
  @OnChannelThread
  private void onChannelAcquisitionComplete(Channel ch, Throwable cause) {
    if (cause != null) {
      completeAttemptExceptionally(cause);
      return;
    }
    if (!ATTEMPT_CHANNEL_CAS.compareAndSet(this, null, ch)) {
      // strange. I'm trying to acquire a channel, while still holding a channel.
      // this should never happen, and it is a bug.
      // lets abort.
      LOG.warn(
          "BUG: Trying to acquire a new Netty channel, while still holding an existing one. "
              + "Failing this request, but continuing processing others.");
      onFinalCompleted(
          null,
          new IllegalStateException(
              "Unexpected request state, failing this request, but will try others."));
      return;
    }
    // introduce the request to the pipeline.
    // see ya' at the handler :)
    client.writeAndFlush(this, ch, this::onFirstWriteCompleted);
  }

  /** Outcome of the initial write+flush; only a failure needs handling here. */
  @OnChannelThread
  private void onFirstWriteCompleted(Void ignored, Throwable cause) {
    if (cause != null) {
      completeAttemptExceptionally(cause);
    }
  }

  /**
   * Common cleanup at the end of every attempt (successful or not): releases the held channel,
   * records latency, cancels any pending retry timer, and bumps the attempt counter.
   */
  @OnClientThread
  @OnChannelThread
  private void onAttemptCompleted() {
    // 1. release a channel if we have one. The cas here is not strictly needed,
    // and it is here to be on the safe side.
    Channel ch = ATTEMPT_CHANNEL_CAS.getAndSet(this, null);
    if (ch != null) {
      client.releaseChannel(ch);
    }
    final long nanoElapsed = client.systemNanoTime() - attemptStartedNanos;
    final long millisElapsed = TimeUnit.NANOSECONDS.toMillis(nanoElapsed);
    attemptStartedNanos = 0;
    metrics.remoteInvocationLatency(millisElapsed);
    IOUtils.closeQuietly(retryTask);
    retryTask = null;
    numberOfAttempts++;
  }

  /**
   * Decides whether the failed attempt can be retried. Throws (terminating the request) when the
   * client is shut down, the request budget is exhausted, or the causal chain contains a
   * non-retryable exception; otherwise schedules the next attempt after a backoff delay.
   */
  @OnClientThread
  @OnChannelThread
  private void onAttemptCompletedExceptionally(Throwable cause) throws Throwable {
    metrics.remoteInvocationFailures();
    LOG.warn(
        "Exception caught while trying to deliver a message: (attempt #"
            + (numberOfAttempts - 1)
            + ")"
            + reqSummary,
        cause);
    if (client.isShutdown()) {
      throw ShutdownException.INSTANCE;
    }
    final long delayUntilNextAttempt = delayUntilNextAttempt();
    if (delayUntilNextAttempt < 0) {
      throw RequestTimeoutException.INSTANCE;
    }
    analyzeCausalChain(cause);
    LOG.info(
        "Retry #"
            + numberOfAttempts
            + " "
            + reqSummary
            + " ,About to sleep for "
            + TimeUnit.NANOSECONDS.toMillis(delayUntilNextAttempt));
    // better luck next time!
    Preconditions.checkState(retryTask == null);
    this.retryTask = client.newTimeout(this::onAttemptBackoffTimer, delayUntilNextAttempt);
  }

  /** Backoff timer fired: re-check budget and shutdown state, then start the next attempt. */
  @OnClientThread
  private void onAttemptBackoffTimer() {
    if (delayUntilNextAttempt() < 0) {
      completeAttemptExceptionally(RequestTimeoutException.INSTANCE);
    } else if (client.isShutdown()) {
      completeAttemptExceptionally(ShutdownException.INSTANCE);
    } else {
      startAttempt();
    }
  }

  /** Terminally completes the result future, either with a value or with a failure. */
  @OnClientThread
  @OnChannelThread
  private void onFinalCompleted(FromFunction result, Throwable o) {
    if (o != null) {
      this.result.completeExceptionally(o);
    } else {
      this.result.complete(result);
    }
  }

  // ---------------------------------------------------------------------------------
  // Request specific getters and setters
  // ---------------------------------------------------------------------------------

  CompletableFuture<FromFunction> result() {
    return result;
  }

  /** Nanoseconds left of the total (all-attempts) request budget; may be negative. */
  long remainingRequestBudgetNanos() {
    final long usedRequestBudget = client.systemNanoTime() - requestCreatedNanos;
    return client.totalRequestBudgetInNanos() - usedRequestBudget;
  }

  ToFunction toFunction() {
    return toFunction;
  }

  String uri() {
    return client.queryPath();
  }

  /** Rethrows the first non-retryable exception found anywhere in the causal chain. */
  private void analyzeCausalChain(Throwable cause) throws Throwable {
    while (cause != null) {
      if (!isRetryable(cause)) {
        throw cause;
      }
      cause = cause.getCause();
    }
  }

  /** Everything is retryable except explicit shutdown and request-timeout signals. */
  private boolean isRetryable(Throwable exception) {
    return !(exception instanceof ShutdownException)
        && !(exception instanceof RequestTimeoutException);
  }

  /**
   * Exponential backoff: 2ms * 2^attempts, capped by the remaining request budget. Returns a
   * negative value when less than one millisecond of budget remains (i.e. do not retry).
   */
  private long delayUntilNextAttempt() {
    final long remainingRequestBudget = remainingRequestBudgetNanos();
    if (remainingRequestBudget
        <= 1_000 * 1_000) { // if we are left with less than a millisecond, don't retry
      return -1;
    }
    // start with 2 milliseconds.
    final long delay = (2 * 1_000 * 1_000) * (1L << numberOfAttempts);
    return Math.min(delay, remainingRequestBudget);
  }

  public ReadOnlyHttpHeaders headers() {
    return client.headers();
  }
}
| 6,219 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/OnClientThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks methods that are only ever invoked on the event-loop thread owned by a specific {@code
 * NettyClient}. That thread is assigned to the client once and never changes, so methods carrying
 * this annotation need no extra synchronization for client-local state.
 */
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface OnClientThread {}
| 6,220 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyRequestReplyClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.net.URI;
import javax.annotation.Nullable;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClient;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClientFactory;
/**
 * Creates Netty-based {@link RequestReplyClient}s. All clients produced by this factory share one
 * lazily-created {@link NettySharedResources} instance, which {@link #cleanup()} releases.
 */
public final class NettyRequestReplyClientFactory implements RequestReplyClientFactory {

  private static final ObjectMapper OBJ_MAPPER = StateFunObjectMapper.create();

  public static final NettyRequestReplyClientFactory INSTANCE =
      new NettyRequestReplyClientFactory();

  /** Shared event loops etc.; created on first use, dropped by {@link #cleanup()}. */
  @Nullable private transient NettySharedResources sharedNettyResources;

  @Override
  public RequestReplyClient createTransportClient(ObjectNode transportProperties, URI endpointUrl) {
    NettySharedResources shared = this.sharedNettyResources;
    if (shared == null) {
      shared = new NettySharedResources();
      this.sharedNettyResources = shared;
    }
    final NettyRequestReplySpec spec = parseTransportSpec(transportProperties);
    return NettyClient.from(shared, spec, endpointUrl);
  }

  @Override
  public void cleanup() {
    final NettySharedResources shared = this.sharedNettyResources;
    this.sharedNettyResources = null;
    if (shared != null) {
      shared.shutdownGracefully();
    }
  }

  /** Parses the per-endpoint transport properties into a typed spec. */
  private static NettyRequestReplySpec parseTransportSpec(ObjectNode transportProperties) {
    try {
      return OBJ_MAPPER.treeToValue(transportProperties, NettyRequestReplySpec.class);
    } catch (JsonProcessingException e) {
      throw new IllegalStateException("Unable to parse Netty transport spec.", e);
    }
  }
}
| 6,221 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/Endpoint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
final class Endpoint {
private final String queryPath;
private final InetSocketAddress serviceAddress;
private final boolean useTls;
Endpoint(URI endpointUrl) {
requireValidEndpointUri(endpointUrl);
this.useTls = endpointUrl.getScheme().equalsIgnoreCase("https");
this.queryPath = Endpoint.computeQueryPath(endpointUrl);
this.serviceAddress =
InetSocketAddress.createUnresolved(endpointUrl.getHost(), endpointPort(endpointUrl));
}
public String queryPath() {
return queryPath;
}
public InetSocketAddress serviceAddress() {
return serviceAddress;
}
public boolean useTls() {
return useTls;
}
private static int endpointPort(URI endpoint) {
int port = endpoint.getPort();
if (port > 0) {
return port;
}
if (endpoint.getScheme().equalsIgnoreCase("https")) {
return 443;
}
return 80;
}
private static String computeQueryPath(URI endpoint) {
String uri = endpoint.getPath();
if (uri == null || uri.isEmpty()) {
uri = "/";
}
String query = endpoint.getQuery();
if (query != null) {
uri += "?" + query;
}
String fragment = endpoint.getFragment();
if (fragment != null) {
uri += "#" + fragment;
}
return uri;
}
@SuppressWarnings("ResultOfMethodCallIgnored")
private static void requireValidEndpointUri(URI endpointUrl) {
try {
endpointUrl.parseServerAuthority();
} catch (URISyntaxException e) {
throw new IllegalStateException(e);
}
}
}
| 6,222 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyClientService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.io.Closeable;
import java.util.function.BiConsumer;
import org.apache.flink.shaded.netty4.io.netty.channel.Channel;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.ReadOnlyHttpHeaders;
/**
 * The services a {@code NettyRequest} needs from its owning client: channel pooling, endpoint
 * metadata, timers, a clock, and access to the client's event loop.
 */
interface NettyClientService {

  /** Asynchronously acquires a channel; the consumer receives either a channel or a failure. */
  void acquireChannel(BiConsumer<Channel, Throwable> consumer);

  /** Returns a previously acquired channel. */
  void releaseChannel(Channel channel);

  /** The request path (URI string) used for requests against the remote endpoint. */
  String queryPath();

  /** Immutable HTTP headers to attach to outgoing requests. */
  ReadOnlyHttpHeaders headers();

  /** Total time budget, in nanoseconds, for one logical request including all retries. */
  long totalRequestBudgetInNanos();

  /** Schedules {@code client} to run after {@code delayInNanos}; presumably closing the returned
   * handle cancels the timeout — TODO confirm against the implementation. */
  Closeable newTimeout(Runnable client, long delayInNanos);

  /** Runs {@code task} on the client's event loop thread. */
  void runOnEventLoop(Runnable task);

  /** Whether this client has been shut down. */
  boolean isShutdown();

  /** Current time in nanoseconds; used by callers to measure elapsed time and budgets. */
  long systemNanoTime();

  /** Writes {@code what} to {@code ch} and flushes; {@code listener} is notified of the outcome. */
  <T> void writeAndFlush(T what, Channel ch, BiConsumer<Void, Throwable> listener);
}
| 6,223 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/OnChannelThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks methods that are only ever invoked on the event-loop thread that owns a specific Netty
 * {@code Channel}. Netty assigns every channel to a single thread for the channel's whole
 * lifetime and dispatches all channel-related events on that thread, so methods carrying this
 * annotation need no extra synchronization for channel-local state.
 */
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface OnChannelThread {}
| 6,224 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyProtobuf.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Message;
import com.google.protobuf.Parser;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.util.function.IntFunction;
import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf;
import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBufInputStream;
import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBufOutputStream;
import org.apache.flink.util.Preconditions;
/**
 * (De)serialization helpers between protobuf {@link Message}s and Netty {@link ByteBuf}s, using a
 * zero-copy path when the buffer is backed by a single NIO buffer and falling back to stream-based
 * (de)serialization otherwise.
 */
final class NettyProtobuf {

  /** Static utility class; not instantiable. */
  private NettyProtobuf() {}

  /**
   * Serializes {@code message} into a buffer obtained from {@code allocator}.
   *
   * <p>The buffer is released on failure; on success the caller owns the returned buffer.
   */
  public static <M extends Message> ByteBuf serializeProtobuf(
      IntFunction<ByteBuf> allocator, M message) {
    final int requiredSize = message.getSerializedSize();
    final ByteBuf buf = allocator.apply(requiredSize);
    try {
      if (buf.nioBufferCount() == 1) {
        zeroCopySerialize(message, requiredSize, buf);
      } else {
        serializeOutputStream(message, buf);
      }
      return buf;
    } catch (IOException e) {
      // don't leak the buffer when serialization fails.
      buf.release();
      throw new UncheckedIOException(e);
    }
  }

  /** Parses a message from the readable bytes of {@code buf}; does not release the buffer. */
  public static <M extends Message> M deserializeProtobuf(ByteBuf buf, Parser<M> parser) {
    try {
      if (buf.nioBufferCount() == 1) {
        return zeroCopyDeserialize(buf, parser);
      } else {
        return deserializeInputStream(buf, parser);
      }
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  /** Writes directly into the buffer's single backing NIO buffer, then advances the writer index. */
  private static <M extends Message> void zeroCopySerialize(M message, int len, ByteBuf buf)
      throws IOException {
    Preconditions.checkState(len <= buf.writableBytes());
    final int originalWriterIndex = buf.writerIndex();
    ByteBuffer nioBuf = buf.nioBuffer(originalWriterIndex, len);
    CodedOutputStream out = CodedOutputStream.newInstance(nioBuf);
    message.writeTo(out);
    out.flush();
    buf.writerIndex(originalWriterIndex + len);
  }

  /** Fallback: serialize through a (copying) stream adapter over the buffer. */
  private static <M extends Message> void serializeOutputStream(M message, ByteBuf buf)
      throws IOException {
    message.writeTo(new ByteBufOutputStream(buf));
  }

  /** Parses directly from the buffer's single backing NIO buffer, then advances the reader index. */
  private static <M extends Message> M zeroCopyDeserialize(ByteBuf buf, Parser<M> parser)
      throws InvalidProtocolBufferException {
    final int messageLength = buf.readableBytes();
    final int originalReaderIndex = buf.readerIndex();
    ByteBuffer nioBuffer = buf.nioBuffer(originalReaderIndex, messageLength);
    CodedInputStream in = CodedInputStream.newInstance(nioBuffer);
    M message = parser.parseFrom(in);
    buf.readerIndex(originalReaderIndex + messageLength);
    return message;
  }

  /** Fallback: parse through a (copying) stream adapter over the buffer. */
  private static <M extends Message> M deserializeInputStream(ByteBuf buf, Parser<M> parser)
      throws InvalidProtocolBufferException {
    return parser.parseFrom(new ByteBufInputStream(buf));
  }
}
| 6,225 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyTransportModule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.extensions.ExtensionModule;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientConstants;
/** Registers the Netty-based request/reply transport client factory as an extension. */
@AutoService(ExtensionModule.class)
public class NettyTransportModule implements ExtensionModule {

  @Override
  public void configure(Map<String, String> globalConfigurations, Binder binder) {
    // Expose the shared Netty client factory under the async transport extension type.
    final NettyRequestReplyClientFactory clientFactory = NettyRequestReplyClientFactory.INSTANCE;
    binder.bindExtension(TransportClientConstants.ASYNC_CLIENT_FACTORY_TYPE, clientFactory);
  }
}
| 6,226 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/NettyRequestTimeoutTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static org.apache.flink.util.Preconditions.checkState;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandlerContext;
import org.apache.flink.shaded.netty4.io.netty.util.concurrent.ScheduledFuture;
import org.apache.flink.statefun.flink.core.nettyclient.exceptions.RequestTimeoutException;
/**
 * A one-shot task that fails an in-flight request with a {@link RequestTimeoutException} once its
 * remaining time budget elapses. The timeout can be disarmed via {@link #cancel()} before it
 * fires; scheduling and cancellation happen via the channel's executor.
 */
final class NettyRequestTimeoutTask implements Runnable {
  private final NettyRequestReplyHandler handler;
  @Nullable private ScheduledFuture<?> future;
  @Nullable private ChannelHandlerContext ctx;

  public NettyRequestTimeoutTask(NettyRequestReplyHandler handler) {
    this.handler = Objects.requireNonNull(handler);
  }

  /** Arms the timeout to fire after {@code remainingRequestBudget} nanoseconds. */
  void schedule(ChannelHandlerContext ctx, long remainingRequestBudget) {
    this.ctx = Objects.requireNonNull(ctx);
    this.future = ctx.executor().schedule(this, remainingRequestBudget, TimeUnit.NANOSECONDS);
  }

  /** Disarms a previously scheduled timeout; safe to call when nothing is scheduled. */
  void cancel() {
    final ScheduledFuture<?> scheduled = this.future;
    this.future = null;
    this.ctx = null;
    if (scheduled != null) {
      scheduled.cancel(false);
    }
  }

  @Override
  public void run() {
    // Both fields are set together in schedule(); if either is null the task was mis-used.
    checkState(ctx != null);
    checkState(future != null);
    handler.exceptionCaught(ctx, RequestTimeoutException.INSTANCE);
  }
}
| 6,227 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/exceptions/DisconnectedException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient.exceptions;
import java.io.IOException;
/**
 * Signals that the underlying connection was closed while a request was in flight.
 *
 * <p>A single pre-allocated, stack-trace-free instance ({@link #INSTANCE}) is shared, since the
 * allocation-site stack trace of a connection drop carries no useful information.
 */
public final class DisconnectedException extends IOException {

  public static final DisconnectedException INSTANCE = new DisconnectedException();

  private DisconnectedException() {
    super("Disconnected");
    // Shared singleton: drop the meaningless allocation-site stack trace.
    setStackTrace(new StackTraceElement[] {});
  }
}
| 6,228 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/exceptions/WrongHttpResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient.exceptions;
/**
 * Signals that an HTTP response could not be accepted as a valid request/reply response; the
 * message describes what was wrong with it.
 */
public final class WrongHttpResponse extends RuntimeException {
  public WrongHttpResponse(String message) {
    super(message);
  }
}
| 6,229 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/exceptions/RequestTimeoutException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient.exceptions;
import java.util.concurrent.TimeoutException;
/**
 * Signals that a request exhausted its total time budget (including retries).
 *
 * <p>A shared, stack-trace-free instance ({@link #INSTANCE}) is used on hot paths; the public
 * constructor remains available for callers that want a fresh instance.
 */
public final class RequestTimeoutException extends TimeoutException {

  public static final RequestTimeoutException INSTANCE = new RequestTimeoutException();

  public RequestTimeoutException() {
    // The allocation site is meaningless for a shared/reused exception; drop the trace.
    setStackTrace(new StackTraceElement[0]);
  }
}
| 6,230 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/exceptions/ShutdownException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient.exceptions;
/**
 * Signals that the client is shutting down and no further work will be accepted.
 *
 * <p>Used purely as a signal, so the (expensively captured) stack trace is discarded at
 * construction time.
 */
public final class ShutdownException extends RuntimeException {

  private static final StackTraceElement[] NO_STACK_TRACE = new StackTraceElement[0];

  /** Shared instance; safe to reuse because the exception carries no per-failure state. */
  public static final ShutdownException INSTANCE = new ShutdownException();

  public ShutdownException() {
    super("Shutdown");
    // Throwable#setStackTrace defensively clones the array, so sharing the constant is safe.
    setStackTrace(NO_STACK_TRACE);
  }
}
| 6,231 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/nettyclient/exceptions/NoMoreRoutesException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient.exceptions;
/**
 * Signals that every candidate route for a request has been tried and none remain.
 *
 * <p>Unlike the other exceptions in this package, instances are created per failure (no shared
 * {@code INSTANCE}) and keep their stack trace, since the message carries failure-specific
 * context.
 */
public class NoMoreRoutesException extends RuntimeException {
  public NoMoreRoutesException(String message) {
    super(message);
  }
}
| 6,232 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/queue/Locks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.queue;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Provides a few implementations of the {@link Lock} interface to be used with {@link MpscQueue}.
 *
 * <p>All locks here are non-reentrant in usage (MpscQueue acquires and releases within a single
 * method call) and are acquired uninterruptibly.
 */
public final class Locks {
  private Locks() {}

  /** A busy-wait lock that yields the CPU between failed acquisition attempts. */
  public static Lock spinLock() {
    return new YieldingSpinLock();
  }

  /** A lock backed by a <em>fair</em> {@link ReentrantLock}. */
  public static Lock jdkReentrantLock() {
    return new JdkLock();
  }

  // --------------------------------------------------------------------------------------------------------
  // JdkLock
  // --------------------------------------------------------------------------------------------------------

  private static final class JdkLock implements Lock {
    // fairness = true: waiting threads acquire in roughly FIFO order, trading peak throughput
    // for bounded waiting of the producers.
    private final ReentrantLock lock = new ReentrantLock(true);

    @Override
    public void lockUninterruptibly() {
      // ReentrantLock#lock (unlike lockInterruptibly) ignores interruption, matching this
      // method's contract.
      lock.lock();
    }

    @Override
    public void unlock() {
      lock.unlock();
    }
  }

  // --------------------------------------------------------------------------------------------------------
  // YieldingSpinLock
  // --------------------------------------------------------------------------------------------------------
  //
  // The LhsPadding / Value / RhsPadding class sandwich places seven longs on each side of the
  // volatile `state` field — presumably to pad `state` onto its own cache line and avoid false
  // sharing with neighboring fields/objects (NOTE(review): actual field layout is JVM-specific;
  // confirm if this matters on the target JVM).

  @SuppressWarnings("unused")
  private static class LhsPadding {
    protected long p1, p2, p3, p4, p5, p6, p7;
  }

  private static class Value extends LhsPadding {
    // lock state: 0 = unlocked, 1 = locked (see YieldingSpinLock).
    protected volatile long state;
  }

  @SuppressWarnings("unused")
  private static class RhsPadding extends Value {
    protected long p9, p10, p11, p12, p13, p14, p15;
  }

  private static final class YieldingSpinLock extends RhsPadding implements Lock {
    // a static field updater avoids allocating a separate AtomicLong per lock instance.
    private static final AtomicLongFieldUpdater<Value> UPDATER =
        AtomicLongFieldUpdater.newUpdater(Value.class, "state");

    @Override
    public void lockUninterruptibly() {
      // spin until this thread wins the 0 -> 1 CAS; yield between attempts to give the current
      // lock holder a chance to run and release.
      while (!UPDATER.compareAndSet(this, 0, 1)) {
        Thread.yield();
      }
    }

    @Override
    public void unlock() {
      // lazySet performs an ordered (release) store without a full fence — cheaper than a
      // volatile write; the spinning CAS in lockUninterruptibly is still guaranteed to
      // eventually observe it.
      UPDATER.lazySet(this, 0);
    }
  }
}
| 6,233 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/queue/MpscQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.queue;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Objects;
import org.apache.flink.annotation.Internal;
/**
 * Multi producers single consumer fifo queue.
 *
 * <p>This queue supports two operations:
 *
 * <ul>
 *   <li>{@link #add(Object)} atomically adds an element to this queue and returns the number of
 *       elements in the queue after the addition.
 *   <li>{@link #drainAll()} atomically obtains a snapshot of the queue and simultaneously empties
 *       the queue, i.e. drains it.
 * </ul>
 *
 * <p>Implementation note: the queue keeps two buffers and swaps them on every drain. The deque
 * returned by {@link #drainAll()} is the same object that will become the active buffer on the
 * <em>next</em> drain — so the single consumer is expected to consume the returned deque (by
 * removing its elements) before calling {@link #drainAll()} again (NOTE(review): this contract is
 * implied by the buffer swap; confirm against the call sites).
 *
 * @param <T> element type
 */
@Internal
public final class MpscQueue<T> {
  // shared sentinel returned when the queue is empty; callers must treat it as read-only,
  // since it is a mutable ArrayDeque shared by all MpscQueue instances.
  private static final Deque<?> EMPTY = new ArrayDeque<>(0);
  // -- configuration
  private final Lock lock;
  // -- runtime: `active` receives new elements; `standby` is the spare buffer swapped in on drain.
  private ArrayDeque<T> active;
  private ArrayDeque<T> standby;

  public MpscQueue(int initialBufferSize, Lock lock) {
    this.lock = Objects.requireNonNull(lock);
    this.active = new ArrayDeque<>(initialBufferSize);
    this.standby = new ArrayDeque<>(initialBufferSize);
  }

  /**
   * Adds an element to this (unbound) queue.
   *
   * @param element the element to add.
   * @return the number of elements in the queue after the addition.
   */
  public int add(T element) {
    Objects.requireNonNull(element);
    final Lock lock = this.lock;
    lock.lockUninterruptibly();
    try {
      ArrayDeque<T> active = this.active;
      active.addLast(element);
      return active.size();
    } finally {
      lock.unlock();
    }
  }

  /**
   * Atomically drains the queue.
   *
   * <p>The returned deque is an internal buffer that is reused on a later drain; the consumer
   * should remove its elements before the next call (see class-level note).
   *
   * @return a batch of elements that obtained atomically from that queue.
   */
  public Deque<T> drainAll() {
    final Lock lock = this.lock;
    lock.lockUninterruptibly();
    try {
      final ArrayDeque<T> ready = this.active;
      if (ready.isEmpty()) {
        return empty();
      }
      // swap active with standby
      this.active = this.standby;
      this.standby = ready;
      return ready;
    } finally {
      lock.unlock();
    }
  }

  // returns the shared empty sentinel, typed to the caller's element type; safe because it
  // holds no elements and is never written to by this class.
  @SuppressWarnings("unchecked")
  private static <T> Deque<T> empty() {
    return (Deque<T>) EMPTY;
  }
}
| 6,234 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/queue/Lock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.queue;
/**
 * A minimal mutual-exclusion primitive; see {@link Locks} for the available implementations,
 * used by {@link MpscQueue}.
 */
public interface Lock {

  /** Acquires the lock, blocking if necessary; this call does not respond to interruption. */
  void lockUninterruptibly();

  /** Releases the lock. */
  void unlock();
}
| 6,235 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/FlinkTimerServiceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Objects;
import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.runtime.state.VoidNamespace;
import org.apache.flink.runtime.state.VoidNamespaceSerializer;
import org.apache.flink.streaming.api.operators.InternalTimeServiceManager;
import org.apache.flink.streaming.api.operators.InternalTimerService;
import org.apache.flink.streaming.api.operators.Triggerable;
/**
 * A {@link TimerServiceFactory} backed by Flink's {@link InternalTimeServiceManager}, producing
 * the timer service used to fire delayed messages.
 */
final class FlinkTimerServiceFactory implements TimerServiceFactory {
  private static final String DELAYED_MSG_TIMER_SERVICE_NAME = "delayed-messages-timer-service";
  private final InternalTimeServiceManager<String> timeServiceManager;

  @SuppressWarnings("unchecked")
  FlinkTimerServiceFactory(InternalTimeServiceManager<?> timeServiceManager) {
    // unchecked narrowing of the key type to String — NOTE(review): this assumes the enclosing
    // operator is keyed by String, matching the Triggerable<String, VoidNamespace> signature
    // below; confirm at the operator that constructs this factory.
    this.timeServiceManager =
        (InternalTimeServiceManager<String>) Objects.requireNonNull(timeServiceManager);
  }

  @Override
  public InternalTimerService<VoidNamespace> createTimerService(
      Triggerable<String, VoidNamespace> triggerable) {
    // obtains (or creates) the named timer service, registering `triggerable` as the callback;
    // timers need no namespace, hence VoidNamespace.
    return timeServiceManager.getInternalTimerService(
        DELAYED_MSG_TIMER_SERVICE_NAME,
        StringSerializer.INSTANCE,
        VoidNamespaceSerializer.INSTANCE,
        triggerable);
  }
}
| 6,236 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/PendingAsyncOperations.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.HashMap;
import java.util.Objects;
import java.util.function.Consumer;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.state.State;
import org.apache.flink.statefun.sdk.Address;
/**
 * Tracks asynchronous operations that functions have registered but that have not yet completed.
 *
 * <p>Newly registered operations are kept in an in-memory map and are only written to the keyed
 * Flink state backend when {@link #flush()} is called; operations that complete in between are
 * removed from the in-memory map and never touch the backing store at all.
 *
 * <p>Not thread-safe; expected to be used from a single (operator) thread.
 */
final class PendingAsyncOperations {

  /** Composite map key: the owning function address plus the per-operation future id. */
  private static final class Key {
    private final Address owningAddress;
    private final long futureId;
    // precomputed: keys are hashed on every add/remove of an async operation.
    private final int hash;

    public Key(Address owningAddress, long futureId) {
      this.owningAddress = Objects.requireNonNull(owningAddress);
      this.futureId = futureId;
      this.hash = 37 * owningAddress.hashCode() + Long.hashCode(futureId);
    }

    @Override
    public int hashCode() {
      return hash;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;
      Key that = (Key) o;
      // compare the cheap long first, then the address.
      if (futureId != that.futureId) return false;
      return owningAddress.equals(that.owningAddress);
    }
  }

  // in-memory staging area for operations registered since the last flush.
  private final HashMap<Key, Message> memoryStore = new HashMap<>(32 * 1024);

  /** the underlying backing state handle */
  private final MapState<Long, Message> backingStore;

  // sets the current key of the keyed state backend before accessing backingStore in flush().
  private final Consumer<Address> keySetter;

  @Inject
  PendingAsyncOperations(
      @Label("state") State state,
      @Label("async-operations") MapState<Long, Message> backingStore) {
    this(state::setCurrentKey, backingStore);
  }

  @VisibleForTesting
  PendingAsyncOperations(Consumer<Address> keySetter, MapState<Long, Message> backingStore) {
    this.backingStore = Objects.requireNonNull(backingStore);
    this.keySetter = Objects.requireNonNull(keySetter);
  }

  /**
   * Adds an uncompleted async operation.
   *
   * @param owningAddress the address that had registered the async operation
   * @param futureId the futureId that is associated with that operation
   * @param message the message that was registered with that operation
   */
  void add(Address owningAddress, long futureId, Message message) {
    Key key = new Key(owningAddress, futureId);
    memoryStore.put(key, message);
  }

  /**
   * Removes the completed async operation.
   *
   * <p>Tries the in-memory map first; if the operation had already been flushed to the state
   * backend, it is removed from there instead.
   *
   * <p>NOTE: this method should be called with {@link State#setCurrentKey(Address)} set on the
   * owningAddress. This should be the case as it is called by {@link
   * AsyncMessageDecorator#postApply()}.
   */
  void remove(Address owningAddress, long futureId) {
    if (!removeFromMemoryStore(owningAddress, futureId)) {
      removeFromBackingStore(owningAddress, futureId);
    }
  }

  /** Moves the contents of the memoryStore into the backingStore. */
  void flush() {
    memoryStore.forEach(this::flushState);
    memoryStore.clear();
  }

  // ---------------------------------------------------------------------------------------------------------

  /** @return true if indeed the key was removed, false if the key wasn't present. */
  private boolean removeFromMemoryStore(Address owningAddress, long futureId) {
    return memoryStore.remove(new Key(owningAddress, futureId)) != null;
  }

  private void removeFromBackingStore(Address owningAddress, long futureId) {
    try {
      this.backingStore.remove(futureId);
    } catch (Exception e) {
      throw new IllegalStateException(
          "Unable to remove a registered asynchronous operation for " + owningAddress, e);
    }
  }

  // writes one staged entry to the keyed backing store; the backend's current key must be set
  // to the owning address first, since MapState is scoped to the current key.
  private void flushState(Key key, Message message) {
    keySetter.accept(key.owningAddress);
    try {
      backingStore.put(key.futureId, message);
    } catch (Exception e) {
      // fixed message grammar ("persisted" -> "persist")
      throw new IllegalStateException(
          "Unable to persist a previously registered asynchronous operation for "
              + key.owningAddress,
          e);
    }
  }
}
| 6,237 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/PredefinedFunctionLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Map;
import java.util.Objects;
import org.apache.flink.statefun.flink.common.SetContextClassLoader;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.StatefulFunction;
import org.apache.flink.statefun.sdk.StatefulFunctionProvider;
/**
 * An {@link FunctionLoader} that resolves functions from a predefined set of {@link
 * StatefulFunctionProvider}s, first by exact {@link FunctionType}, then by the type's namespace.
 */
final class PredefinedFunctionLoader implements FunctionLoader {
  private final Map<FunctionType, StatefulFunctionProvider> functionProviders;
  private final Map<String, StatefulFunctionProvider> namespaceFunctionProviders;

  @Inject
  PredefinedFunctionLoader(
      @Label("function-providers") Map<FunctionType, StatefulFunctionProvider> functionProviders,
      @Label("namespace-function-providers")
          Map<String, StatefulFunctionProvider> namespaceFunctionProviders) {
    this.functionProviders = Objects.requireNonNull(functionProviders);
    this.namespaceFunctionProviders = Objects.requireNonNull(namespaceFunctionProviders);
  }

  @Override
  public StatefulFunction load(FunctionType functionType) {
    Objects.requireNonNull(functionType);
    final StatefulFunctionProvider provider = providerFor(functionType);
    final StatefulFunction function = instantiate(provider, functionType);
    if (function == null) {
      throw new IllegalStateException(
          "A provider for a type " + functionType + " has produced a NULL function");
    }
    return function;
  }

  /** Looks up a provider: exact function type first, then the type's namespace. */
  private StatefulFunctionProvider providerFor(FunctionType functionType) {
    final StatefulFunctionProvider byType = functionProviders.get(functionType);
    if (byType != null) {
      return byType;
    }
    final StatefulFunctionProvider byNamespace =
        namespaceFunctionProviders.get(functionType.namespace());
    if (byNamespace != null) {
      return byNamespace;
    }
    throw new IllegalArgumentException("Cannot find a provider for type " + functionType);
  }

  /** Asks the provider for a function, with the provider's class loader set as the context one. */
  private static StatefulFunction instantiate(
      StatefulFunctionProvider provider, FunctionType functionType) {
    try (SetContextClassLoader ignored = new SetContextClassLoader(provider)) {
      return provider.functionOfType(functionType);
    }
  }
}
| 6,238 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/ApplyingContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.sdk.Context;
/**
 * A {@link Context} that can additionally drive the application of a {@link LiveFunction} to an
 * incoming {@link Message}.
 */
public interface ApplyingContext extends Context {

  /** Applies {@code function} to {@code inMessage}, using this context as the function context. */
  void apply(LiveFunction function, Message inMessage);
}
| 6,239 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/AsyncMessageDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Optional;
import java.util.OptionalLong;
import javax.annotation.Nullable;
import org.apache.flink.core.memory.DataOutputView;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.message.MessageFactory;
import org.apache.flink.statefun.sdk.Address;
import org.apache.flink.statefun.sdk.AsyncOperationResult;
import org.apache.flink.statefun.sdk.AsyncOperationResult.Status;
/**
 * A {@link Message} view over a completed (or restored) asynchronous operation.
 *
 * <p>Wraps the original message, whose payload is the user-supplied metadata attached to the
 * async operation, and exposes an {@link AsyncOperationResult} as its own payload instead.
 */
final class AsyncMessageDecorator<T> implements Message {
  private final PendingAsyncOperations pendingAsyncOperations;
  private final long futureId;
  private final Message message;
  private final Throwable throwable;
  private final T result;
  private final boolean restored;

  /** Decorates an operation that has completed, either successfully or exceptionally. */
  AsyncMessageDecorator(
      PendingAsyncOperations pendingAsyncOperations,
      long futureId,
      Message message,
      T result,
      Throwable throwable) {
    this.pendingAsyncOperations = pendingAsyncOperations;
    this.futureId = futureId;
    this.message = message;
    this.result = result;
    this.throwable = throwable;
    this.restored = false;
  }

  /** Decorates an operation restored from a checkpoint, whose outcome is therefore unknown. */
  AsyncMessageDecorator(
      PendingAsyncOperations asyncOperationState, Long futureId, Message metadataMessage) {
    this.pendingAsyncOperations = asyncOperationState;
    this.futureId = futureId;
    this.message = metadataMessage;
    this.result = null;
    this.throwable = null;
    this.restored = true;
  }

  @Nullable
  @Override
  public Address source() {
    return message.source();
  }

  @Override
  public Address target() {
    return message.target();
  }

  @Override
  public Object payload(MessageFactory context, ClassLoader targetClassLoader) {
    final Object metadata = message.payload(context, targetClassLoader);
    return new AsyncOperationResult<>(metadata, status(), result, throwable);
  }

  @Override
  public OptionalLong isBarrierMessage() {
    return OptionalLong.empty();
  }

  @Override
  public Optional<String> cancellationToken() {
    return message.cancellationToken();
  }

  @Override
  public void postApply() {
    // the result has been delivered to the function; drop the operation from the pending set.
    pendingAsyncOperations.remove(source(), futureId);
  }

  @Override
  public Message copy(MessageFactory context) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void writeTo(MessageFactory context, DataOutputView target) {
    throw new UnsupportedOperationException();
  }

  /** Restored operations have an unknown outcome; otherwise success iff no throwable. */
  private Status status() {
    if (restored) {
      return Status.UNKNOWN;
    }
    return throwable == null ? Status.SUCCESS : Status.FAILURE;
  }
}
| 6,240 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/StatefulFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Objects;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.message.MessageFactory;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetrics;
import org.apache.flink.statefun.sdk.Context;
/**
 * A {@link LiveFunction} that invokes a user-supplied SDK function with the thread's context
 * class loader switched to the function's own class loader for the duration of the call.
 */
final class StatefulFunction implements LiveFunction {
  private final org.apache.flink.statefun.sdk.StatefulFunction statefulFunction;
  private final FunctionTypeMetrics metrics;
  private final MessageFactory messageFactory;

  StatefulFunction(
      org.apache.flink.statefun.sdk.StatefulFunction statefulFunction,
      FunctionTypeMetrics metrics,
      MessageFactory messageFactory) {
    this.statefulFunction = Objects.requireNonNull(statefulFunction);
    this.metrics = Objects.requireNonNull(metrics);
    this.messageFactory = Objects.requireNonNull(messageFactory);
  }

  @Override
  public void receive(Context context, Message message) {
    final ClassLoader originalClassLoader = Thread.currentThread().getContextClassLoader();
    try {
      final ClassLoader targetClassLoader = statefulFunction.getClass().getClassLoader();
      Thread.currentThread().setContextClassLoader(targetClassLoader);
      // deserialize the payload with the function's class loader, then hand it to the function.
      statefulFunction.invoke(context, message.payload(messageFactory, targetClassLoader));
    } catch (Exception e) {
      // surface the failing function's type to the caller.
      throw new StatefulFunctionInvocationException(context.self().type(), e);
    } finally {
      // always restore the caller's context class loader.
      Thread.currentThread().setContextClassLoader(originalClassLoader);
    }
  }

  @Override
  public FunctionTypeMetrics metrics() {
    return metrics;
  }
}
| 6,241 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/TimerServiceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import org.apache.flink.runtime.state.VoidNamespace;
import org.apache.flink.streaming.api.operators.InternalTimerService;
import org.apache.flink.streaming.api.operators.Triggerable;
/**
 * Factory for the Flink-internal timer service that fires delayed messages.
 *
 * <p>The produced service is keyed by {@code String} and uses the void namespace.
 */
interface TimerServiceFactory {

  /** Creates a timer service that calls back into {@code triggerable} when timers fire. */
  InternalTimerService<VoidNamespace> createTimerService(
      Triggerable<String, VoidNamespace> triggerable);
}
| 6,242 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/StatefulFunctionInvocationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import org.apache.flink.statefun.sdk.FunctionType;
/** A Stateful Functions exception that may be thrown when invoking a function. */
public final class StatefulFunctionInvocationException extends RuntimeException {

  /**
   * @param functionType the type of the function whose invocation failed; embedded in the message.
   * @param cause the underlying exception thrown by the function.
   */
  public StatefulFunctionInvocationException(FunctionType functionType, Throwable cause) {
    super(
        String.format("An error occurred when attempting to invoke function %s.", functionType),
        cause);
  }
}
| 6,243 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/LocalSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Objects;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.di.Lazy;
import org.apache.flink.statefun.flink.core.message.Message;
/**
 * Forwards messages to the in-process {@link LocalFunctionGroup} for local delivery.
 *
 * <p>The function group is injected lazily to break a construction-time dependency cycle.
 */
final class LocalSink {
  private final Lazy<LocalFunctionGroup> functionGroup;

  @Inject
  LocalSink(@Label("function-group") Lazy<LocalFunctionGroup> functionGroup) {
    this.functionGroup = Objects.requireNonNull(functionGroup);
  }

  /** Enqueues the given (non-null) message onto the local function group. */
  void accept(Message message) {
    Objects.requireNonNull(message);
    final LocalFunctionGroup group = functionGroup.get();
    group.enqueue(message);
  }
}
| 6,244 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/SideOutputSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Map;
import java.util.Objects;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.streaming.api.operators.Output;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.util.OutputTag;
/** Emits egress-bound messages as Flink side outputs, one {@link OutputTag} per known egress. */
final class SideOutputSink {
  private final Map<EgressIdentifier<?>, OutputTag<Object>> outputTags;
  private final Output<?> output;
  // A single record instance is reused for every emission to avoid per-message allocation.
  private final StreamRecord<Object> record = new StreamRecord<>(null);

  SideOutputSink(Map<EgressIdentifier<?>, OutputTag<Object>> outputTags, Output<?> output) {
    this.outputTags = Objects.requireNonNull(outputTags);
    this.output = Objects.requireNonNull(output);
  }

  /**
   * Sends {@code message} to the side output registered for {@code id}.
   *
   * @throws IllegalArgumentException if no output tag was registered for the given egress.
   */
  <T> void accept(EgressIdentifier<T> id, T message) {
    Objects.requireNonNull(id);
    Objects.requireNonNull(message);
    final OutputTag<Object> tag = outputTags.get(id);
    if (tag == null) {
      throw new IllegalArgumentException("Unknown egress " + id);
    }
    output.collect(tag, record.replace(message));
  }
}
| 6,245 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/StatefulFunctionRepository.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import it.unimi.dsi.fastutil.objects.ObjectOpenHashMap;
import java.util.Objects;
import org.apache.flink.statefun.flink.common.SetContextClassLoader;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.message.MessageFactory;
import org.apache.flink.statefun.flink.core.metrics.FuncionTypeMetricsFactory;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetrics;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetricsRepository;
import org.apache.flink.statefun.flink.core.state.FlinkStateBinder;
import org.apache.flink.statefun.flink.core.state.PersistedStates;
import org.apache.flink.statefun.flink.core.state.State;
import org.apache.flink.statefun.sdk.FunctionType;
/**
 * Caches one live {@link StatefulFunction} wrapper per {@link FunctionType}, loading the user
 * function and binding its persisted state on first access.
 */
final class StatefulFunctionRepository
    implements FunctionRepository, FunctionTypeMetricsRepository {
  private final ObjectOpenHashMap<FunctionType, StatefulFunction> instances;
  private final State flinkState;
  private final FunctionLoader functionLoader;
  private final FuncionTypeMetricsFactory metricsFactory;
  private final MessageFactory messageFactory;

  @Inject
  StatefulFunctionRepository(
      @Label("function-loader") FunctionLoader functionLoader,
      @Label("function-metrics-factory") FuncionTypeMetricsFactory functionMetricsFactory,
      @Label("state") State state,
      MessageFactory messageFactory) {
    this.instances = new ObjectOpenHashMap<>();
    this.functionLoader = Objects.requireNonNull(functionLoader);
    this.metricsFactory = Objects.requireNonNull(functionMetricsFactory);
    this.flinkState = Objects.requireNonNull(state);
    this.messageFactory = Objects.requireNonNull(messageFactory);
  }

  /** Returns the cached function instance for {@code type}, loading it on first use. */
  @Override
  public LiveFunction get(FunctionType type) {
    StatefulFunction cached = instances.get(type);
    if (cached != null) {
      return cached;
    }
    StatefulFunction loaded = load(type);
    instances.put(type, loaded);
    return loaded;
  }

  @Override
  public FunctionTypeMetrics getMetrics(FunctionType functionType) {
    return get(functionType).metrics();
  }

  /** Loads a user function, binds its persisted state, and wraps it for the runtime. */
  private StatefulFunction load(FunctionType functionType) {
    org.apache.flink.statefun.sdk.StatefulFunction userFunction =
        functionLoader.load(functionType);
    // Bind state under the user function's class loader, since reflective discovery of
    // persisted fields happens against user code.
    try (SetContextClassLoader ignored = new SetContextClassLoader(userFunction)) {
      FlinkStateBinder stateBinder = new FlinkStateBinder(flinkState, functionType);
      PersistedStates.findReflectivelyAndBind(userFunction, stateBinder);
      FunctionTypeMetrics typeMetrics = metricsFactory.forType(functionType);
      return new StatefulFunction(userFunction, typeMetrics, messageFactory);
    }
  }
}
| 6,246 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/FunctionRepository.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import org.apache.flink.statefun.sdk.FunctionType;
/** A per-task registry that resolves a {@link FunctionType} to a live, invokable function. */
public interface FunctionRepository {
  /** Returns the (possibly cached) live function instance for the given type. */
  LiveFunction get(FunctionType type);
}
| 6,247 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/Reductions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.Executor;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.runtime.state.KeyedStateBackend;
import org.apache.flink.runtime.state.internal.InternalListState;
import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverse;
import org.apache.flink.statefun.flink.core.backpressure.BackPressureValve;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Lazy;
import org.apache.flink.statefun.flink.core.di.ObjectContainer;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.message.MessageFactory;
import org.apache.flink.statefun.flink.core.metrics.FlinkFuncionTypeMetricsFactory;
import org.apache.flink.statefun.flink.core.metrics.FlinkFunctionDispatcherMetrics;
import org.apache.flink.statefun.flink.core.metrics.FuncionTypeMetricsFactory;
import org.apache.flink.statefun.flink.core.metrics.FunctionDispatcherMetrics;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetricsRepository;
import org.apache.flink.statefun.flink.core.state.FlinkState;
import org.apache.flink.statefun.flink.core.state.State;
import org.apache.flink.statefun.flink.core.types.DynamicallyRegisteredTypes;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.streaming.api.operators.Output;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.util.OutputTag;
/**
 * The entry point for applying messages to stateful functions on a single task: messages are
 * enqueued into the {@link LocalFunctionGroup} and then drained until no further local messages
 * remain ("reduced").
 *
 * <p>{@link #create} wires the whole per-operator object graph through the {@link
 * ObjectContainer}. NOTE(review): the registration labels and bindings below are looked up by
 * name elsewhere; treat them as load-bearing and do not reorder or rename casually.
 */
final class Reductions {
  private final LocalFunctionGroup localFunctionGroup;
  private final PendingAsyncOperations pendingAsyncOperations;

  @Inject
  Reductions(PendingAsyncOperations pendingAsyncOperations, LocalFunctionGroup functionGroup) {
    this.localFunctionGroup = Objects.requireNonNull(functionGroup);
    this.pendingAsyncOperations = Objects.requireNonNull(pendingAsyncOperations);
  }

  /** Builds a fully wired {@link Reductions} instance for one operator subtask. */
  static Reductions create(
      BackPressureValve valve,
      StatefulFunctionsUniverse statefulFunctionsUniverse,
      RuntimeContext context,
      KeyedStateBackend<Object> keyedStateBackend,
      TimerServiceFactory timerServiceFactory,
      InternalListState<String, Long, Message> delayedMessagesBufferState,
      MapState<String, Long> delayMessageIndex,
      Map<EgressIdentifier<?>, OutputTag<Object>> sideOutputs,
      Output<StreamRecord<Message>> output,
      MessageFactory messageFactory,
      Executor mailboxExecutor,
      MetricGroup metricGroup,
      MapState<Long, Message> asyncOperations) {
    ObjectContainer container = new ObjectContainer();
    container.add("function-providers", Map.class, statefulFunctionsUniverse.functions());
    container.add(
        "namespace-function-providers", Map.class, statefulFunctionsUniverse.namespaceFunctions());
    container.add(
        "function-repository", FunctionRepository.class, StatefulFunctionRepository.class);
    // the repository doubles as the per-type metrics lookup
    container.addAlias(
        "function-metrics-repository",
        FunctionTypeMetricsRepository.class,
        "function-repository",
        FunctionRepository.class);
    // for FlinkState
    container.add("runtime-context", RuntimeContext.class, context);
    container.add("keyed-state-backend", KeyedStateBackend.class, keyedStateBackend);
    container.add(new DynamicallyRegisteredTypes(statefulFunctionsUniverse.types()));
    container.add("state", State.class, FlinkState.class);
    // For reductions
    container.add(messageFactory);
    container.add(
        new Partition(
            context.getMaxNumberOfParallelSubtasks(),
            context.getNumberOfParallelSubtasks(),
            context.getIndexOfThisSubtask()));
    container.add(new RemoteSink(output));
    container.add(new SideOutputSink(sideOutputs, output));
    container.add("applying-context", ApplyingContext.class, ReusableContext.class);
    container.add(LocalSink.class);
    container.add("function-loader", FunctionLoader.class, PredefinedFunctionLoader.class);
    container.add(Reductions.class);
    container.add(LocalFunctionGroup.class);
    container.add(
        "function-metrics-factory",
        FuncionTypeMetricsFactory.class,
        new FlinkFuncionTypeMetricsFactory(metricGroup));
    container.add(
        "function-dispatcher-metrics",
        FunctionDispatcherMetrics.class,
        new FlinkFunctionDispatcherMetrics(metricGroup));
    // for delayed messages
    container.add(
        "delayed-messages-buffer-state", InternalListState.class, delayedMessagesBufferState);
    container.add("delayed-message-index", MapState.class, delayMessageIndex);
    container.add(
        "delayed-messages-buffer",
        DelayedMessagesBuffer.class,
        FlinkStateDelayedMessagesBuffer.class);
    container.add(
        "delayed-messages-timer-service-factory", TimerServiceFactory.class, timerServiceFactory);
    container.add(DelaySink.class);
    container.add(DelayMessageHandler.class);
    // lazy providers for the sinks (breaks the sink <-> function-group construction cycle)
    container.add("function-group", new Lazy<>(LocalFunctionGroup.class));
    container.add("reductions", new Lazy<>(Reductions.class));
    container.add("mailbox-executor", Executor.class, mailboxExecutor);
    // for the async operations
    container.add("async-operations", MapState.class, asyncOperations);
    container.add(AsyncSink.class);
    container.add(PendingAsyncOperations.class);
    container.add("backpressure-valve", BackPressureValve.class, valve);
    return container.get(Reductions.class);
  }

  /** Enqueues {@code message} and then processes everything until the local queue drains. */
  void apply(Message message) {
    enqueue(message);
    processEnvelopes();
  }

  /** Enqueues a message for its target function without processing it yet. */
  void enqueue(Message message) {
    localFunctionGroup.enqueue(message);
  }

  /**
   * Re-delivers a restored async-operation metadata message to its owning function, wrapped so
   * the function is told the outcome of the operation is unknown after recovery.
   */
  void enqueueAsyncOperationAfterRestore(Long futureId, Message metadataMessage) {
    Message adaptor =
        new AsyncMessageDecorator<>(pendingAsyncOperations, futureId, metadataMessage);
    enqueue(adaptor);
  }

  /** Applies queued messages one by one until the local function group's queue is empty. */
  @SuppressWarnings("StatementWithEmptyBody")
  void processEnvelopes() {
    while (localFunctionGroup.processNextEnvelope()) {
      // TODO: consider preemption if too many local messages.
    }
  }

  /** Flushes pending async-operation bookkeeping to state; called on checkpoint snapshot. */
  void snapshotAsyncOperations() {
    pendingAsyncOperations.flush();
  }
}
| 6,248 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/LocalFunctionGroup.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.ArrayDeque;
import java.util.Objects;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.sdk.FunctionType;
/**
 * A FIFO queue of messages destined for functions on this task, applied one at a time through
 * the {@link ApplyingContext}.
 */
final class LocalFunctionGroup {
  private final FunctionRepository repository;
  private final ApplyingContext context;

  /**
   * pending is a queue of pairs (LiveFunction, Message) as enqueued via {@link #enqueue(Message)}.
   * In order to avoid an object pool, or redundant allocations we store these pairs linearly one
   * after another in the queue. Invariant: elements alternate function, message, function,
   * message, ... — enqueue and dequeue must always operate on two elements at a time.
   */
  private final ArrayDeque<Object> pending;

  @Inject
  LocalFunctionGroup(
      @Label("function-repository") FunctionRepository repository,
      @Label("applying-context") ApplyingContext context) {
    this.pending = new ArrayDeque<>(4096);
    this.repository = Objects.requireNonNull(repository);
    this.context = Objects.requireNonNull(context);
  }

  /** Resolves the target function eagerly and appends the (function, message) pair. */
  void enqueue(Message message) {
    FunctionType targetType = message.target().type();
    LiveFunction fn = repository.get(targetType);
    pending.addLast(fn);
    pending.addLast(message);
  }

  /**
   * Applies the next (function, message) pair, if any.
   *
   * @return true if a message was processed, false if the queue was empty.
   */
  boolean processNextEnvelope() {
    Object fn = pending.pollFirst();
    if (fn == null) {
      return false;
    }
    // by the alternating-pair invariant, the element following a function is its message
    LiveFunction liveFunction = (LiveFunction) fn;
    Message message = (Message) pending.pollFirst();
    context.apply(liveFunction, message);
    return true;
  }
}
| 6,249 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/RemoteSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Objects;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.streaming.api.operators.Output;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
/** Forwards messages to the operator's main output, i.e. across the network shuffle. */
final class RemoteSink {
  private final Output<StreamRecord<Message>> output;
  // A single record wrapper is reused so emitting a message does not allocate.
  private final StreamRecord<Message> record = new StreamRecord<>(null);

  RemoteSink(Output<StreamRecord<Message>> output) {
    this.output = Objects.requireNonNull(output);
  }

  /** Emits {@code envelope} downstream, wrapped in the reused stream record. */
  void accept(Message envelope) {
    Objects.requireNonNull(envelope);
    record.replace(envelope);
    output.collect(record);
  }
}
| 6,250 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/FunctionGroupDispatchFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Map;
import java.util.Objects;
import org.apache.flink.api.common.operators.MailboxExecutor;
import org.apache.flink.statefun.flink.core.StatefulFunctionsConfig;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.streaming.api.operators.*;
import org.apache.flink.util.OutputTag;
/**
 * An operator factory that creates the {@link FunctionGroupOperator}, the operator that
 * dispatches incoming {@link Message}s to stateful functions.
 *
 * <p>Implementing {@link YieldingOperatorFactory} makes the Flink runtime call {@link
 * #setMailboxExecutor(MailboxExecutor)} before {@link #createStreamOperator}, so the created
 * operator can enqueue actions into the task's mailbox.
 */
public final class FunctionGroupDispatchFactory
    implements OneInputStreamOperatorFactory<Message, Message>, YieldingOperatorFactory<Message> {
  private static final long serialVersionUID = 1;

  private final StatefulFunctionsConfig configuration;
  private final Map<EgressIdentifier<?>, OutputTag<Object>> sideOutputs;

  // Supplied by the runtime via setMailboxExecutor() before createStreamOperator();
  // transient because this factory is serialized as part of the job graph.
  private transient MailboxExecutor mailboxExecutor;

  public FunctionGroupDispatchFactory(
      StatefulFunctionsConfig configuration,
      Map<EgressIdentifier<?>, OutputTag<Object>> sideOutputs) {
    // fail fast at graph-construction time, consistent with the rest of this package
    this.configuration = Objects.requireNonNull(configuration);
    this.sideOutputs = Objects.requireNonNull(sideOutputs);
  }

  @Override
  public void setMailboxExecutor(MailboxExecutor mailboxExecutor) {
    this.mailboxExecutor =
        Objects.requireNonNull(mailboxExecutor, "Mailbox executor can't be NULL");
  }

  @SuppressWarnings("unchecked")
  @Override
  public <T extends StreamOperator<Message>> T createStreamOperator(
      StreamOperatorParameters<Message> streamOperatorParameters) {
    FunctionGroupOperator fn =
        new FunctionGroupOperator(
            sideOutputs,
            configuration,
            mailboxExecutor,
            ChainingStrategy.ALWAYS,
            streamOperatorParameters.getProcessingTimeService());
    fn.setup(
        streamOperatorParameters.getContainingTask(),
        streamOperatorParameters.getStreamConfig(),
        streamOperatorParameters.getOutput());
    // safe: this factory is only ever asked for a StreamOperator<Message>
    return (T) fn;
  }

  @Override
  public void setChainingStrategy(ChainingStrategy chainingStrategy) {
    // We ignore the chaining strategy, because we only use ChainingStrategy.ALWAYS
  }

  @Override
  public ChainingStrategy getChainingStrategy() {
    return ChainingStrategy.ALWAYS;
  }

  @SuppressWarnings("rawtypes") // the raw StreamOperator return type is mandated by the interface
  @Override
  public Class<? extends StreamOperator> getStreamOperatorClass(ClassLoader classLoader) {
    return FunctionGroupOperator.class;
  }
}
| 6,251 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/AsyncSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Deque;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.flink.statefun.flink.core.backpressure.BackPressureValve;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.di.Lazy;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.metrics.FunctionDispatcherMetrics;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetrics;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetricsRepository;
import org.apache.flink.statefun.flink.core.queue.Locks;
import org.apache.flink.statefun.flink.core.queue.MpscQueue;
import org.apache.flink.statefun.sdk.Address;
/**
 * Registers user-initiated async operations and routes their completions back onto the operator
 * (mailbox) thread, where the results are applied as ordinary messages.
 *
 * <p>Completions may arrive on arbitrary threads, hence the multi-producer/single-consumer
 * queue; draining always happens on the operator thread via the mailbox executor.
 */
final class AsyncSink {
  private final PendingAsyncOperations pendingAsyncOperations;
  private final Lazy<Reductions> reductions;
  private final Executor operatorMailbox;
  private final BackPressureValve backPressureValve;
  private final FunctionTypeMetricsRepository metricsRepository;
  private final FunctionDispatcherMetrics dispatcherMetrics;
  // completed futures land here from arbitrary threads; drained on the operator thread
  private final MpscQueue<Message> completed = new MpscQueue<>(32768, Locks.jdkReentrantLock());

  @Inject
  AsyncSink(
      PendingAsyncOperations pendingAsyncOperations,
      @Label("mailbox-executor") Executor operatorMailbox,
      @Label("reductions") Lazy<Reductions> reductions,
      @Label("backpressure-valve") BackPressureValve backPressureValve,
      @Label("function-metrics-repository") FunctionTypeMetricsRepository metricsRepository,
      @Label("function-dispatcher-metrics") FunctionDispatcherMetrics dispatcherMetrics) {
    this.pendingAsyncOperations = Objects.requireNonNull(pendingAsyncOperations);
    this.reductions = Objects.requireNonNull(reductions);
    this.operatorMailbox = Objects.requireNonNull(operatorMailbox);
    this.backPressureValve = Objects.requireNonNull(backPressureValve);
    this.metricsRepository = Objects.requireNonNull(metricsRepository);
    this.dispatcherMetrics = Objects.requireNonNull(dispatcherMetrics);
  }

  /**
   * Registers an async operation owned by {@code sourceAddress}: persists its metadata message
   * under a random future id, updates back-pressure/metrics bookkeeping, and arranges for the
   * future's completion (success or failure) to be enqueued back onto the operator thread.
   */
  <T> void accept(Address sourceAddress, Message metadata, CompletableFuture<T> future) {
    final long futureId = ThreadLocalRandom.current().nextLong(); // TODO: is this is good enough?
    // we keep the message in state (associated with futureId) until either:
    // 1. the future successfully completes and the message is processed. The state would be
    // cleared by the AsyncMessageDecorator after a successful application.
    // 2. after recovery, we clear that state by notifying the owning function that we don't know
    // what happened
    // with that particular async operation.
    pendingAsyncOperations.add(sourceAddress, futureId, metadata);
    backPressureValve.notifyAsyncOperationRegistered();
    metricsRepository.getMetrics(sourceAddress.type()).asyncOperationRegistered();
    dispatcherMetrics.asyncOperationRegistered();
    future.whenComplete((result, throwable) -> enqueue(metadata, futureId, result, throwable));
  }

  /**
   * Requests to stop processing any further input for that address, as long as there is an
   * uncompleted async operation (owned by @address).
   *
   * @param address the address
   */
  void blockAddress(Address address) {
    backPressureValve.blockAddress(address);
    metricsRepository.getMetrics(address.type()).blockedAddress();
  }

  // Completion callback; may run on any thread. Wraps the outcome and schedules a drain when
  // this completion is the one that made the queue non-empty.
  private <T> void enqueue(Message message, long futureId, T result, Throwable throwable) {
    AsyncMessageDecorator<T> decoratedMessage =
        new AsyncMessageDecorator<>(pendingAsyncOperations, futureId, message, result, throwable);
    final int size = completed.add(decoratedMessage);
    if (size == 1) {
      // the queue has become non empty, we need to schedule a drain operation.
      operatorMailbox.execute(this::drainOnOperatorThread);
    }
  }

  // Runs on the operator thread only: drains the completed batch, updates back-pressure and
  // metrics per message, then processes everything that was enqueued.
  private void drainOnOperatorThread() {
    Deque<Message> batchOfCompletedFutures = completed.drainAll();
    Reductions reductions = this.reductions.get();
    Message message;
    while ((message = batchOfCompletedFutures.poll()) != null) {
      Address target = message.target();
      FunctionTypeMetrics functionMetrics = metricsRepository.getMetrics(target.type());
      // must check whether address was blocked BEFORE notifying completion
      if (backPressureValve.isAddressBlocked(target)) {
        functionMetrics.unblockedAddress();
      }
      backPressureValve.notifyAsyncOperationCompleted(target);
      functionMetrics.asyncOperationCompleted();
      dispatcherMetrics.asyncOperationCompleted();
      reductions.enqueue(message);
    }
    reductions.processEnvelopes();
  }
}
| 6,252 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/ReusableContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import org.apache.flink.statefun.flink.core.backpressure.InternalContext;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.message.MessageFactory;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetrics;
import org.apache.flink.statefun.flink.core.state.State;
import org.apache.flink.statefun.sdk.Address;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.metrics.Metrics;
/**
 * A single, reused {@code Context} handed to every function invocation on this task.
 *
 * <p>{@link #apply} installs the in-flight message and function into the {@code in} and
 * {@code function} fields before invoking the function; all other methods read that per-call
 * state, so this class is only valid to use from within a function invocation, on the operator
 * thread. Not thread-safe.
 */
final class ReusableContext implements ApplyingContext, InternalContext {
  private final Partition thisPartition;
  private final LocalSink localSink;
  private final RemoteSink remoteSink;
  private final DelaySink delaySink;
  private final AsyncSink asyncSink;
  private final SideOutputSink sideOutputSink;
  private final State state;
  private final MessageFactory messageFactory;

  // per-invocation state, set by apply() and cleared when it returns
  private Message in;
  private LiveFunction function;

  @Inject
  ReusableContext(
      Partition partition,
      LocalSink localSink,
      RemoteSink remoteSink,
      DelaySink delaySink,
      AsyncSink asyncSink,
      SideOutputSink sideoutputSink,
      @Label("state") State state,
      MessageFactory messageFactory) {
    this.thisPartition = Objects.requireNonNull(partition);
    this.localSink = Objects.requireNonNull(localSink);
    this.remoteSink = Objects.requireNonNull(remoteSink);
    this.delaySink = Objects.requireNonNull(delaySink);
    this.sideOutputSink = Objects.requireNonNull(sideoutputSink);
    this.state = Objects.requireNonNull(state);
    this.messageFactory = Objects.requireNonNull(messageFactory);
    this.asyncSink = Objects.requireNonNull(asyncSink);
  }

  /** Applies one message: scopes state to the target address, invokes, then clears the slot. */
  @Override
  public void apply(LiveFunction function, Message inMessage) {
    this.in = inMessage;
    this.function = function;
    state.setCurrentKey(inMessage.target());
    function.metrics().incomingMessage();
    function.receive(this, in);
    in.postApply();
    this.in = null;
  }

  /** Sends {@code what} to {@code to}, choosing the local or remote sink by partition. */
  @Override
  public void send(Address to, Object what) {
    Objects.requireNonNull(to);
    Objects.requireNonNull(what);
    Message envelope = messageFactory.from(self(), to, what);
    if (thisPartition.contains(to)) {
      localSink.accept(envelope);
      function.metrics().outgoingLocalMessage();
    } else {
      remoteSink.accept(envelope);
      function.metrics().outgoingRemoteMessage();
    }
  }

  /** Sends {@code what} to the given egress via the side-output sink. */
  @Override
  public <T> void send(EgressIdentifier<T> egress, T what) {
    Objects.requireNonNull(egress);
    Objects.requireNonNull(what);
    function.metrics().outgoingEgressMessage();
    sideOutputSink.accept(egress, what);
  }

  /** Schedules {@code message} to be delivered to {@code to} after {@code delay}. */
  @Override
  public void sendAfter(Duration delay, Address to, Object message) {
    Objects.requireNonNull(delay);
    Objects.requireNonNull(to);
    Objects.requireNonNull(message);
    Message envelope = messageFactory.from(self(), to, message);
    delaySink.accept(envelope, delay.toMillis());
  }

  /** Like {@link #sendAfter(Duration, Address, Object)}, tagged with a cancellation token. */
  @Override
  public void sendAfter(Duration delay, Address to, Object message, String cancellationToken) {
    Objects.requireNonNull(delay);
    Objects.requireNonNull(to);
    Objects.requireNonNull(message);
    Objects.requireNonNull(cancellationToken);
    Message envelope = messageFactory.from(self(), to, message, cancellationToken);
    delaySink.accept(envelope, delay.toMillis());
  }

  /** Cancels a previously scheduled delayed message by its cancellation token. */
  @Override
  public void cancelDelayedMessage(String cancellationToken) {
    Objects.requireNonNull(cancellationToken);
    delaySink.removeMessageByCancellationToken(cancellationToken);
  }

  /**
   * Registers an async operation owned by the current function; the metadata message is sent
   * from self to self so the completion is delivered back to the owning address.
   */
  @Override
  public <M, T> void registerAsyncOperation(M metadata, CompletableFuture<T> future) {
    Objects.requireNonNull(metadata);
    Objects.requireNonNull(future);
    Message message = messageFactory.from(self(), self(), metadata);
    asyncSink.accept(self(), message, future);
  }

  @Override
  public Metrics metrics() {
    return function.metrics().functionTypeScopedMetrics();
  }

  /** Blocks further input for the current address until its async operations complete. */
  @Override
  public void awaitAsyncOperationComplete() {
    asyncSink.blockAddress(self());
  }

  @Override
  public FunctionTypeMetrics functionTypeMetrics() {
    return function.metrics();
  }

  /** The sender of the message currently being applied. */
  @Override
  public Address caller() {
    return in.source();
  }

  /** The address of the function currently being applied. */
  @Override
  public Address self() {
    return in.target();
  }
}
| 6,253 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/DelayMessageHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Objects;
import java.util.function.Consumer;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.di.Lazy;
import org.apache.flink.statefun.flink.core.message.Message;
/**
 * Dispatches delayed messages once their trigger time has been reached.
 *
 * <p>A message whose target address belongs to this task's partition is enqueued for local
 * processing; any other message is shuffled to the remote task that owns its target.
 */
final class DelayMessageHandler implements Consumer<Message> {

  private final RemoteSink remote;
  private final Lazy<Reductions> localReductions;
  private final Partition localPartition;

  @Inject
  public DelayMessageHandler(
      RemoteSink remoteSink,
      @Label("reductions") Lazy<Reductions> reductions,
      Partition partition) {
    this.remote = Objects.requireNonNull(remoteSink);
    this.localReductions = Objects.requireNonNull(reductions);
    this.localPartition = Objects.requireNonNull(partition);
  }

  @Override
  public void accept(Message message) {
    if (!localPartition.contains(message.target())) {
      // owned by a different task: ship over the network shuffle.
      remote.accept(message);
      return;
    }
    // owned by this task: enqueue for local application.
    localReductions.get().enqueue(message);
  }

  /** Invoked once before the first message of a timer firing is handed to {@link #accept}. */
  public void onStart() {}

  /** Invoked once after the last message of a timer firing; processes locally enqueued envelopes. */
  public void onComplete() {
    localReductions.get().processEnvelopes();
  }
}
| 6,254 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/Partition.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import static org.apache.flink.runtime.state.KeyGroupRangeAssignment.assignKeyToParallelOperator;
import org.apache.flink.statefun.flink.core.common.KeyBy;
import org.apache.flink.statefun.sdk.Address;
/**
 * Describes the key-group partition owned by one parallel instance of the functions operator,
 * and answers whether a given {@link Address} is routed to that instance.
 */
class Partition {

  private final int maxParallelism;
  private final int parallelism;
  private final int thisOperatorIndex;

  Partition(int maxParallelism, int parallelism, int thisOperatorIndex) {
    this.maxParallelism = maxParallelism;
    this.parallelism = parallelism;
    this.thisOperatorIndex = thisOperatorIndex;
  }

  /** Returns true iff {@code address} hashes (via its key) to this operator instance. */
  boolean contains(Address address) {
    final int owningOperatorIndex =
        assignKeyToParallelOperator(KeyBy.apply(address), maxParallelism, parallelism);
    return owningOperatorIndex == thisOperatorIndex;
  }
}
| 6,255 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/LiveFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetrics;
import org.apache.flink.statefun.sdk.Context;
/** A runtime-bound stateful function that can receive messages and exposes its metrics. */
interface LiveFunction {

  /** Applies {@code message} to this function within the supplied {@code context}. */
  void receive(Context context, Message message);

  /** Returns the metrics associated with this function's type. */
  FunctionTypeMetrics metrics();
}
| 6,256 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/FlinkStateDelayedMessagesBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.function.Consumer;
import javax.annotation.Nullable;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.runtime.state.internal.InternalListState;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.message.Message;
/**
 * A {@link DelayedMessagesBuffer} backed by Flink keyed state.
 *
 * <p>Messages are stored in a namespaced {@link InternalListState}, where the namespace is the
 * timestamp at which the buffered messages are due to fire. Additionally, a {@link MapState}
 * indexes a user-supplied cancellation token to that timestamp, so that a single message can be
 * removed by token before it fires.
 */
final class FlinkStateDelayedMessagesBuffer implements DelayedMessagesBuffer {

  // state names under which the two state primitives are registered with the backend.
  static final String BUFFER_STATE_NAME = "delayed-messages-buffer";
  static final String INDEX_STATE_NAME = "delayed-message-index";

  // namespace (trigger timestamp) -> messages due at that timestamp.
  private final InternalListState<String, Long, Message> bufferState;
  // cancellation token -> timestamp its message was buffered under.
  private final MapState<String, Long> cancellationTokenToTimestamp;

  @Inject
  FlinkStateDelayedMessagesBuffer(
      @Label("delayed-messages-buffer-state") InternalListState<String, Long, Message> bufferState,
      @Label("delayed-message-index") MapState<String, Long> cancellationTokenToTimestamp) {
    this.bufferState = Objects.requireNonNull(bufferState);
    this.cancellationTokenToTimestamp = Objects.requireNonNull(cancellationTokenToTimestamp);
  }

  /**
   * Applies {@code fn} to every message buffered for {@code timestamp} and clears the buffer
   * (and the token index entries) for that timestamp.
   */
  @Override
  public void forEachMessageAt(long timestamp, Consumer<Message> fn) {
    try {
      forEachMessageThrows(timestamp, fn);
    } catch (Exception e) {
      // state access throws checked exceptions; surface them unchecked to the caller.
      throw new IllegalStateException(e);
    }
  }

  @Override
  public OptionalLong removeMessageByCancellationToken(String token) {
    try {
      return remove(token);
    } catch (Exception e) {
      throw new IllegalStateException(
          "Failed clearing a message with a cancellation token " + token, e);
    }
  }

  @Override
  public void add(Message message, long untilTimestamp) {
    try {
      addThrows(message, untilTimestamp);
    } catch (Exception e) {
      throw new RuntimeException("Error adding delayed message to state buffer: " + message, e);
    }
  }

  // -----------------------------------------------------------------------------------------------------
  // Internal
  // -----------------------------------------------------------------------------------------------------

  private void forEachMessageThrows(long timestamp, Consumer<Message> fn) throws Exception {
    // select the per-timestamp list via the state namespace.
    bufferState.setCurrentNamespace(timestamp);
    for (Message message : bufferState.get()) {
      // the message is about to be delivered; its cancellation token is no longer valid.
      removeMessageIdMapping(message);
      fn.accept(message);
    }
    bufferState.clear();
  }

  private void addThrows(Message message, long untilTimestamp) throws Exception {
    bufferState.setCurrentNamespace(untilTimestamp);
    bufferState.add(message);
    Optional<String> maybeToken = message.cancellationToken();
    if (!maybeToken.isPresent()) {
      // no token: the message cannot be cancelled, so there is nothing to index.
      return;
    }
    String cancellationToken = maybeToken.get();
    // tokens must be unique among pending messages; a duplicate indicates a caller bug.
    @Nullable Long previousTimestamp = cancellationTokenToTimestamp.get(cancellationToken);
    if (previousTimestamp != null) {
      throw new IllegalStateException(
          "Trying to associate a message with cancellation token "
              + cancellationToken
              + " and timestamp "
              + untilTimestamp
              + ", but a message with the same cancellation token exists and with a timestamp "
              + previousTimestamp);
    }
    cancellationTokenToTimestamp.put(cancellationToken, untilTimestamp);
  }

  /**
   * Removes the message registered under {@code cancellationToken} from the buffer.
   *
   * @return the trigger timestamp, present only when the removed message was the last one
   *     buffered for that timestamp (so the caller may delete the corresponding timer).
   */
  private OptionalLong remove(String cancellationToken) throws Exception {
    final @Nullable Long untilTimestamp = cancellationTokenToTimestamp.get(cancellationToken);
    if (untilTimestamp == null) {
      // The message associated with @cancellationToken has already been delivered, or previously
      // removed.
      return OptionalLong.empty();
    }
    cancellationTokenToTimestamp.remove(cancellationToken);
    bufferState.setCurrentNamespace(untilTimestamp);
    List<Message> newList = removeMessageByToken(bufferState.get(), cancellationToken);
    if (!newList.isEmpty()) {
      // There are more messages to process, so we indicate to the caller that
      // they should NOT cancel the timer.
      bufferState.update(newList);
      return OptionalLong.empty();
    }
    // There are no more message to remove, we clear the buffer and indicate
    // to our caller to remove the timer for @untilTimestamp
    bufferState.clear();
    return OptionalLong.of(untilTimestamp);
  }

  // ---------------------------------------------------------------------------------------------------------
  // Helpers
  // ---------------------------------------------------------------------------------------------------------

  /** Drops the token-to-timestamp index entry of {@code message}, if it carries a token. */
  private void removeMessageIdMapping(Message message) throws Exception {
    Optional<String> maybeToken = message.cancellationToken();
    if (maybeToken.isPresent()) {
      cancellationTokenToTimestamp.remove(maybeToken.get());
    }
  }

  /** Returns a copy of {@code messages} with every message carrying {@code token} removed. */
  private static List<Message> removeMessageByToken(Iterable<Message> messages, String token) {
    ArrayList<Message> newList = new ArrayList<>();
    for (Message message : messages) {
      Optional<String> thisMessageId = message.cancellationToken();
      if (!thisMessageId.isPresent() || !Objects.equals(thisMessageId.get(), token)) {
        newList.add(message);
      }
    }
    return newList;
  }
}
| 6,257 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/FunctionLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.StatefulFunction;
/** Resolves a concrete {@link StatefulFunction} instance for a given {@link FunctionType}. */
interface FunctionLoader {

  /**
   * Loads the stateful function registered for {@code type}.
   *
   * @param type the function type to resolve.
   * @return the stateful function instance bound to that type.
   */
  StatefulFunction load(FunctionType type);
}
| 6,258 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/FunctionGroupOperator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import org.apache.flink.api.common.operators.MailboxExecutor;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.runtime.state.KeyedStateBackend;
import org.apache.flink.runtime.state.StateSnapshotContext;
import org.apache.flink.runtime.state.internal.InternalListState;
import org.apache.flink.statefun.flink.core.StatefulFunctionsConfig;
import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverse;
import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverses;
import org.apache.flink.statefun.flink.core.backpressure.BackPressureValve;
import org.apache.flink.statefun.flink.core.backpressure.ThresholdBackPressureValve;
import org.apache.flink.statefun.flink.core.common.MailboxExecutorFacade;
import org.apache.flink.statefun.flink.core.common.ManagingResources;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.message.MessageFactory;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.StatefulFunctionProvider;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
import org.apache.flink.streaming.api.operators.ChainingStrategy;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;
import org.apache.flink.util.OutputTag;
/**
 * The Flink stream operator that hosts all stateful functions of the job.
 *
 * <p>On {@link #open()} it obtains the {@link StatefulFunctionsUniverse}, registers the keyed
 * state used for delayed messages and in-flight async operations, and builds the
 * {@link Reductions} pipeline that applies each incoming {@link Message} to its target function.
 */
public class FunctionGroupOperator extends AbstractStreamOperator<Message>
    implements OneInputStreamOperator<Message, Message> {

  private static final long serialVersionUID = 1L;

  // -- configuration
  private final Map<EgressIdentifier<?>, OutputTag<Object>> sideOutputs;
  private final StatefulFunctionsConfig configuration;

  // -- runtime (initialized in the constructor / open(); not part of the serialized form)
  private transient Reductions reductions;
  private transient MailboxExecutor mailboxExecutor;
  private transient BackPressureValve backPressureValve;
  private transient List<ManagingResources> managingResources;

  FunctionGroupOperator(
      Map<EgressIdentifier<?>, OutputTag<Object>> sideOutputs,
      StatefulFunctionsConfig configuration,
      MailboxExecutor mailboxExecutor,
      ChainingStrategy chainingStrategy,
      ProcessingTimeService processingTimeService) {
    this.sideOutputs = Objects.requireNonNull(sideOutputs);
    this.configuration = Objects.requireNonNull(configuration);
    // NOTE(review): a transient field assigned in the constructor — this presumes the operator
    // is constructed at runtime (e.g. by an operator factory) and never deserialized; confirm.
    this.mailboxExecutor = Objects.requireNonNull(mailboxExecutor);
    this.chainingStrategy = chainingStrategy;
    this.processingTimeService = processingTimeService;
  }

  // ------------------------------------------------------------------------------------------------------------------
  // Operator API
  // ------------------------------------------------------------------------------------------------------------------

  @Override
  public void processElement(StreamRecord<Message> record) throws InterruptedException {
    // cooperative back-pressure: yield to the mailbox (letting async completions and timers run)
    // while the valve signals that too many async operations are in flight.
    while (backPressureValve.shouldBackPressure()) {
      mailboxExecutor.yield();
    }
    reductions.apply(record.getValue());
  }

  @Override
  public void open() throws Exception {
    super.open();

    final StatefulFunctionsUniverse statefulFunctionsUniverse =
        statefulFunctionsUniverse(configuration);

    // the serializer configured for the operator's input messages; duplicated per state
    // descriptor because serializers are not guaranteed to be shareable.
    final TypeSerializer<Message> envelopeSerializer =
        getOperatorConfig().getTypeSerializerIn(0, getContainingTask().getUserCodeClassLoader());
    // futureId -> metadata message, for async operations still pending at checkpoint time.
    final MapStateDescriptor<Long, Message> asyncOperationStateDescriptor =
        new MapStateDescriptor<>(
            "asyncOperations", LongSerializer.INSTANCE, envelopeSerializer.duplicate());
    // buffered delayed messages, namespaced by trigger timestamp.
    final ListStateDescriptor<Message> delayedMessageStateDescriptor =
        new ListStateDescriptor<>(
            FlinkStateDelayedMessagesBuffer.BUFFER_STATE_NAME, envelopeSerializer.duplicate());
    // cancellation token -> trigger timestamp of the corresponding delayed message.
    final MapStateDescriptor<String, Long> delayedMessageIndexDescriptor =
        new MapStateDescriptor<>(
            FlinkStateDelayedMessagesBuffer.INDEX_STATE_NAME, String.class, Long.class);
    final MapState<String, Long> delayedMessageIndex =
        getRuntimeContext().getMapState(delayedMessageIndexDescriptor);
    final MapState<Long, Message> asyncOperationState =
        getRuntimeContext().getMapState(asyncOperationStateDescriptor);

    Objects.requireNonNull(mailboxExecutor, "MailboxExecutor is unexpectedly NULL");

    this.backPressureValve =
        new ThresholdBackPressureValve(configuration.getMaxAsyncOperationsPerTask());

    //
    // Remember what function providers are managing resources, so that we can close them when
    // this task closes.
    this.managingResources =
        resourceManagingFunctionProviders(statefulFunctionsUniverse.functions());

    //
    // the core logic of applying messages to functions.
    //
    this.reductions =
        Reductions.create(
            backPressureValve,
            statefulFunctionsUniverse,
            getRuntimeContext(),
            getKeyedStateBackend(),
            new FlinkTimerServiceFactory(
                super.getTimeServiceManager().orElseThrow(IllegalStateException::new)),
            delayedMessagesBufferState(delayedMessageStateDescriptor),
            delayedMessageIndex,
            sideOutputs,
            output,
            MessageFactory.forKey(statefulFunctionsUniverse.messageFactoryKey()),
            new MailboxExecutorFacade(mailboxExecutor, "Stateful Functions Mailbox"),
            getRuntimeContext().getMetricGroup().addGroup("functions"),
            asyncOperationState);

    //
    // expire all the pending async operations.
    //
    AsyncOperationFailureNotifier.fireExpiredAsyncOperations(
        asyncOperationStateDescriptor, reductions, getKeyedStateBackend());
  }

  @Override
  public void snapshotState(StateSnapshotContext context) throws Exception {
    super.snapshotState(context);
    // persist the set of in-flight async operations so they can be failed/restored after recovery.
    reductions.snapshotAsyncOperations();
  }

  @Override
  public void close() throws Exception {
    try {
      closeOrDispose();
    } finally {
      super.close();
    }
  }

  /** Shuts down all resource-managing function providers, swallowing (but logging) failures. */
  private void closeOrDispose() {
    final List<ManagingResources> managingResources = this.managingResources;
    if (managingResources == null) {
      // dispose can be called before state initialization was completed (for example a failure
      // during initialization).
      return;
    }
    for (ManagingResources withResources : managingResources) {
      try {
        withResources.shutdown();
      } catch (Throwable t) {
        LOG.warn("Exception caught during close. It would be silently ignored.", t);
      }
    }
  }

  // ------------------------------------------------------------------------------------------------------------------
  // Helpers
  // ------------------------------------------------------------------------------------------------------------------

  /**
   * Registers the delayed-messages buffer with the keyed state backend, using Long (trigger
   * timestamp) as the state namespace.
   */
  private InternalListState<String, Long, Message> delayedMessagesBufferState(
      ListStateDescriptor<Message> delayedMessageStateDescriptor) {
    try {
      KeyedStateBackend<String> keyedStateBackend = getKeyedStateBackend();
      return (InternalListState<String, Long, Message>)
          keyedStateBackend.getOrCreateKeyedState(
              LongSerializer.INSTANCE, delayedMessageStateDescriptor);
    } catch (Exception e) {
      throw new RuntimeException("Error registered Flink state for delayed messages buffer.", e);
    }
  }

  /** Resolves the universe for this job's configuration, using the context class loader. */
  private StatefulFunctionsUniverse statefulFunctionsUniverse(
      StatefulFunctionsConfig configuration) {
    final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    return StatefulFunctionsUniverses.get(classLoader, configuration);
  }

  /**
   * returns a list of {@linkplain StatefulFunctionProvider} that implement the (internal) marker
   * interface {@linkplain ManagingResources}.
   */
  private static List<ManagingResources> resourceManagingFunctionProviders(
      Map<FunctionType, StatefulFunctionProvider> functionProviders) {
    return functionProviders.values().stream()
        .filter(provider -> provider instanceof ManagingResources)
        .map(provider -> (ManagingResources) provider)
        .collect(Collectors.toList());
  }
}
| 6,259 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/AsyncOperationFailureNotifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Map.Entry;
import java.util.Objects;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.runtime.state.KeyedStateBackend;
import org.apache.flink.runtime.state.KeyedStateFunction;
import org.apache.flink.runtime.state.VoidNamespace;
import org.apache.flink.runtime.state.VoidNamespaceSerializer;
import org.apache.flink.statefun.flink.core.message.Message;
/**
 * After a restore, walks the per-key async-operation state and re-enqueues every pending async
 * operation so it can be reported as failed/expired.
 */
final class AsyncOperationFailureNotifier
    implements KeyedStateFunction<String, MapState<Long, Message>> {

  private final Reductions reductions;
  private boolean enqueued;

  private AsyncOperationFailureNotifier(Reductions reductions) {
    this.reductions = Objects.requireNonNull(reductions);
  }

  /**
   * Scans every key's async-operation state and, if any pending operation was found,
   * triggers envelope processing so the notifications are actually delivered.
   */
  static void fireExpiredAsyncOperations(
      MapStateDescriptor<Long, Message> asyncOperationStateDescriptor,
      Reductions reductions,
      KeyedStateBackend<String> keyedStateBackend)
      throws Exception {
    final AsyncOperationFailureNotifier notifier = new AsyncOperationFailureNotifier(reductions);
    keyedStateBackend.applyToAllKeys(
        VoidNamespace.get(),
        VoidNamespaceSerializer.INSTANCE,
        asyncOperationStateDescriptor,
        notifier);
    if (notifier.enqueued()) {
      reductions.processEnvelopes();
    }
  }

  @Override
  public void process(String key, MapState<Long, Message> state) throws Exception {
    for (Entry<Long, Message> pending : state.entries()) {
      // key: the future's id; value: the metadata message captured at registration time.
      reductions.enqueueAsyncOperationAfterRestore(pending.getKey(), pending.getValue());
      enqueued = true;
    }
  }

  /** True iff at least one pending async operation was re-enqueued during the scan. */
  private boolean enqueued() {
    return enqueued;
  }
}
| 6,260 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/DelaySink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.Objects;
import java.util.OptionalLong;
import org.apache.flink.runtime.state.VoidNamespace;
import org.apache.flink.statefun.flink.core.di.Inject;
import org.apache.flink.statefun.flink.core.di.Label;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.streaming.api.operators.InternalTimer;
import org.apache.flink.streaming.api.operators.InternalTimerService;
import org.apache.flink.streaming.api.operators.Triggerable;
import org.apache.flink.util.Preconditions;
/**
 * Accepts messages to be delivered after a delay: each message is buffered under its trigger
 * timestamp and a processing-time timer is registered; when the timer fires, all messages due at
 * that timestamp are handed to the {@link DelayMessageHandler}.
 */
final class DelaySink implements Triggerable<String, VoidNamespace> {

  private final InternalTimerService<VoidNamespace> timerService;
  private final DelayedMessagesBuffer buffer;
  private final DelayMessageHandler handler;

  @Inject
  DelaySink(
      @Label("delayed-messages-buffer") DelayedMessagesBuffer delayedMessagesBuffer,
      @Label("delayed-messages-timer-service-factory")
          TimerServiceFactory delayedMessagesTimerServiceFactory,
      DelayMessageHandler delayMessageHandler) {
    this.buffer = Objects.requireNonNull(delayedMessagesBuffer);
    // registers this instance as the timer callback.
    this.timerService = delayedMessagesTimerServiceFactory.createTimerService(this);
    this.handler = Objects.requireNonNull(delayMessageHandler);
  }

  /** Buffers {@code message} and schedules it to fire {@code delayMillis} ms from now. */
  void accept(Message message, long delayMillis) {
    Objects.requireNonNull(message);
    Preconditions.checkArgument(delayMillis >= 0);
    final long now = timerService.currentProcessingTime();
    final long triggerTime = now + delayMillis;
    timerService.registerProcessingTimeTimer(VoidNamespace.INSTANCE, triggerTime);
    buffer.add(message, triggerTime);
  }

  @Override
  public void onProcessingTime(InternalTimer<String, VoidNamespace> timer) {
    final long dueTimestamp = timer.getTimestamp();
    handler.onStart();
    buffer.forEachMessageAt(dueTimestamp, handler);
    handler.onComplete();
  }

  @Override
  public void onEventTime(InternalTimer<String, VoidNamespace> timer) {
    // delayed messages are scheduled with processing-time timers only.
    throw new UnsupportedOperationException(
        "Delayed messages with event time semantics is not supported.");
  }

  /**
   * Removes the delayed message registered under {@code cancellationToken}; if it was the last
   * message pending at its trigger timestamp, the corresponding timer is deleted as well.
   */
  void removeMessageByCancellationToken(String cancellationToken) {
    Objects.requireNonNull(cancellationToken);
    OptionalLong timerToClear = buffer.removeMessageByCancellationToken(cancellationToken);
    timerToClear.ifPresent(
        timestamp -> timerService.deleteProcessingTimeTimer(VoidNamespace.INSTANCE, timestamp));
  }
}
| 6,261 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/functions/DelayedMessagesBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import java.util.OptionalLong;
import java.util.function.Consumer;
import org.apache.flink.statefun.flink.core.message.Message;
/**
 * Buffers messages that were sent with a delay, keyed by the timestamp at which they are due to
 * fire, and optionally indexed by a cancellation token so they can be removed before firing.
 */
interface DelayedMessagesBuffer {

  /**
   * Add a message to be fired at a specific timestamp
   *
   * @param message the delayed message to buffer.
   * @param untilTimestamp the timestamp at which the message should fire.
   */
  void add(Message message, long untilTimestamp);

  /**
   * Apply @fn for each delayed message that is meant to be fired at @timestamp.
   *
   * @param timestamp the firing timestamp whose buffered messages should be consumed.
   * @param fn consumer invoked once per buffered message.
   */
  void forEachMessageAt(long timestamp, Consumer<Message> fn);

  /**
   * @param token a message cancellation token to delete.
   * @return an optional timestamp that this message was meant to be fired at. The timestamp will be
   *     present only if this message was the last message registered to fire at that timestamp.
   *     (hence: safe to clear any underlying timer)
   */
  OptionalLong removeMessageByCancellationToken(String token);
}
| 6,262 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/TransportClientSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.io.Serializable;
import java.util.Objects;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonPointer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.common.json.Selectors;
import org.apache.flink.statefun.sdk.TypeName;
/**
 * A serializable specification of a remote-function transport client: the {@link TypeName} of the
 * client factory to instantiate, together with the raw JSON node carrying its properties.
 */
public final class TransportClientSpec implements Serializable {

  // JSON pointer to the optional "type" entry selecting the client factory.
  private static final JsonPointer FACTORY_KIND = JsonPointer.compile("/type");

  private final TypeName factoryKind;
  private final ObjectNode specNode;

  public TransportClientSpec(TypeName factoryKind, ObjectNode properties) {
    this.factoryKind = Objects.requireNonNull(factoryKind);
    this.specNode = Objects.requireNonNull(properties);
  }

  /**
   * Parses a spec from its JSON representation; when no explicit {@code /type} entry is present,
   * the asynchronous client factory is assumed.
   */
  public static TransportClientSpec fromJsonNode(ObjectNode node) {
    return new TransportClientSpec(
        Selectors.optionalTextAt(node, FACTORY_KIND)
            .map(TypeName::parseFrom)
            .orElse(TransportClientConstants.ASYNC_CLIENT_FACTORY_TYPE),
        node);
  }

  /** The type name identifying the transport client factory to use. */
  public TypeName factoryKind() {
    return factoryKind;
  }

  /** The raw JSON properties node this spec was created from. */
  public ObjectNode specNode() {
    return specNode;
  }
}
| 6,263 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/TransportClientConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import org.apache.flink.statefun.sdk.TypeName;
/** Well-known {@link TypeName}s of the built-in request-reply transport client factories. */
public final class TransportClientConstants {

  /** Identifies the OkHttp-backed transport client factory. */
  public static final TypeName OKHTTP_CLIENT_FACTORY_TYPE =
      TypeName.parseFrom("io.statefun.transports.v1/okhttp");

  /** Identifies the default asynchronous transport client factory. */
  public static final TypeName ASYNC_CLIENT_FACTORY_TYPE =
      TypeName.parseFrom("io.statefun.transports.v1/async");

  // fix: constants holder previously had an implicit public constructor;
  // prevent accidental instantiation.
  private TransportClientConstants() {}
}
| 6,264 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/DefaultHttpRequestReplyClientSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.time.Duration;
import java.util.Objects;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonSetter;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
/**
 * Configuration for the default (OkHttp based) request-reply HTTP transport client, as read from
 * the {@code transport} section of an endpoint spec.
 *
 * <p>All Jackson wiring (property names, setter-based validation) is annotation driven and is kept
 * exactly as the wire format requires.
 */
public final class DefaultHttpRequestReplyClientSpec {

  @JsonProperty("timeouts")
  private Timeouts timeouts = new Timeouts();

  /**
   * Sets the configured timeouts, rejecting combinations where any per-phase timeout exceeds the
   * total call timeout.
   */
  @JsonSetter("timeouts")
  public void setTimeouts(Timeouts timeouts) {
    validateTimeouts(
        timeouts.callTimeout, timeouts.connectTimeout, timeouts.readTimeout, timeouts.writeTimeout);
    this.timeouts = timeouts;
  }

  public Timeouts getTimeouts() {
    return timeouts;
  }

  /** Serializes this spec back into its JSON tree representation. */
  public ObjectNode toJson(ObjectMapper objectMapper) {
    return objectMapper.valueToTree(this);
  }

  /** Deserializes a spec out of a JSON tree. */
  static DefaultHttpRequestReplyClientSpec fromJson(ObjectMapper objectMapper, JsonNode jsonNode)
      throws JsonProcessingException {
    return objectMapper.treeToValue(jsonNode, DefaultHttpRequestReplyClientSpec.class);
  }

  /** No per-phase timeout may be larger than the overall (call) timeout. */
  private static void validateTimeouts(
      Duration callTimeout, Duration connectTimeout, Duration readTimeout, Duration writeTimeout) {
    checkNotLargerThanCallTimeout(connectTimeout, callTimeout, "Connect");
    checkNotLargerThanCallTimeout(readTimeout, callTimeout, "Read");
    checkNotLargerThanCallTimeout(writeTimeout, callTimeout, "Write");
  }

  private static void checkNotLargerThanCallTimeout(
      Duration timeout, Duration callTimeout, String timeoutName) {
    if (timeout.compareTo(callTimeout) > 0) {
      throw new IllegalArgumentException(
          timeoutName + " timeout cannot be larger than request timeout.");
    }
  }

  /** Timeout settings of the HTTP client; all durations must be strictly positive. */
  public static final class Timeouts {
    // default spec values
    @VisibleForTesting public static final Duration DEFAULT_HTTP_TIMEOUT = Duration.ofMinutes(1);

    @VisibleForTesting
    public static final Duration DEFAULT_HTTP_CONNECT_TIMEOUT = Duration.ofSeconds(10);

    @VisibleForTesting
    public static final Duration DEFAULT_HTTP_READ_TIMEOUT = Duration.ofSeconds(10);

    @VisibleForTesting
    public static final Duration DEFAULT_HTTP_WRITE_TIMEOUT = Duration.ofSeconds(10);

    // spec values
    private Duration callTimeout = DEFAULT_HTTP_TIMEOUT;
    private Duration connectTimeout = DEFAULT_HTTP_CONNECT_TIMEOUT;
    private Duration readTimeout = DEFAULT_HTTP_READ_TIMEOUT;
    private Duration writeTimeout = DEFAULT_HTTP_WRITE_TIMEOUT;

    @JsonSetter("call")
    public void setCallTimeout(Duration callTimeout) {
      this.callTimeout = requireNonZeroDuration(callTimeout);
    }

    @JsonSetter("connect")
    public void setConnectTimeout(Duration connectTimeout) {
      this.connectTimeout = requireNonZeroDuration(connectTimeout);
    }

    @JsonSetter("read")
    public void setReadTimeout(Duration readTimeout) {
      this.readTimeout = requireNonZeroDuration(readTimeout);
    }

    @JsonSetter("write")
    public void setWriteTimeout(Duration writeTimeout) {
      this.writeTimeout = requireNonZeroDuration(writeTimeout);
    }

    public Duration getCallTimeout() {
      return callTimeout;
    }

    public Duration getConnectTimeout() {
      return connectTimeout;
    }

    public Duration getReadTimeout() {
      return readTimeout;
    }

    public Duration getWriteTimeout() {
      return writeTimeout;
    }

    private static Duration requireNonZeroDuration(Duration duration) {
      Objects.requireNonNull(duration);
      if (duration.isZero()) {
        throw new IllegalArgumentException("Timeout durations must be larger than 0.");
      }
      return duration;
    }
  }
}
| 6,265 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/UnixDomainHttpEndpoint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.io.File;
import java.net.URI;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Objects;
import org.apache.flink.util.Preconditions;
/** Represents a Unix domain file path and an http endpoint */
/** Represents a Unix domain socket file path together with the HTTP endpoint served over it. */
final class UnixDomainHttpEndpoint {

  /** Checks whether an endpoint URI is using UNIX domain sockets. */
  static boolean validate(URI endpoint) {
    String scheme = endpoint.getScheme();
    return "http+unix".equalsIgnoreCase(scheme) || "https+unix".equalsIgnoreCase(scheme);
  }

  /**
   * Parses a URI of the form {@code http+unix://<file system path>.sock/<http endpoint>}.
   *
   * <p>Everything up to and including the first {@code *.sock} path segment is the socket file;
   * the remainder (or {@code /} when absent) is the HTTP path.
   *
   * @throws IllegalArgumentException if the URI is not a unix domain socket URI.
   * @throws IllegalStateException if the path contains no {@code .sock} segment.
   */
  static UnixDomainHttpEndpoint parseFrom(URI endpoint) {
    // fix: previously a bare Preconditions.checkArgument threw an IllegalArgumentException
    // without any message; fail with a descriptive message instead (same exception type).
    if (!validate(endpoint)) {
      throw new IllegalArgumentException(
          "Not a unix domain socket URI (expected scheme http+unix or https+unix): " + endpoint);
    }
    final Path path = Paths.get(endpoint.getPath());
    final int sockPathIndex = indexOfSockFile(path);
    final String filePath = "/" + path.subpath(0, sockPathIndex + 1).toString();
    final File unixDomainFile = new File(filePath);
    if (sockPathIndex == path.getNameCount() - 1) {
      // the socket file is the last segment; default to the root HTTP path
      return new UnixDomainHttpEndpoint(unixDomainFile, "/");
    }
    String pathSegment = "/" + path.subpath(sockPathIndex + 1, path.getNameCount()).toString();
    return new UnixDomainHttpEndpoint(unixDomainFile, pathSegment);
  }

  /** Index of the first path segment ending with {@code .sock}. */
  private static int indexOfSockFile(Path path) {
    for (int i = 0; i < path.getNameCount(); i++) {
      if (path.getName(i).toString().endsWith(".sock")) {
        return i;
      }
    }
    throw new IllegalStateException("Unix Domain Socket path should contain a .sock file");
  }

  /** File system path of the unix domain socket file. */
  final File unixDomainFile;

  /** HTTP path to request over the socket; always starts with {@code /}. */
  final String pathSegment;

  private UnixDomainHttpEndpoint(File unixDomainFile, String endpoint) {
    this.unixDomainFile = Objects.requireNonNull(unixDomainFile);
    this.pathSegment = Objects.requireNonNull(endpoint);
  }
}
| 6,266 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/OkHttpUnixSocketBridge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import javax.net.SocketFactory;
import okhttp3.Dns;
import okhttp3.OkHttpClient;
import org.apache.flink.util.IOUtils;
import org.newsclub.net.unix.AFUNIXSocket;
import org.newsclub.net.unix.AFUNIXSocketAddress;
/** The following class holds utilities needed to bridge unix domain sockets and okhttp client. */
/** The following class holds utilities needed to bridge unix domain sockets and okhttp client. */
final class OkHttpUnixSocketBridge {
  private OkHttpUnixSocketBridge() {}

  /**
   * Configures the {@link OkHttpClient} builder to connect over a unix domain socket.
   *
   * <p>Every socket the client creates connects to {@code unixSocketFile}; DNS is short-circuited
   * to a constant address since host resolution is meaningless for domain sockets.
   */
  static void configureUnixDomainSocket(OkHttpClient.Builder builder, File unixSocketFile) {
    builder.socketFactory(new UnixSocketFactory(unixSocketFile)).dns(ConstantDnsLookup.INSTANCE);
  }

  /** resolve all host names to Ipv4 0.0.0.0 and port 0. */
  private enum ConstantDnsLookup implements Dns {
    INSTANCE;

    @SuppressWarnings("NullableProblems")
    @Override
    public List<InetAddress> lookup(String hostname) throws UnknownHostException {
      InetAddress address = InetAddress.getByAddress(hostname, new byte[] {0, 0, 0, 0});
      return Collections.singletonList(address);
    }
  }

  /**
   * A {@code SocketFactory} that is bound to a specific path, and would return a {@code UnixSocket}
   * for that path.
   */
  private static final class UnixSocketFactory extends SocketFactory {

    private final File unixSocketFile;

    public UnixSocketFactory(File unixSocketFile) {
      this.unixSocketFile = Objects.requireNonNull(unixSocketFile);
    }

    @Override
    public Socket createSocket() {
      return new UnixSocket(unixSocketFile);
    }

    // the host/port creating overloads are never used by OkHttp when a plain
    // createSocket() is available; keep them unsupported.
    @Override
    public Socket createSocket(String s, int i) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Socket createSocket(String s, int i, InetAddress inetAddress, int i1) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Socket createSocket(InetAddress inetAddress, int i) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Socket createSocket(InetAddress inetAddress, int i, InetAddress inetAddress1, int i1) {
      throw new UnsupportedOperationException();
    }
  }

  /**
   * A {@code Socket} that is bound to a specific unix socket file, and delegates the relevant
   * operations to {@link AFUNIXSocket}.
   */
  private static final class UnixSocket extends Socket {

    private final File unixSocketFile;
    private AFUNIXSocket delegate;
    // set once close() is called; lets isClosed() answer correctly even though
    // close() nulls out the delegate
    private volatile boolean closed;

    UnixSocket(File unixSocketFile) {
      this.unixSocketFile = Objects.requireNonNull(unixSocketFile);
    }

    @Override
    public void connect(SocketAddress endpoint, int timeout) throws IOException {
      // the endpoint address is ignored on purpose: this socket always connects
      // to the configured unix socket file.
      delegate = AFUNIXSocket.newInstance();
      delegate.connect(new AFUNIXSocketAddress(unixSocketFile), timeout);
      delegate.setSoTimeout(timeout);
    }

    @Override
    public void bind(SocketAddress bindpoint) throws IOException {
      delegate.bind(bindpoint);
    }

    @Override
    public boolean isConnected() {
      return delegate != null && delegate.isConnected();
    }

    @Override
    public OutputStream getOutputStream() throws IOException {
      return delegate.getOutputStream();
    }

    @Override
    public InputStream getInputStream() throws IOException {
      return delegate.getInputStream();
    }

    @Override
    public synchronized void close() {
      closed = true;
      IOUtils.closeSocket(delegate);
      delegate = null;
    }

    @Override
    public boolean isClosed() {
      // fix: close() nulls the delegate, so the previous delegate.isClosed() would NPE
      // after close() (and also before connect()); mirror the null-guard in isConnected().
      if (closed) {
        return true;
      }
      final AFUNIXSocket socket = delegate;
      return socket != null && socket.isClosed();
    }

    @Override
    public synchronized void setSoTimeout(int timeout) {
      // noop.
      // we set the timeout after connecting
    }
  }
}
| 6,267 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/TargetFunctions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.io.Serializable;
import java.util.Objects;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.FunctionTypeNamespaceMatcher;
import org.apache.flink.statefun.sdk.TypeName;
/**
 * The set of functions an endpoint serves: either one specific {@link FunctionType}, or every
 * function under a given namespace.
 */
public abstract class TargetFunctions implements Serializable {

  private static final String INVALID_PATTERN_MESSAGE =
      "Invalid syntax for target functions. Only <namespace>/<name> or <namespace>/* are supported.";

  /**
   * Parses a pattern of the form {@code <namespace>/<name>} (a single function) or
   * {@code <namespace>/*} (all functions in a namespace).
   *
   * @throws IllegalArgumentException on any other use of the {@code *} wildcard.
   */
  public static TargetFunctions fromPatternString(String patternString) {
    final TypeName parsed = TypeName.parseFrom(patternString);
    final String parsedNamespace = parsed.namespace();
    final String parsedName = parsed.name();
    if (parsedNamespace.contains("*")) {
      throw new IllegalArgumentException(INVALID_PATTERN_MESSAGE);
    }
    if (parsedName.equals("*")) {
      return TargetFunctions.namespace(parsedNamespace);
    }
    if (parsedName.contains("*")) {
      throw new IllegalArgumentException(INVALID_PATTERN_MESSAGE);
    }
    return TargetFunctions.functionType(new FunctionType(parsedNamespace, parsedName));
  }

  /** A target matching every function under the given namespace. */
  public static TargetFunctions namespace(String namespace) {
    return new TargetFunctions.NamespaceTarget(
        FunctionTypeNamespaceMatcher.targetNamespace(namespace));
  }

  /** A target matching exactly one function type. */
  public static TargetFunctions functionType(FunctionType functionType) {
    return new TargetFunctions.FunctionTypeTarget(functionType);
  }

  public boolean isSpecificFunctionType() {
    return this instanceof TargetFunctions.FunctionTypeTarget;
  }

  public boolean isNamespace() {
    return this instanceof TargetFunctions.NamespaceTarget;
  }

  public abstract FunctionTypeNamespaceMatcher asNamespace();

  public abstract FunctionType asSpecificFunctionType();

  private static class NamespaceTarget extends TargetFunctions {
    private static final long serialVersionUID = 1;

    private final FunctionTypeNamespaceMatcher namespaceMatcher;

    private NamespaceTarget(FunctionTypeNamespaceMatcher namespaceMatcher) {
      this.namespaceMatcher = Objects.requireNonNull(namespaceMatcher);
    }

    @Override
    public FunctionTypeNamespaceMatcher asNamespace() {
      return namespaceMatcher;
    }

    @Override
    public FunctionType asSpecificFunctionType() {
      throw new IllegalStateException("This target is not a specific function type");
    }
  }

  private static class FunctionTypeTarget extends TargetFunctions {
    private static final long serialVersionUID = 1;

    private final FunctionType functionType;

    private FunctionTypeTarget(FunctionType functionType) {
      this.functionType = Objects.requireNonNull(functionType);
    }

    @Override
    public FunctionTypeNamespaceMatcher asNamespace() {
      throw new IllegalStateException("This target is not a namespace.");
    }

    @Override
    public FunctionType asSpecificFunctionType() {
      return functionType;
    }
  }
}
| 6,268 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/RetryingCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.io.IOException;
import java.time.Duration;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;
import okhttp3.Call;
import okhttp3.Callback;
import okhttp3.Response;
import okio.Timeout;
import org.apache.flink.statefun.flink.core.backpressure.BoundedExponentialBackoff;
import org.apache.flink.statefun.flink.core.metrics.RemoteInvocationMetrics;
import org.apache.flink.statefun.flink.core.reqreply.ToFunctionRequestSummary;
import org.apache.flink.util.function.RunnableWithException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An OkHttp {@link Callback} that transparently retries a call, with bounded exponential backoff,
 * until a successful {@link Response} arrives or the overall time budget (derived from the call's
 * {@link Timeout}) runs out.
 *
 * <p>The terminal outcome - either the successful response or the last failure - is delivered via
 * {@link #future()}.
 */
@SuppressWarnings("NullableProblems")
final class RetryingCallback implements Callback {
  // first delay between attempts; grown by BoundedExponentialBackoff on each retry
  private static final Duration INITIAL_BACKOFF_DURATION = Duration.ofMillis(10);
  // status codes considered transient; any code >= 500 is also retried (see
  // onResponseUnsafe), making the 500 entry here redundant but harmless
  private static final Set<Integer> RETRYABLE_HTTP_CODES =
      new HashSet<>(Arrays.asList(409, 420, 408, 429, 499, 500));
  private static final Logger LOG = LoggerFactory.getLogger(RetryingCallback.class);
  // completed exactly once: either with a successful response, or exceptionally
  private final CompletableFuture<Response> resultFuture;
  private final BoundedExponentialBackoff backoff;
  // request description used only for log messages
  private final ToFunctionRequestSummary requestSummary;
  private final RemoteInvocationMetrics metrics;
  // signals that the owning component is shutting down, so failures should not be retried
  private final BooleanSupplier isShutdown;
  // System.nanoTime() at the start of the *current* attempt; reset on every retry
  private long requestStarted;
  RetryingCallback(
      ToFunctionRequestSummary requestSummary,
      RemoteInvocationMetrics metrics,
      Timeout timeout,
      BooleanSupplier isShutdown) {
    this.resultFuture = new CompletableFuture<>();
    this.backoff = new BoundedExponentialBackoff(INITIAL_BACKOFF_DURATION, duration(timeout));
    this.requestSummary = requestSummary;
    this.metrics = metrics;
    this.isShutdown = Objects.requireNonNull(isShutdown);
  }
  /** The eventual outcome of the (possibly retried) call. */
  CompletableFuture<Response> future() {
    return resultFuture;
  }
  /** Starts timing an attempt and enqueues this callback on the given call. */
  void attachToCall(Call call) {
    this.requestStarted = System.nanoTime();
    call.enqueue(this);
  }
  @Override
  public void onFailure(Call call, IOException cause) {
    tryWithFuture(() -> onFailureUnsafe(call, cause));
  }
  @Override
  public void onResponse(Call call, Response response) {
    tryWithFuture(() -> onResponseUnsafe(call, response));
  }
  // handles an I/O failure; any exception thrown here is routed into resultFuture
  // by tryWithFuture
  private void onFailureUnsafe(Call call, IOException cause) {
    if (isShutdown.getAsBoolean()) {
      // during shutdown do not retry; surface the failure immediately
      throw new IllegalStateException("An exception caught during shutdown.", cause);
    }
    LOG.warn(
        "Retriable exception caught while trying to deliver a message: " + requestSummary, cause);
    metrics.remoteInvocationFailures();
    if (!retryAfterApplyingBackoff(call)) {
      // backoff budget exhausted; give up with the last cause attached
      throw new IllegalStateException(
          "Maximal request time has elapsed. Last cause is attached", cause);
    }
  }
  // handles an HTTP response; any exception thrown here is routed into resultFuture
  private void onResponseUnsafe(Call call, Response response) {
    if (response.isSuccessful()) {
      resultFuture.complete(response);
      return;
    }
    // retry on any code in RETRYABLE_HTTP_CODES or any 5xx; everything else is terminal
    if (!RETRYABLE_HTTP_CODES.contains(response.code()) && response.code() < 500) {
      throw new IllegalStateException("Non successful HTTP response code " + response.code());
    }
    if (!retryAfterApplyingBackoff(call)) {
      throw new IllegalStateException(
          "Maximal request time has elapsed. Last known error is: invalid HTTP response code "
              + response.code());
    }
  }
  /**
   * Retries the original call, after applying backoff.
   *
   * @return true if the request was retried, false otherwise.
   */
  private boolean retryAfterApplyingBackoff(Call call) {
    // applyNow() blocks for the backoff delay and returns false once the budget is exhausted
    if (backoff.applyNow()) {
      Call newCall = call.clone();
      attachToCall(newCall);
      return true;
    }
    return false;
  }
  /**
   * Executes the runnable, and completes {@link #resultFuture} with any exceptions thrown, during
   * its execution.
   */
  private void tryWithFuture(RunnableWithException runnable) {
    try {
      // record per-attempt latency before handling the outcome
      endTimingRequest();
      runnable.run();
    } catch (Throwable t) {
      resultFuture.completeExceptionally(t);
    }
  }
  private static Duration duration(Timeout timeout) {
    return Duration.ofNanos(timeout.timeoutNanos());
  }
  // reports the elapsed time of the current attempt (milliseconds) to the metrics
  private void endTimingRequest() {
    final long nanosecondsElapsed = System.nanoTime() - requestStarted;
    final long millisecondsElapsed = TimeUnit.NANOSECONDS.toMillis(nanosecondsElapsed);
    metrics.remoteInvocationLatency(millisecondsElapsed);
  }
}
| 6,269 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/HttpFunctionEndpointSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.io.Serializable;
import java.util.Objects;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.core.httpfn.jsonutils.TargetFunctionsJsonDeserializer;
import org.apache.flink.statefun.flink.core.httpfn.jsonutils.UrlPathTemplateJsonDeserializer;
import org.apache.flink.statefun.sdk.TypeName;
/**
 * Specification of an HTTP function endpoint: which functions it serves, the URL path template to
 * reach them, batching limits, and the transport client to use.
 *
 * <p>Deserialized from module YAML/JSON via the nested {@link Builder} (note the
 * {@code @JsonDeserialize(builder = ...)} wiring; property and method names are part of the wire
 * format and must not be renamed).
 */
@JsonDeserialize(builder = HttpFunctionEndpointSpec.Builder.class)
public final class HttpFunctionEndpointSpec implements Serializable {
  private static final long serialVersionUID = 1;
  // default cap on in-flight batched invocations per address
  private static final Integer DEFAULT_MAX_NUM_BATCH_REQUESTS = 1000;
  // default transport: the async client factory with an empty properties node
  private static final TransportClientSpec DEFAULT_TRANSPORT_CLIENT_SPEC =
      new TransportClientSpec(
          TransportClientConstants.ASYNC_CLIENT_FACTORY_TYPE,
          new ObjectMapper().createObjectNode());
  // ============================================================
  // Request-Reply invocation protocol configurations
  // ============================================================
  // which functions this endpoint serves (a specific type or a whole namespace)
  private final TargetFunctions targetFunctions;
  // template producing the concrete invocation URL for a function
  private final UrlPathTemplate urlPathTemplate;
  private final int maxNumBatchRequests;
  // ============================================================
  // HTTP transport related properties
  // ============================================================
  // typename of the factory creating the transport client
  private final TypeName transportClientFactoryType;
  // raw, factory-specific transport properties
  private final ObjectNode transportClientProps;
  public static Builder builder(TargetFunctions targetFunctions, UrlPathTemplate urlPathTemplate) {
    return new Builder(targetFunctions, urlPathTemplate);
  }
  private HttpFunctionEndpointSpec(
      TargetFunctions targetFunctions,
      UrlPathTemplate urlPathTemplate,
      int maxNumBatchRequests,
      TypeName transportClientFactoryType,
      ObjectNode transportClientProps) {
    this.targetFunctions = targetFunctions;
    this.urlPathTemplate = urlPathTemplate;
    this.maxNumBatchRequests = maxNumBatchRequests;
    this.transportClientFactoryType = transportClientFactoryType;
    this.transportClientProps = transportClientProps;
  }
  public TargetFunctions targetFunctions() {
    return targetFunctions;
  }
  public UrlPathTemplate urlPathTemplate() {
    return urlPathTemplate;
  }
  public int maxNumBatchRequests() {
    return maxNumBatchRequests;
  }
  public TypeName transportClientFactoryType() {
    return transportClientFactoryType;
  }
  public ObjectNode transportClientProperties() {
    return transportClientProps;
  }
  /** Jackson builder for {@link HttpFunctionEndpointSpec}; method names mirror spec properties. */
  @JsonPOJOBuilder
  public static final class Builder {
    private final TargetFunctions targetFunctions;
    private final UrlPathTemplate urlPathTemplate;
    private int maxNumBatchRequests = DEFAULT_MAX_NUM_BATCH_REQUESTS;
    private TransportClientSpec transportClientSpec = DEFAULT_TRANSPORT_CLIENT_SPEC;
    @JsonCreator
    private Builder(
        @JsonProperty("functions") @JsonDeserialize(using = TargetFunctionsJsonDeserializer.class)
            TargetFunctions targetFunctions,
        @JsonProperty("urlPathTemplate")
            @JsonDeserialize(using = UrlPathTemplateJsonDeserializer.class)
            UrlPathTemplate urlPathTemplate) {
      this.targetFunctions = Objects.requireNonNull(targetFunctions);
      this.urlPathTemplate = Objects.requireNonNull(urlPathTemplate);
    }
    @JsonProperty("maxNumBatchRequests")
    public Builder withMaxNumBatchRequests(int maxNumBatchRequests) {
      this.maxNumBatchRequests = maxNumBatchRequests;
      return this;
    }
    /**
     * This is marked with @JsonProperty specifically to tell Jackson to use this method when
     * deserializing from Json.
     */
    @JsonProperty("transport")
    public Builder withTransport(ObjectNode transportNode) {
      withTransport(TransportClientSpec.fromJsonNode(transportNode));
      return this;
    }
    public Builder withTransport(TransportClientSpec transportNode) {
      this.transportClientSpec = Objects.requireNonNull(transportNode);
      return this;
    }
    public HttpFunctionEndpointSpec build() {
      return new HttpFunctionEndpointSpec(
          targetFunctions,
          urlPathTemplate,
          maxNumBatchRequests,
          transportClientSpec.factoryKind(),
          transportClientSpec.specNode());
    }
  }
}
| 6,270 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/TransportClientsModule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.extensions.ExtensionModule;
/**
 * An {@link ExtensionModule} that registers the built-in HTTP transport client factories under
 * their well-known typenames, discovered at runtime via {@code @AutoService}.
 */
@AutoService(ExtensionModule.class)
public class TransportClientsModule implements ExtensionModule {

  @Override
  public void configure(Map<String, String> globalConfigurations, Binder binder) {
    // expose the OkHttp-backed request-reply client factory under its well-known typename
    final DefaultHttpRequestReplyClientFactory okHttpFactory =
        DefaultHttpRequestReplyClientFactory.INSTANCE;
    binder.bindExtension(TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE, okHttpFactory);
  }
}
| 6,271 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/UrlPathTemplate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.io.Serializable;
import java.net.URI;
import java.util.Objects;
import org.apache.flink.statefun.sdk.FunctionType;
/**
 * A URL path template in which the {@code {function.name}} placeholder is substituted with a
 * concrete function's name to form the invocation endpoint.
 */
public final class UrlPathTemplate implements Serializable {

  private static final long serialVersionUID = 1;

  /** Placeholder that gets replaced with the target function's name. */
  private static final String FUNCTION_NAME_HOLDER = "{function.name}";

  private final String template;

  public UrlPathTemplate(String template) {
    this.template = Objects.requireNonNull(template);
  }

  /** Produces the concrete endpoint URI for the given function type. */
  public URI apply(FunctionType functionType) {
    final String concretePath = template.replace(FUNCTION_NAME_HOLDER, functionType.name());
    return URI.create(concretePath);
  }
}
| 6,272 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/DefaultHttpRequestReplyClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import static org.apache.flink.statefun.flink.core.httpfn.OkHttpUnixSocketBridge.configureUnixDomainSocket;
import java.net.URI;
import javax.annotation.Nullable;
import okhttp3.HttpUrl;
import okhttp3.OkHttpClient;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.common.SetContextClassLoader;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.apache.flink.statefun.flink.core.reqreply.ClassLoaderSafeRequestReplyClient;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClient;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClientFactory;
/**
 * The default {@link RequestReplyClientFactory}, backed by OkHttp.
 *
 * <p>All created clients are derived (via {@link OkHttpClient#newBuilder()}) from a single,
 * lazily-initialized shared {@link OkHttpClient}, so every endpoint shares one dispatcher and
 * connection pool while still receiving its own per-endpoint settings (timeouts, unix domain
 * socket routing).
 */
public final class DefaultHttpRequestReplyClientFactory implements RequestReplyClientFactory {
  public static final DefaultHttpRequestReplyClientFactory INSTANCE =
      new DefaultHttpRequestReplyClientFactory();
  /** Parses per-endpoint transport properties JSON into a {@link DefaultHttpRequestReplyClientSpec}. */
  private static final ObjectMapper OBJ_MAPPER = StateFunObjectMapper.create();
  /** lazily initialized by {@link #createTransportClient} */
  @Nullable private volatile OkHttpClient sharedClient;

  private DefaultHttpRequestReplyClientFactory() {}

  /**
   * Creates a request-reply client for the given endpoint.
   *
   * @param transportProperties per-endpoint transport configuration (timeouts etc.).
   * @param endpointUrl the endpoint's URL; may be a unix-domain-socket style URI.
   * @return the client, wrapped in a {@link ClassLoaderSafeRequestReplyClient} when the caller's
   *     context class loader differs from this factory's, so callbacks run with the right loader.
   */
  @Override
  public RequestReplyClient createTransportClient(ObjectNode transportProperties, URI endpointUrl) {
    final DefaultHttpRequestReplyClient client = createClient(transportProperties, endpointUrl);
    if (Thread.currentThread().getContextClassLoader() == getClass().getClassLoader()) {
      return client;
    } else {
      return new ClassLoaderSafeRequestReplyClient(client);
    }
  }

  /** Drops and closes the shared client; previously created clients then report shutdown. */
  @Override
  public void cleanup() {
    final OkHttpClient sharedClient = this.sharedClient;
    this.sharedClient = null;
    OkHttpUtils.closeSilently(sharedClient);
  }

  private DefaultHttpRequestReplyClient createClient(
      ObjectNode transportProperties, URI endpointUrl) {
    try (SetContextClassLoader ignored = new SetContextClassLoader(this)) {
      OkHttpClient sharedClient = this.sharedClient;
      if (sharedClient == null) {
        sharedClient = OkHttpUtils.newClient();
        this.sharedClient = sharedClient;
      }
      // BUGFIX: capture the shared client this endpoint client is derived from, so the
      // isShutdown supplier below compares against the instance that existed at creation time.
      // The previous `() -> isShutdown(this.sharedClient)` re-read the field at invocation time
      // and therefore compared the field against itself, never reporting a shutdown.
      final OkHttpClient clientAtCreation = sharedClient;
      final OkHttpClient.Builder clientBuilder = sharedClient.newBuilder();
      final DefaultHttpRequestReplyClientSpec transportClientSpec =
          parseTransportProperties(transportProperties);
      clientBuilder.callTimeout(transportClientSpec.getTimeouts().getCallTimeout());
      clientBuilder.connectTimeout(transportClientSpec.getTimeouts().getConnectTimeout());
      clientBuilder.readTimeout(transportClientSpec.getTimeouts().getReadTimeout());
      clientBuilder.writeTimeout(transportClientSpec.getTimeouts().getWriteTimeout());
      HttpUrl url;
      if (UnixDomainHttpEndpoint.validate(endpointUrl)) {
        // OkHttp still requires a syntactically valid http URL; the actual connection is routed
        // through the unix domain socket configured below, so the host value is never used.
        UnixDomainHttpEndpoint endpoint = UnixDomainHttpEndpoint.parseFrom(endpointUrl);
        url =
            new HttpUrl.Builder()
                .scheme("http")
                .host("unused")
                .addPathSegment(endpoint.pathSegment)
                .build();
        configureUnixDomainSocket(clientBuilder, endpoint.unixDomainFile);
      } else {
        url = HttpUrl.get(endpointUrl);
      }
      return new DefaultHttpRequestReplyClient(
          url, clientBuilder.build(), () -> isShutdown(clientAtCreation));
    }
  }

  /** @return true iff the shared client has been closed or replaced since {@code previousClient}. */
  private boolean isShutdown(OkHttpClient previousClient) {
    return DefaultHttpRequestReplyClientFactory.this.sharedClient != previousClient;
  }

  private static DefaultHttpRequestReplyClientSpec parseTransportProperties(
      ObjectNode transportClientProperties) {
    try {
      return DefaultHttpRequestReplyClientSpec.fromJson(OBJ_MAPPER, transportClientProperties);
    } catch (Exception e) {
      throw new RuntimeException(
          "Unable to parse transport client properties when creating client: ", e);
    }
  }
}
| 6,273 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/OkHttpUtils.java | /*
* Copyright 2019 Ververica GmbH.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import okhttp3.ConnectionPool;
import okhttp3.Dispatcher;
import okhttp3.OkHttpClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Helpers for creating and tearing down the shared OkHttp client. */
final class OkHttpUtils {
  private static final Logger LOG = LoggerFactory.getLogger(OkHttpUtils.class);

  private OkHttpUtils() {}

  /**
   * Creates an OkHttp client with effectively unbounded request concurrency, a generous
   * connection pool, redirects enabled, and automatic retries on connection failures.
   */
  static OkHttpClient newClient() {
    final Dispatcher requestDispatcher = new Dispatcher();
    requestDispatcher.setMaxRequests(Integer.MAX_VALUE);
    requestDispatcher.setMaxRequestsPerHost(Integer.MAX_VALUE);
    final ConnectionPool pool = new ConnectionPool(1024, 1, TimeUnit.MINUTES);
    return new OkHttpClient.Builder()
        .dispatcher(requestDispatcher)
        .connectionPool(pool)
        .followRedirects(true)
        .followSslRedirects(true)
        .retryOnConnectionFailure(true)
        .build();
  }

  /** Best-effort teardown of {@code client}; never throws. A no-op when {@code client} is null. */
  static void closeSilently(@Nullable OkHttpClient client) {
    if (client != null) {
      shutdownDispatcherExecutor(client.dispatcher());
      cancelInFlightCalls(client.dispatcher());
      evictPooledConnections(client);
    }
  }

  /** Stops the dispatcher's executor; any failure here is deliberately swallowed. */
  private static void shutdownDispatcherExecutor(Dispatcher dispatcher) {
    try {
      dispatcher.executorService().shutdownNow();
    } catch (Throwable ignored) {
    }
  }

  private static void cancelInFlightCalls(Dispatcher dispatcher) {
    try {
      dispatcher.cancelAll();
    } catch (Throwable throwable) {
      LOG.warn("Exception caught while trying to close the HTTP client", throwable);
    }
  }

  private static void evictPooledConnections(OkHttpClient client) {
    try {
      client.connectionPool().evictAll();
    } catch (Throwable throwable) {
      LOG.warn("Exception caught while trying to close the HTTP connection pool", throwable);
    }
  }
}
| 6,274 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/HttpFunctionProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import java.net.URI;
import java.util.Objects;
import javax.annotation.concurrent.NotThreadSafe;
import org.apache.flink.statefun.flink.core.common.ManagingResources;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClientFactory;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyFunction;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.StatefulFunction;
import org.apache.flink.statefun.sdk.StatefulFunctionProvider;
/**
 * A {@link StatefulFunctionProvider} that produces {@link RequestReplyFunction}s which dispatch
 * invocations to a remote HTTP endpoint described by a {@link HttpFunctionEndpointSpec}.
 */
@NotThreadSafe
public final class HttpFunctionProvider implements StatefulFunctionProvider, ManagingResources {
  private final HttpFunctionEndpointSpec spec;
  private final RequestReplyClientFactory clientFactory;

  public HttpFunctionProvider(
      HttpFunctionEndpointSpec endpointSpec, RequestReplyClientFactory requestReplyClientFactory) {
    this.spec = Objects.requireNonNull(endpointSpec);
    this.clientFactory = Objects.requireNonNull(requestReplyClientFactory);
  }

  @Override
  public StatefulFunction functionOfType(FunctionType functionType) {
    // Resolve the concrete endpoint URL for this function type from the URL template.
    final URI targetUrl = spec.urlPathTemplate().apply(functionType);
    return new RequestReplyFunction(
        functionType,
        spec.maxNumBatchRequests(),
        clientFactory.createTransportClient(spec.transportClientProperties(), targetUrl));
  }

  /** Releases the transport resources held by the underlying client factory. */
  @Override
  public void shutdown() {
    clientFactory.cleanup();
  }
}
| 6,275 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/DefaultHttpRequestReplyClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import static org.apache.flink.statefun.flink.core.common.PolyglotUtil.parseProtobufOrThrow;
import static org.apache.flink.util.Preconditions.checkState;
import java.io.InputStream;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.function.BooleanSupplier;
import okhttp3.Call;
import okhttp3.HttpUrl;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
import org.apache.flink.statefun.flink.core.metrics.RemoteInvocationMetrics;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClient;
import org.apache.flink.statefun.flink.core.reqreply.ToFunctionRequestSummary;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction;
import org.apache.flink.util.IOUtils;
/**
 * A {@link RequestReplyClient} that POSTs the {@link ToFunction} message as a protobuf payload
 * over HTTP (via OkHttp) and parses the remote function's {@link FromFunction} protobuf response.
 */
final class DefaultHttpRequestReplyClient implements RequestReplyClient {
  private static final MediaType MEDIA_TYPE_BINARY = MediaType.parse("application/octet-stream");

  private final HttpUrl url;
  private final OkHttpClient client;
  /** Signals that the owning factory was cleaned up; consulted by the retrying callback. */
  private final BooleanSupplier isShutdown;

  DefaultHttpRequestReplyClient(HttpUrl url, OkHttpClient client, BooleanSupplier isShutdown) {
    this.url = Objects.requireNonNull(url);
    this.client = Objects.requireNonNull(client);
    this.isShutdown = Objects.requireNonNull(isShutdown);
  }

  @Override
  public CompletableFuture<FromFunction> call(
      ToFunctionRequestSummary requestSummary,
      RemoteInvocationMetrics metrics,
      ToFunction toFunction) {
    Request request =
        new Request.Builder()
            .url(url)
            .post(RequestBody.create(MEDIA_TYPE_BINARY, toFunction.toByteArray()))
            .build();
    Call newCall = client.newCall(request);
    RetryingCallback callback =
        new RetryingCallback(requestSummary, metrics, newCall.timeout(), isShutdown);
    callback.attachToCall(newCall);
    return callback.future().thenApply(DefaultHttpRequestReplyClient::parseResponse);
  }

  /**
   * Parses the HTTP response body into a {@link FromFunction}.
   *
   * <p>BUGFIX: the body is now closed in a {@code finally} that also covers the validity checks in
   * {@link #responseBody}; previously a failed check (non-2xx status, missing body, wrong
   * content-type) left the response body open and leaked the pooled HTTP connection backing it.
   */
  private static FromFunction parseResponse(Response response) {
    try {
      final InputStream httpResponseBody = responseBody(response);
      return parseProtobufOrThrow(FromFunction.parser(), httpResponseBody);
    } finally {
      // Closing is idempotent and null-safe via closeQuietly; this releases the connection on
      // both the success path and every checkState failure path.
      IOUtils.closeQuietly(response.body());
    }
  }

  /** Validates status code, body presence, and content-type; returns the body's byte stream. */
  private static InputStream responseBody(Response httpResponse) {
    checkState(httpResponse.isSuccessful(), "Unexpected HTTP status code %s", httpResponse.code());
    checkState(httpResponse.body() != null, "Unexpected empty HTTP response (no body)");
    checkState(
        Objects.equals(httpResponse.body().contentType(), MEDIA_TYPE_BINARY),
        "Wrong HTTP content-type %s",
        httpResponse.body().contentType());
    return httpResponse.body().byteStream();
  }
}
| 6,276 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/jsonutils/TargetFunctionsJsonDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn.jsonutils;
import java.io.IOException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.statefun.flink.core.httpfn.TargetFunctions;
/** Jackson deserializer that reads a {@link TargetFunctions} from a JSON string pattern. */
public final class TargetFunctionsJsonDeserializer extends JsonDeserializer<TargetFunctions> {
  @Override
  public TargetFunctions deserialize(
      JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
    final String functionTypePattern = jsonParser.getText();
    return TargetFunctions.fromPatternString(functionTypePattern);
  }
}
| 6,277 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/jsonutils/UrlPathTemplateJsonDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn.jsonutils;
import java.io.IOException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.statefun.flink.core.httpfn.UrlPathTemplate;
/** Jackson deserializer that reads a {@link UrlPathTemplate} from a JSON string value. */
public final class UrlPathTemplateJsonDeserializer extends JsonDeserializer<UrlPathTemplate> {
  @Override
  public UrlPathTemplate deserialize(
      JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
    final String templateText = jsonParser.getText();
    return new UrlPathTemplate(templateText);
  }
}
| 6,278 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/binders | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/binders/v1/Module.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn.binders.v1;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.extensions.ExtensionModule;
/** Service-loaded extension module that registers the version 1 HTTP endpoint binder. */
@AutoService(ExtensionModule.class)
public final class Module implements ExtensionModule {
  @Override
  public void configure(Map<String, String> globalConfigurations, Binder binder) {
    // Make the binder discoverable under its kind typename (io.statefun.endpoints.v1/http),
    // so that YAML module components of that kind are routed to it.
    binder.bindExtension(HttpEndpointBinderV1.KIND_TYPE, HttpEndpointBinderV1.INSTANCE);
  }
}
| 6,279 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/binders | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/binders/v1/HttpEndpointBinderV1.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn.binders.v1;
import java.util.OptionalInt;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonPointer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.extensions.ComponentBinder;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.common.json.Selectors;
import org.apache.flink.statefun.flink.core.httpfn.DefaultHttpRequestReplyClientFactory;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionEndpointSpec;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionProvider;
import org.apache.flink.statefun.flink.core.httpfn.TargetFunctions;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientConstants;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientSpec;
import org.apache.flink.statefun.flink.core.httpfn.UrlPathTemplate;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
/**
* Version 1 {@link ComponentBinder} for binding a {@link HttpFunctionProvider}. Corresponding
* {@link TypeName} is {@code io.statefun.endpoints.v1/http}.
*
* <p>Below is an example YAML document of the {@link ComponentJsonObject} recognized by this
* binder, with the expected types of each field:
*
* <pre>
* kind: io.statefun.endpoints.v1/http (typename)
* spec: (object)
* functions: com.foo.bar/* (typename)
* urlPathTemplate: https://bar.foo.com:8080/{function.name} (string)
* maxNumBatchRequests: 10000 (int, optional)
* timeouts: (object, optional)
* call: 1minute (duration, optional)
* connect: 20seconds (duration, optional)
* read: 30seconds (duration, optional)
* write: 3seconds (duration, optional)
* </pre>
*/
public final class HttpEndpointBinderV1 implements ComponentBinder {
  static final HttpEndpointBinderV1 INSTANCE = new HttpEndpointBinderV1();
  public static final TypeName KIND_TYPE = TypeName.parseFrom("io.statefun.endpoints.v1/http");

  // JSON pointers kept for backwards compatibility with the v1 component layout.
  private static final JsonPointer TARGET_FUNCTIONS = JsonPointer.compile("/functions");
  private static final JsonPointer URL_PATH_TEMPLATE = JsonPointer.compile("/urlPathTemplate");
  private static final JsonPointer MAX_NUM_BATCH_REQUESTS =
      JsonPointer.compile("/maxNumBatchRequests");

  private HttpEndpointBinderV1() {}

  @Override
  public void bind(ComponentJsonObject component, StatefulFunctionModule.Binder binder) {
    validateComponent(component);
    final HttpFunctionEndpointSpec endpointSpec = parseSpec(component);
    final HttpFunctionProvider functionProvider =
        new HttpFunctionProvider(endpointSpec, DefaultHttpRequestReplyClientFactory.INSTANCE);
    bindFunctions(endpointSpec.targetFunctions(), functionProvider, binder);
  }

  /** Binds the provider either to one specific function type or to a whole namespace. */
  private static void bindFunctions(
      TargetFunctions targets,
      HttpFunctionProvider provider,
      StatefulFunctionModule.Binder binder) {
    if (targets.isSpecificFunctionType()) {
      binder.bindFunctionProvider(targets.asSpecificFunctionType(), provider);
    } else {
      binder.bindFunctionProvider(targets.asNamespace(), provider);
    }
  }

  private static void validateComponent(ComponentJsonObject componentJsonObject) {
    if (KIND_TYPE.equals(componentJsonObject.binderTypename())) {
      return;
    }
    throw new IllegalStateException(
        "Received unexpected ModuleComponent to bind: " + componentJsonObject);
  }

  private static HttpFunctionEndpointSpec parseSpec(ComponentJsonObject component) {
    final JsonNode specNode = component.specJsonNode();
    final HttpFunctionEndpointSpec.Builder specBuilder =
        HttpFunctionEndpointSpec.builder(target(specNode), urlPathTemplate(specNode));
    optionalMaxNumBatchRequests(specNode).ifPresent(specBuilder::withMaxNumBatchRequests);
    // v1 always uses the built-in OkHttp transport; the raw spec node carries its properties.
    specBuilder.withTransport(
        new TransportClientSpec(
            TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE, (ObjectNode) specNode));
    return specBuilder.build();
  }

  private static TargetFunctions target(JsonNode specNode) {
    return TargetFunctions.fromPatternString(Selectors.textAt(specNode, TARGET_FUNCTIONS));
  }

  private static UrlPathTemplate urlPathTemplate(JsonNode specNode) {
    return new UrlPathTemplate(Selectors.textAt(specNode, URL_PATH_TEMPLATE));
  }

  private static OptionalInt optionalMaxNumBatchRequests(JsonNode specNode) {
    return Selectors.optionalIntegerAt(specNode, MAX_NUM_BATCH_REQUESTS);
  }
}
| 6,280 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/binders | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/binders/v2/Module.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn.binders.v2;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.extensions.ExtensionModule;
/** Service-loaded extension module that registers the version 2 HTTP endpoint binder. */
@AutoService(ExtensionModule.class)
public final class Module implements ExtensionModule {
  @Override
  public void configure(Map<String, String> globalConfigurations, Binder binder) {
    // Make the binder discoverable under its kind typename (io.statefun.endpoints.v2/http),
    // so that YAML module components of that kind are routed to it.
    binder.bindExtension(HttpEndpointBinderV2.KIND_TYPE, HttpEndpointBinderV2.INSTANCE);
  }
}
| 6,281 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/binders | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/httpfn/binders/v2/HttpEndpointBinderV2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn.binders.v2;
import static org.apache.flink.statefun.flink.core.spi.ExtensionResolverAccessor.getExtensionResolver;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.statefun.extensions.ComponentBinder;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionEndpointSpec;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionProvider;
import org.apache.flink.statefun.flink.core.httpfn.TargetFunctions;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClientFactory;
import org.apache.flink.statefun.flink.core.spi.ExtensionResolver;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
/**
* Version 2 {@link ComponentBinder} for binding a {@link HttpFunctionProvider}. Corresponding
* {@link TypeName} is {@code io.statefun.endpoints.v2/http}.
*
* <p>Below is an example YAML document of the {@link ComponentJsonObject} recognized by this
* binder, with the expected types of each field:
*
* <pre>
* kind: io.statefun.endpoints.v2/http (typename)
* spec: (object)
* functions: com.foo.bar/* (typename)
* urlPathTemplate: https://bar.foo.com:8080/{function.name} (string)
* maxNumBatchRequests: 10000 (int, optional)
* transports: (object, optional)
* type: io.statefun.transports.v1/okhttp (typename, optional)
* ... (remaining fields treated directly as properties)
* </pre>
*/
final class HttpEndpointBinderV2 implements ComponentBinder {
  static final HttpEndpointBinderV2 INSTANCE = new HttpEndpointBinderV2();
  static final TypeName KIND_TYPE = TypeName.parseFrom("io.statefun.endpoints.v2/http");
  private static final ObjectMapper SPEC_OBJ_MAPPER = StateFunObjectMapper.create();

  private HttpEndpointBinderV2() {}

  @Override
  public void bind(ComponentJsonObject component, StatefulFunctionModule.Binder binder) {
    validateComponent(component);
    final HttpFunctionEndpointSpec endpointSpec = parseSpec(component);
    final HttpFunctionProvider provider =
        functionProvider(endpointSpec, getExtensionResolver(binder));
    bindFunctions(endpointSpec.targetFunctions(), provider, binder);
  }

  /** Binds the provider either to one specific function type or to a whole namespace. */
  private static void bindFunctions(
      TargetFunctions targets,
      HttpFunctionProvider provider,
      StatefulFunctionModule.Binder binder) {
    if (targets.isSpecificFunctionType()) {
      binder.bindFunctionProvider(targets.asSpecificFunctionType(), provider);
    } else {
      binder.bindFunctionProvider(targets.asNamespace(), provider);
    }
  }

  private static void validateComponent(ComponentJsonObject componentJsonObject) {
    if (KIND_TYPE.equals(componentJsonObject.binderTypename())) {
      return;
    }
    throw new IllegalStateException(
        "Received unexpected ModuleComponent to bind: " + componentJsonObject);
  }

  private static HttpFunctionEndpointSpec parseSpec(ComponentJsonObject component) {
    try {
      return SPEC_OBJ_MAPPER.treeToValue(component.specJsonNode(), HttpFunctionEndpointSpec.class);
    } catch (JsonProcessingException e) {
      throw new RuntimeException("Error parsing a HttpFunctionEndpointSpec.", e);
    }
  }

  /** Resolves the spec's configured transport factory extension and wraps it in a provider. */
  private static HttpFunctionProvider functionProvider(
      HttpFunctionEndpointSpec spec, ExtensionResolver extensionResolver) {
    return new HttpFunctionProvider(
        spec,
        extensionResolver.resolveExtension(
            spec.transportClientFactoryType(), RequestReplyClientFactory.class));
  }
}
| 6,282 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/pool/SimplePool.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.pool;
import java.util.ArrayDeque;
import java.util.Objects;
import java.util.function.Supplier;
import javax.annotation.concurrent.NotThreadSafe;
/**
* Simple element pool.
*
* @param <ElementT> type of elements being pooled.
*/
@NotThreadSafe
public final class SimplePool<ElementT> {
  // Recycled elements, most recently released first (LIFO reuse).
  private final ArrayDeque<ElementT> freeList = new ArrayDeque<>();
  private final Supplier<ElementT> supplier;
  private final int maxCapacity;

  public SimplePool(Supplier<ElementT> supplier, int maxCapacity) {
    this.supplier = Objects.requireNonNull(supplier);
    this.maxCapacity = maxCapacity;
  }

  /** Returns a previously released element if one is available, otherwise a freshly created one. */
  public ElementT get() {
    final ElementT recycled = freeList.pollFirst();
    return (recycled != null) ? recycled : supplier.get();
  }

  /** Returns an element to the pool; silently dropped when the pool is already at capacity. */
  public void release(ElementT item) {
    if (freeList.size() < maxCapacity) {
      freeList.addFirst(item);
    }
  }
}
| 6,283 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/sdk | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/sdk/state/ApiExtension.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.sdk.state;
/**
 * Bridge that exposes package-private wiring methods of the SDK state primitives to the runtime.
 *
 * <p>The setters invoked here are deliberately not part of the public SDK surface; this class
 * lives in the SDK package so the runtime can attach its {@link Accessor} implementations and
 * bind {@link StateBinder}s without widening the primitives' public API.
 */
public class ApiExtension {
  /** Attaches the runtime-backed accessor to a {@link PersistedValue}. */
  public static <T> void setPersistedValueAccessor(
      PersistedValue<T> persistedValue, Accessor<T> accessor) {
    persistedValue.setAccessor(accessor);
  }
  /** Attaches the runtime-backed accessor to a {@link PersistedTable}. */
  public static <K, V> void setPersistedTableAccessor(
      PersistedTable<K, V> persistedTable, TableAccessor<K, V> accessor) {
    persistedTable.setAccessor(accessor);
  }
  /** Attaches the runtime-backed accessor to a {@link PersistedAppendingBuffer}. */
  public static <E> void setPersistedAppendingBufferAccessor(
      PersistedAppendingBuffer<E> persistedAppendingBuffer, AppendingBufferAccessor<E> accessor) {
    persistedAppendingBuffer.setAccessor(accessor);
  }
  /** Attaches the runtime-backed byte-array accessor to a {@link RemotePersistedValue}. */
  public static void setRemotePersistedValueAccessor(
      RemotePersistedValue remotePersistedValue, Accessor<byte[]> accessor) {
    remotePersistedValue.setAccessor(accessor);
  }
  /** Binds a {@link StateBinder} to a {@link PersistedStateRegistry}. */
  public static void bindPersistedStateRegistry(
      PersistedStateRegistry persistedStateRegistry, StateBinder stateBinder) {
    persistedStateRegistry.bind(stateBinder);
  }
}
| 6,284 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/it/unimi/dsi | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/it/unimi/dsi/fastutil/HashCommon.java | /*
* Copyright (C) 2002-2017 Sebastiano Vigna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.unimi.dsi.fastutil;
/**
* NOTE: This source code was copied from the <a href="http://fastutil.di.unimi.it/">fastutil</a>
* project, and has been modified.
*
* <p>Common code for all hash-based classes.
*
* @author Sebastiano Vigna
* @see <a href="http://fastutil.di.unimi.it/">fastutil</a>
*/
public class HashCommon {
  /** 2<sup>32</sup> · φ, φ = (√5 − 1)/2. */
  private static final int INT_PHI = 0x9E3779B9;
  /** The reciprocal of {@link #INT_PHI} modulo 2<sup>32</sup>. */
  private static final int INV_INT_PHI = 0x144cbc89;
  /** 2<sup>64</sup> · φ, φ = (√5 − 1)/2. */
  private static final long LONG_PHI = 0x9E3779B97F4A7C15L;
  /** The reciprocal of {@link #LONG_PHI} modulo 2<sup>64</sup>. */
  private static final long INV_LONG_PHI = 0xf1de83e19937733dL;
  /** Protected so the class can be subclassed, but never instantiated directly. */
  protected HashCommon() {}
  /**
   * Avalanches the bits of an integer by applying the finalisation step of MurmurHash3.
   *
   * <p>This method implements the finalisation step of Austin Appleby's <a
   * href="http://code.google.com/p/smhasher/">MurmurHash3</a>. Its purpose is to avalanche the bits
   * of the argument to within 0.25% bias.
   *
   * @param x an integer.
   * @return a hash value with good avalanching properties.
   */
  public static int murmurHash3(int x) {
    x ^= x >>> 16;
    x *= 0x85ebca6b;
    x ^= x >>> 13;
    x *= 0xc2b2ae35;
    x ^= x >>> 16;
    return x;
  }
  /**
   * Avalanches the bits of a long integer by applying the finalisation step of MurmurHash3.
   *
   * <p>This method implements the finalisation step of Austin Appleby's <a
   * href="http://code.google.com/p/smhasher/">MurmurHash3</a>. Its purpose is to avalanche the bits
   * of the argument to within 0.25% bias.
   *
   * @param x a long integer.
   * @return a hash value with good avalanching properties.
   */
  public static long murmurHash3(long x) {
    x ^= x >>> 33;
    x *= 0xff51afd7ed558ccdL;
    x ^= x >>> 33;
    x *= 0xc4ceb9fe1a85ec53L;
    x ^= x >>> 33;
    return x;
  }
  /**
   * Quickly mixes the bits of an integer.
   *
   * <p>This method mixes the bits of the argument by multiplying by the golden ratio and
   * xorshifting the result. It is borrowed from <a
   * href="https://github.com/OpenHFT/Koloboke">Koloboke</a>, and it has slightly worse behaviour
   * than {@link #murmurHash3(int)} (in open-addressing hash tables the average number of probes is
   * slightly larger), but it's much faster.
   *
   * @param x an integer.
   * @return a hash value obtained by mixing the bits of {@code x}.
   * @see #invMix(int)
   */
  public static int mix(final int x) {
    final int h = x * INT_PHI;
    return h ^ (h >>> 16);
  }
  /**
   * The inverse of {@link #mix(int)}. This method is mainly useful to create unit tests.
   *
   * @param x an integer.
   * @return a value that passed through {@link #mix(int)} would give {@code x}.
   */
  public static int invMix(final int x) {
    return (x ^ x >>> 16) * INV_INT_PHI;
  }
  /**
   * Quickly mixes the bits of a long integer.
   *
   * <p>This method mixes the bits of the argument by multiplying by the golden ratio and
   * xorshifting twice the result. It is borrowed from <a
   * href="https://github.com/OpenHFT/Koloboke">Koloboke</a>, and it has slightly worse behaviour
   * than {@link #murmurHash3(long)} (in open-addressing hash tables the average number of probes is
   * slightly larger), but it's much faster.
   *
   * @param x a long integer.
   * @return a hash value obtained by mixing the bits of {@code x}.
   * @see #invMix(long)
   */
  public static long mix(final long x) {
    long h = x * LONG_PHI;
    h ^= h >>> 32;
    return h ^ (h >>> 16);
  }
  /**
   * The inverse of {@link #mix(long)}. This method is mainly useful to create unit tests.
   *
   * @param x a long integer.
   * @return a value that passed through {@link #mix(long)} would give {@code x}.
   */
  public static long invMix(long x) {
    x ^= x >>> 32;
    x ^= x >>> 16;
    return (x ^ x >>> 32) * INV_LONG_PHI;
  }
  /**
   * Returns the hash code that would be returned by {@link Float#hashCode()}.
   *
   * @param f a float.
   * @return the same code as {@link Float#hashCode() new Float(f).hashCode()}.
   */
  public static int float2int(final float f) {
    return Float.floatToRawIntBits(f);
  }
  /**
   * Returns the hash code that would be returned by {@link Double#hashCode()}.
   *
   * @param d a double.
   * @return the same code as {@link Double#hashCode() new Double(f).hashCode()}.
   */
  public static int double2int(final double d) {
    final long l = Double.doubleToRawLongBits(d);
    return (int) (l ^ (l >>> 32));
  }
  /**
   * Returns the hash code that would be returned by {@link Long#hashCode()}.
   *
   * @param l a long.
   * @return the same code as {@link Long#hashCode() new Long(f).hashCode()}.
   */
  public static int long2int(final long l) {
    return (int) (l ^ (l >>> 32));
  }
  /**
   * Returns the least power of two greater than or equal to the specified value.
   *
   * <p>Note that this function will return 1 when the argument is 0.
   *
   * @param x an integer smaller than or equal to 2<sup>30</sup>.
   * @return the least power of two greater than or equal to the specified value.
   */
  public static int nextPowerOfTwo(int x) {
    if (x == 0) {
      return 1;
    }
    // Smear the highest set bit into every lower position, then add 1.
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    return (x | x >> 16) + 1;
  }
  /**
   * Returns the least power of two greater than or equal to the specified value.
   *
   * <p>Note that this function will return 1 when the argument is 0.
   *
   * @param x a long integer smaller than or equal to 2<sup>62</sup>.
   * @return the least power of two greater than or equal to the specified value.
   */
  public static long nextPowerOfTwo(long x) {
    if (x == 0) {
      return 1;
    }
    // Smear the highest set bit into every lower position, then add 1.
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    return (x | x >> 32) + 1;
  }
  /**
   * Returns the maximum number of entries that can be filled before rehashing.
   *
   * @param n the size of the backing array.
   * @param f the load factor.
   * @return the maximum number of entries before rehashing.
   */
  public static int maxFill(final int n, final float f) {
    /* We must guarantee that there is always at least
     * one free entry (even with pathological load factors). */
    return Math.min((int) Math.ceil(n * f), n - 1);
  }
  /**
   * Returns the maximum number of entries that can be filled before rehashing.
   *
   * @param n the size of the backing array.
   * @param f the load factor.
   * @return the maximum number of entries before rehashing.
   */
  public static long maxFill(final long n, final float f) {
    /* We must guarantee that there is always at least
     * one free entry (even with pathological load factors). */
    return Math.min((long) Math.ceil(n * f), n - 1);
  }
  /**
   * Returns the least power of two smaller than or equal to 2<sup>30</sup> and larger than or equal
   * to {@code Math.ceil(expected / f)}.
   *
   * @param expected the expected number of elements in a hash table.
   * @param f the load factor.
   * @return the minimum possible size for a backing array.
   * @throws IllegalArgumentException if the necessary size is larger than 2<sup>30</sup>.
   */
  public static int arraySize(final int expected, final float f) {
    final long s = Math.max(2, nextPowerOfTwo((long) Math.ceil(expected / f)));
    if (s > (1 << 30)) {
      throw new IllegalArgumentException(
          "Too large (" + expected + " expected elements with load factor " + f + ")");
    }
    return (int) s;
  }
  /**
   * Returns the least power of two larger than or equal to {@code Math.ceil(expected / f)}.
   *
   * @param expected the expected number of elements in a hash table.
   * @param f the load factor.
   * @return the minimum possible size for a backing big array.
   */
  public static long bigArraySize(final long expected, final float f) {
    return nextPowerOfTwo((long) Math.ceil(expected / f));
  }
}
| 6,285 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/it/unimi/dsi/fastutil | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/it/unimi/dsi/fastutil/objects/ObjectOpenHashMap.java | /*
* Copyright (C) 2002-2017 Sebastiano Vigna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.unimi.dsi.fastutil.objects;
import static it.unimi.dsi.fastutil.HashCommon.arraySize;
import static it.unimi.dsi.fastutil.HashCommon.maxFill;
import it.unimi.dsi.fastutil.HashCommon;
import java.util.Arrays;
/**
* NOTE: This source code was copied from the <a href="http://fastutil.di.unimi.it/">fastutil</a>
* project, and has been modified.
*
* <p>A type-specific hash map with a fast, small-footprint implementation.
*
* <p>Instances of this class use a hash table to represent a map. The table is filled up to a
* specified <em>load factor</em>, and then doubled in size to accommodate new entries. If the table
* is emptied below <em>one fourth</em> of the load factor, it is halved in size; however, the table
* is never reduced to a size smaller than that at creation time: this approach makes it possible to
* create maps with a large capacity in which insertions and deletions do not cause immediately
* rehashing. Moreover, halving is not performed when deleting entries from an iterator, as it would
* interfere with the iteration process.
*
* @author Sebastiano Vigna
* @see HashCommon
* @see <a href="http://fastutil.di.unimi.it/">fastutil</a>
*/
public final class ObjectOpenHashMap<K, V> {
  /** The initial default size of a hash table. */
  private static final int DEFAULT_INITIAL_SIZE = 16;
  /** The default load factor of a hash table. */
  private static final float DEFAULT_LOAD_FACTOR = .75f;
  /** We never resize below this threshold, which is the construction-time {#n}. */
  private final transient int minN;
  /** The acceptable load factor. */
  private final float f;
  /** The array of keys. */
  private transient K[] key;
  /** The array of values. */
  private transient V[] value;
  /** The mask for wrapping a position counter. */
  private transient int mask;
  /** Whether this map contains the key zero. */
  private transient boolean containsNullKey;
  /** The current table size. */
  private transient int n;
  /** Threshold after which we rehash. It must be the table size times {@link #f}. */
  private transient int maxFill;
  /** Number of entries in the set (including the key zero, if present). */
  private int size;
  /**
   * Creates a new hash map.
   *
   * <p>The actual table size will be the least power of two greater than {@code expected}/{@code
   * f}.
   *
   * @param expected the expected number of elements in the hash map.
   * @param f the load factor.
   */
  @SuppressWarnings({"unchecked", "WeakerAccess"})
  public ObjectOpenHashMap(final int expected, final float f) {
    if (f <= 0 || f > 1) {
      throw new IllegalArgumentException(
          "Load factor must be greater than 0 and smaller than or equal to 1");
    }
    if (expected < 0) {
      throw new IllegalArgumentException("The expected number of elements must be nonnegative");
    }
    this.f = f;
    minN = n = arraySize(expected, f);
    mask = n - 1;
    maxFill = maxFill(n, f);
    // Slot n (one past the power-of-two table) is reserved for the null key.
    key = (K[]) new Object[n + 1];
    value = (V[]) new Object[n + 1];
  }
  /**
   * Creates a new hash map with {@link #DEFAULT_LOAD_FACTOR} as load factor.
   *
   * @param expected the expected number of elements in the hash map.
   */
  @SuppressWarnings({"WeakerAccess", "unused"})
  public ObjectOpenHashMap(final int expected) {
    this(expected, DEFAULT_LOAD_FACTOR);
  }
  /**
   * Creates a new hash map with initial expected {@link #DEFAULT_INITIAL_SIZE} entries and {@link
   * #DEFAULT_LOAD_FACTOR} as load factor.
   */
  @SuppressWarnings({"WeakerAccess", "unused"})
  public ObjectOpenHashMap() {
    this(DEFAULT_INITIAL_SIZE, DEFAULT_LOAD_FACTOR);
  }
  /**
   * Associates {@code v} with {@code k}, replacing any existing mapping.
   *
   * @param k the key (may be {@code null}).
   * @param v the value to associate.
   * @return the value previously associated with {@code k}, or {@code null} if there was none.
   */
  @SuppressWarnings({"unused"})
  public V put(final K k, final V v) {
    final int pos = find(k);
    if (pos < 0) {
      // find() returned -(insertionPoint + 1): the key is absent.
      insert(-pos - 1, k, v);
      return null;
    }
    final V oldValue = value[pos];
    value[pos] = v;
    return oldValue;
  }
  /**
   * Returns the value associated with {@code k}, or {@code null} if the key is absent (note that
   * {@code null} is also a legal value, so a {@code null} return does not imply absence).
   */
  @SuppressWarnings({"unchecked", "unused"})
  public V get(final Object k) {
    if (k == null) {
      return containsNullKey ? value[n] : null;
    }
    K curr;
    final K[] key = this.key;
    int pos;
    // The starting point.
    if (((curr = key[pos = (it.unimi.dsi.fastutil.HashCommon.mix((k).hashCode())) & mask])
        == null)) {
      return null;
    }
    if (((k).equals(curr))) {
      return value[pos];
    }
    // There's always an unused entry.
    while (true) {
      if (((curr = key[pos = (pos + 1) & mask]) == null)) {
        return null;
      }
      if (((k).equals(curr))) {
        return value[pos];
      }
    }
  }
  /** Returns whether this map contains a mapping for {@code k} ({@code null} keys supported). */
  @SuppressWarnings({"unchecked", "unused"})
  public boolean containsKey(final Object k) {
    if (k == null) {
      return containsNullKey;
    }
    K curr;
    final K[] key = this.key;
    int pos;
    // The starting point.
    if (((curr = key[pos = (it.unimi.dsi.fastutil.HashCommon.mix((k).hashCode())) & mask])
        == null)) {
      return false;
    }
    if (((k).equals(curr))) {
      return true;
    }
    // There's always an unused entry.
    while (true) {
      if (((curr = key[pos = (pos + 1) & mask]) == null)) {
        return false;
      }
      if (((k).equals(curr))) {
        return true;
      }
    }
  }
  /**
   * Removes the mapping for {@code k}, if present.
   *
   * @return the value previously associated with {@code k}, or {@code null} if there was none.
   */
  @SuppressWarnings({"unchecked", "unused"})
  public V remove(final Object k) {
    if (k == null) {
      if (containsNullKey) {
        return removeNullEntry();
      }
      return null;
    }
    K curr;
    final K[] key = this.key;
    int pos;
    // The starting point.
    if (((curr = key[pos = (it.unimi.dsi.fastutil.HashCommon.mix((k).hashCode())) & mask])
        == null)) {
      return null;
    }
    if (((k).equals(curr))) {
      return removeEntry(pos);
    }
    while (true) {
      if (((curr = key[pos = (pos + 1) & mask]) == null)) {
        return null;
      }
      if (((k).equals(curr))) {
        return removeEntry(pos);
      }
    }
  }
  /** Removes all mappings; the table keeps its current capacity (no shrink happens here). */
  @SuppressWarnings({"unused"})
  public void clear() {
    if (size == 0) {
      return;
    }
    size = 0;
    containsNullKey = false;
    Arrays.fill(key, (null));
    Arrays.fill(value, null);
  }
  /** Returns the number of key/value mappings in this map. */
  @SuppressWarnings({"unused"})
  public int size() {
    return size;
  }
  /** Returns {@code true} if this map contains no mappings. */
  @SuppressWarnings({"unused"})
  public boolean isEmpty() {
    return size == 0;
  }
  // -------------------------------------------------------------------------------------------------------------
  /** Number of entries stored in the open-addressing table proper (excludes the null-key slot). */
  private int realSize() {
    return containsNullKey ? size - 1 : size;
  }
  /** Removes the entry at {@code pos}, repairs the probe chain, and shrinks if sparse enough. */
  private V removeEntry(final int pos) {
    final V oldValue = value[pos];
    value[pos] = null;
    size--;
    shiftKeys(pos);
    if (n > minN && size < maxFill / 4 && n > DEFAULT_INITIAL_SIZE) {
      rehash(n / 2);
    }
    return oldValue;
  }
  /** Removes the reserved null-key entry at slot {@code n}, shrinking if sparse enough. */
  private V removeNullEntry() {
    containsNullKey = false;
    key[n] = null;
    final V oldValue = value[n];
    value[n] = null;
    size--;
    if (n > minN && size < maxFill / 4 && n > DEFAULT_INITIAL_SIZE) {
      rehash(n / 2);
    }
    return oldValue;
  }
  /**
   * Locates {@code k} by linear probing.
   *
   * @return the slot holding {@code k} if present; otherwise {@code -(insertionPoint + 1)}.
   */
  @SuppressWarnings("unchecked")
  private int find(final K k) {
    if (((k) == null)) {
      return containsNullKey ? n : -(n + 1);
    }
    K curr;
    final K[] key = this.key;
    int pos;
    // The starting point.
    if (((curr = key[pos = (it.unimi.dsi.fastutil.HashCommon.mix((k).hashCode())) & mask])
        == null)) {
      return -(pos + 1);
    }
    if (((k).equals(curr))) {
      return pos;
    }
    // There's always an unused entry.
    while (true) {
      if (((curr = key[pos = (pos + 1) & mask]) == null)) {
        return -(pos + 1);
      }
      if (((k).equals(curr))) {
        return pos;
      }
    }
  }
  /** Stores the new entry at {@code pos} and grows the table once {@link #maxFill} is reached. */
  private void insert(final int pos, final K k, final V v) {
    if (pos == n) {
      containsNullKey = true;
    }
    key[pos] = k;
    value[pos] = v;
    if (size++ >= maxFill) {
      rehash(arraySize(size + 1, f));
    }
  }
  /**
   * Shifts left entries with the specified hash code, starting at the specified position, and
   * empties the resulting free entry.
   *
   * @param pos a starting position.
   */
  private void shiftKeys(int pos) {
    // Shift entries with the same hash.
    int last, slot;
    K curr;
    final K[] key = this.key;
    for (; ; ) {
      pos = ((last = pos) + 1) & mask;
      for (; ; ) {
        if (((curr = key[pos]) == null)) {
          key[last] = (null);
          value[last] = null;
          return;
        }
        slot = (it.unimi.dsi.fastutil.HashCommon.mix((curr).hashCode())) & mask;
        // Only move the entry back if its home slot lies outside the gap..pos range.
        if (last <= pos ? last >= slot || slot > pos : last >= slot && slot > pos) {
          break;
        }
        pos = (pos + 1) & mask;
      }
      key[last] = curr;
      value[last] = value[pos];
    }
  }
  /**
   * Rehashes the map.
   *
   * <p>This method implements the basic rehashing strategy, and may be overridden by subclasses
   * implementing different rehashing strategies (e.g., disk-based rehashing). However, you should
   * not override this method unless you understand the internal workings of this class.
   *
   * @param newN the new size
   */
  @SuppressWarnings({"unchecked", "StatementWithEmptyBody"})
  private void rehash(final int newN) {
    final K key[] = this.key;
    final V value[] = this.value;
    final int mask = newN - 1; // Note that this is used by the hashing macro
    final K newKey[] = (K[]) new Object[newN + 1];
    final V newValue[] = (V[]) new Object[newN + 1];
    int i = n, pos;
    for (int j = realSize(); j-- != 0; ) {
      // Scan backwards to the next occupied slot.
      while (((key[--i]) == null)) {}
      if (!((newKey[pos = (it.unimi.dsi.fastutil.HashCommon.mix((key[i]).hashCode())) & mask])
          == null)) {
        while (!((newKey[pos = (pos + 1) & mask]) == null)) {}
      }
      newKey[pos] = key[i];
      newValue[pos] = value[i];
    }
    // Carry over the null-key slot value (the null key itself is tracked via containsNullKey).
    newValue[newN] = value[n];
    n = newN;
    this.mask = mask;
    maxFill = maxFill(n, f);
    this.key = newKey;
    this.value = newValue;
  }
  /**
   * Returns a hash code for this map.
   *
   * <p>This method overrides the generic method provided by the superclass. Since {@code equals()}
   * is not overridden, it is important that the value returned by this method is the same value as
   * the one returned by the overridden method.
   *
   * @return a hash code for this map.
   */
  @Override
  public int hashCode() {
    int h = 0;
    for (int j = realSize(), i = 0, t = 0; j-- != 0; ) {
      while (((key[i]) == null)) {
        i++;
      }
      if (this != key[i]) {
        t = ((key[i]).hashCode());
      }
      if (this != value[i]) {
        t ^= ((value[i]) == null ? 0 : (value[i]).hashCode());
      }
      h += t;
      i++;
    }
    // Zero / null keys have hash zero.
    if (containsNullKey) {
      h += ((value[n]) == null ? 0 : (value[n]).hashCode());
    }
    return h;
  }
}
| 6,286 |
0 | Create_ds/connect-api-specification/swagger-templates/java/static/java/com/squareup | Create_ds/connect-api-specification/swagger-templates/java/static/java/com/squareup/connect/CompleteResponse.java | package com.squareup.connect;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Link;
import javax.ws.rs.core.MultivaluedMap;
import org.glassfish.jersey.uri.UriComponent;
/**
 * Pairs a deserialized response body with the raw JAX-RS {@link Response} it came from, so
 * callers can reach response metadata such as headers and pagination links in addition to
 * the payload itself.
 *
 * @param <T> type of the deserialized response body
 */
public class CompleteResponse<T> {
    private final T data;
    private final Response response;

    public CompleteResponse(T data, Response response) {
        this.data = data;
        this.response = response;
    }

    /** Returns the deserialized response body. */
    public T getData() {
        return data;
    }

    /** Returns the underlying JAX-RS response. */
    public Response getResponse() {
        return response;
    }

    /**
     * Extracts the {@code batch_token} query parameter from this response's pagination link,
     * or returns {@code null} when no next page is available.
     *
     * <p>NOTE(review): the link relation is looked up with the literal string {@code 'next'}
     * — single quotes included — presumably because the server emits the rel quoted that way;
     * confirm against the API's Link header format before changing it.
     */
    public String getBatchToken() {
        Link nextPageLink = response.getLink("'next'");
        if (nextPageLink == null) {
            return null;
        }
        MultivaluedMap<String, String> queryParams =
                UriComponent.decodeQuery(nextPageLink.getUri(), true);
        return queryParams.getFirst("batch_token");
    }
}
| 6,287 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-extension/src/test/java/org/apache/geronimo/opentracing/extension | Create_ds/geronimo-opentracing/geronimo-opentracing-extension/src/test/java/org/apache/geronimo/opentracing/extension/proxy/TracingProxyFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.opentracing.extension.proxy;
import static java.util.stream.Collectors.joining;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.fail;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.geronimo.microprofile.opentracing.common.config.GeronimoOpenTracingConfig;
import org.apache.geronimo.microprofile.opentracing.common.impl.FinishedSpan;
import org.apache.geronimo.microprofile.opentracing.common.impl.GeronimoTracer;
import org.apache.geronimo.microprofile.opentracing.common.impl.IdGenerator;
import org.apache.geronimo.microprofile.opentracing.common.impl.ScopeManagerImpl;
import org.apache.geronimo.microprofile.opentracing.common.impl.SpanImpl;
import org.testng.annotations.Test;
/**
 * Unit test for {@link TracingProxyFactory}: verifies that every invocation through the
 * decorated proxy finishes exactly one span named after the invoked interface method, and
 * that a thrown exception is recorded on the span as an error tag plus error log entries.
 */
public class TracingProxyFactoryTest {
    @Test
    public void proxy() {
        // Collects every span finished by the tracer so each call can be asserted on.
        final Collection<FinishedSpan> spans = new ArrayList<>();
        // Config stub that always falls back to the provided default value.
        final GeronimoOpenTracingConfig config = (value, def) -> def;
        IdGenerator generator = new IdGenerator();
        generator.setConfig(config);
        generator.init();
        // Tracer is wired by hand since there is no CDI container in this test.
        final GeronimoTracer tracer = new GeronimoTracer();
        tracer.setConfig(config);
        tracer.setIdGenerator(generator);
        tracer.setScopeManager(new ScopeManagerImpl());
        tracer.setFinishedSpanEvent(spans::add);
        tracer.init();
        // Wrap a simple in-line Api implementation with the tracing proxy under test.
        final Api wrapped = new TracingProxyFactory()
                .decorate(tracer, new Api() {
                    @Override
                    public String ok() {
                        return "yeah";
                    }
                    @Override
                    public void error() {
                        throw new IllegalStateException("expected error");
                    }
                    @Override
                    public String foo(final String bar) {
                        return "other/" + bar;
                    }
                });
        // No-arg success: one span named after the declaring interface method.
        {
            assertEquals("yeah", wrapped.ok());
            assertSpan(spans, "org.apache.geronimo.opentracing.extension.proxy.TracingProxyFactoryTest$Api.ok");
            spans.clear();
        }
        // Success with an argument: return value passes through untouched.
        {
            assertEquals("other/something", wrapped.foo("something"));
            assertSpan(spans, "org.apache.geronimo.opentracing.extension.proxy.TracingProxyFactoryTest$Api.foo");
            spans.clear();
        }
        // Failure path: the original exception propagates, and the span carries
        // the error tag plus the "event"/"error.object" log fields.
        {
            try {
                wrapped.error();
                fail();
            } catch (final IllegalStateException ise) {
                // no-op
            }
            assertSpan(spans, "org.apache.geronimo.opentracing.extension.proxy.TracingProxyFactoryTest$Api.error");
            final SpanImpl span = toSpanImpl(spans);
            assertEquals(Boolean.TRUE, span.getTags().get("error"));
            assertEquals(
                "error.object=java.lang.IllegalStateException: expected error\nevent=error",
                span.getLogs().stream()
                    .map(SpanImpl.Log::getFields)
                    .flatMap(m -> m.entrySet().stream())
                    .map(it -> it.getKey() + "=" + it.getValue())
                    .sorted()
                    .collect(joining("\n")));
            spans.clear();
        }
    }
    // Asserts exactly one finished span exists and that it carries the given operation name.
    private void assertSpan(final Collection<FinishedSpan> spans, final String operation) {
        assertEquals(1, spans.size());
        final SpanImpl span = toSpanImpl(spans);
        assertEquals(operation, span.getName());
    }
    // Unwraps the first collected FinishedSpan to the implementation type for field access.
    private SpanImpl toSpanImpl(final Collection<FinishedSpan> spans) {
        return SpanImpl.class.cast(spans.iterator().next().getSpan());
    }
    /** Minimal API proxied in the test: a success path, a failure path, and an arg-taking path. */
    public interface Api {
        String ok();
        void error();
        String foo(String bar);
    }
}
| 6,288 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-extension/src/main/java/org/apache/geronimo/opentracing/extension | Create_ds/geronimo-opentracing/geronimo-opentracing-extension/src/main/java/org/apache/geronimo/opentracing/extension/proxy/TracingProxyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.opentracing.extension.proxy;
import static java.util.Collections.emptyMap;
import static java.util.Optional.ofNullable;
import java.io.Serializable;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.stream.Stream;
import io.opentracing.Scope;
import io.opentracing.Span;
import io.opentracing.Tracer;
import io.opentracing.tag.Tags;
/**
 * Wraps an instance behind a JDK dynamic proxy that opens an OpenTracing span around every
 * method invocation. The span is named {@code declaringClass.methodName}, tagged as a server
 * "proxy" component, parented to the currently active span if any, and marked with an error
 * tag/log when the invocation throws.
 */
public class TracingProxyFactory {
    /** Decorates {@code instance} with no extra span tags; APIs are taken from its interfaces. */
    public <T> T decorate(final Tracer tracer, final T instance) {
        return decorate(tracer, instance, emptyMap());
    }

    /**
     * Decorates {@code instance}, deriving the proxied APIs from the interfaces its class
     * implements directly (the first interface becomes the primary, cast-to type).
     *
     * @param tags extra tags added to every span created by the proxy.
     * @throws IllegalArgumentException if the instance's class implements no interface.
     */
    public <T> T decorate(final Tracer tracer, final T instance, final Map<String, String> tags) {
        final Class<?>[] interfaces = instance.getClass().getInterfaces();
        if (interfaces.length == 0) {
            throw new IllegalArgumentException("Can't determine the API to proxy: " + instance);
        }
        final Class<T> mainApi = (Class<T>) interfaces[0];
        final Class<?>[] otherApis = interfaces.length == 1 ?
                new Class<?>[0] : Stream.of(interfaces).skip(1).toArray(Class[]::new);
        return decorate(tracer, instance, mainApi, tags, otherApis);
    }

    /**
     * Decorates {@code instance} with an explicit set of APIs to expose on the proxy.
     * The proxy class is defined in the context class loader (falling back to the system
     * class loader when none is set).
     */
    public <T> T decorate(final Tracer tracer,
                          final T instance,
                          final Class<T> mainApi,
                          final Map<String, String> tags,
                          final Class<?>... otherApis) {
        return mainApi.cast(Proxy.newProxyInstance(
                ofNullable(Thread.currentThread().getContextClassLoader()).orElseGet(ClassLoader::getSystemClassLoader),
                Stream.concat(Stream.of(mainApi), Stream.of(otherApis)).toArray(Class[]::new),
                new TracingHandler(instance, tracer, tags)));
    }

    /** Invocation handler that creates one span per proxied call and delegates to the target. */
    private static class TracingHandler implements InvocationHandler, Serializable {
        private final Object delegate;
        private final Tracer tracer;
        private final Map<String, String> tags;

        private TracingHandler(final Object delegate, final Tracer tracer, final Map<String, String> tags) {
            this.delegate = delegate;
            this.tracer = tracer;
            this.tags = tags;
        }

        @Override
        public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
            final Tracer.SpanBuilder builder = tracer.buildSpan(method.getDeclaringClass().getName() + "." + method.getName());
            builder.withTag(Tags.SPAN_KIND.getKey(), Tags.SPAN_KIND_SERVER);
            builder.withTag(Tags.COMPONENT.getKey(), "proxy");
            tags.forEach(builder::withTag);
            ofNullable(tracer.activeSpan()).ifPresent(builder::asChildOf);
            final Scope scope = builder.startActive(false /*just handle span inheritance for async case*/);
            // For synchronous calls the span is finished in the finally block below. For a
            // CompletionStage result the span must outlive this method, so doFinish is
            // cleared and the span is finished by the stage's completion callback instead.
            boolean doFinish = true;
            try {
                final Object result = method.invoke(delegate, args);
                if (CompletionStage.class.isInstance(result)) {
                    doFinish = false;
                    final CompletionStage<?> stage = CompletionStage.class.cast(result);
                    return stage.handle((r, e) -> {
                        try {
                            if (e != null) {
                                onError(scope, e);
                                return rethrow(e);
                            }
                            return r;
                        } finally {
                            // Async completion: finish the span once the stage settles.
                            scope.span().finish();
                        }
                    });
                }
                return result;
            } catch (final InvocationTargetException ite) {
                // Unwrap so callers see the target's exception, not the reflective wrapper.
                onError(scope, ite.getTargetException());
                throw ite.getTargetException();
            } finally {
                if (doFinish) {
                    scope.span().finish();
                }
                // The scope (thread-local activation) is always closed here, even when the
                // span itself is finished later by the async callback.
                scope.close();
            }
        }

        // Rethrows e unwrapped when unchecked; otherwise wraps it in IllegalStateException.
        // Declared to return Object so it can be used as the "value" of a lambda branch,
        // but it never actually returns.
        private Object rethrow(final Throwable e) {
            if (RuntimeException.class.isInstance(e)) {
                throw RuntimeException.class.cast(e);
            }
            if (Error.class.isInstance(e)) {
                throw Error.class.cast(e);
            }
            throw new IllegalStateException(e);
        }

        // Marks the span as failed: error tag plus an OpenTracing-style error log entry.
        private void onError(final Scope scope, final Throwable e) {
            final Span span = scope.span();
            Tags.ERROR.set(span, true);
            final Map<String, Object> logs = new LinkedHashMap<>();
            logs.put("event", Tags.ERROR.getKey());
            logs.put("error.object", e);
            span.log(logs);
        }
    }
}
| 6,289 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/test/java/org/apache/geronimo/microprofile/opentracing | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/test/java/org/apache/geronimo/microprofile/opentracing/osgi/KarafTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.osgi;
import static java.util.Objects.requireNonNull;
import static org.apache.ziplock.JarLocation.jarLocation;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.ops4j.pax.exam.CoreOptions.bundle;
import static org.ops4j.pax.exam.CoreOptions.maven;
import static org.ops4j.pax.exam.CoreOptions.options;
import static org.ops4j.pax.exam.CoreOptions.systemPackage;
import static org.ops4j.pax.exam.CoreOptions.url;
import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.features;
import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.karafDistributionConfiguration;
import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.keepRuntimeFolder;
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Dictionary;
import java.util.Hashtable;
import javax.inject.Inject;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.ops4j.pax.exam.Configuration;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.junit.PaxExam;
import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
import org.ops4j.pax.exam.spi.reactors.PerClass;
import org.ops4j.pax.exam.util.Filter;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceRegistration;
import org.osgi.service.event.Event;
import org.osgi.service.event.EventAdmin;
import org.osgi.service.event.EventConstants;
import org.osgi.service.event.EventHandler;
import io.opentracing.Tracer;
/**
 * Pax-Exam integration test that boots an embedded Karaf container with the opentracing
 * bundles installed and verifies that finishing a span publishes an EventAdmin event on
 * the {@code geronimo/microprofile/opentracing/zipkinSpan} topic.
 */
@Ignore("event admin setup is not yet right so the test can't pass")
@RunWith(PaxExam.class)
@ExamReactorStrategy(PerClass.class)
public class KarafTest {
    // Injected by Pax-Exam from the running container.
    @Inject
    private BundleContext bc;
    @Inject
    @Filter("(objectClass=org.osgi.service.event.EventAdmin)")
    private EventAdmin eventAdmin;
    @Inject
    @Filter("(objectClass=io.opentracing.Tracer)")
    private Tracer tracer;
    /**
     * Builds the Karaf distribution configuration: the test feature from the classpath plus
     * the locally built opentracing-common and opentracing-osgi bundles.
     */
    @Configuration
    public Option[] config() throws MalformedURLException {
        final File testClasses = jarLocation(KarafTest.class);
        final String projectVersion = System.getProperty("project.version", "1.0.1-SNAPSHOT");
        final URL testFeature = requireNonNull(new File(testClasses, "features.xml").toURI().toURL());
        return options(
                karafDistributionConfiguration()
                        .frameworkUrl(maven()
                                .groupId("org.apache.karaf")
                                .artifactId("apache-karaf")
                                .version(System.getProperty("karaf.version", "4.2.1"))
                                .type("tar.gz"))
                        .unpackDirectory(new File("target/karaf"))
                        .useDeployFolder(false)
                        .runEmbedded(true),
                keepRuntimeFolder(),
                features(url(testFeature.toExternalForm()), "test"),
                bundle(new File(testClasses, "../../../geronimo-opentracing-common/target/geronimo-opentracing-common-" + projectVersion + ".jar").toURI().toURL().toExternalForm()),
                bundle(new File(testClasses, "../geronimo-opentracing-osgi-" + projectVersion + ".jar").toURI().toURL().toExternalForm())
        );
    }
    @Test
    public void checkBusGetSpans() {
        assertNotNull(tracer);
        // Subscribe an EventHandler to the span topic before triggering any span.
        final Dictionary<String, Object> props = new Hashtable<>();
        props.put(EventConstants.EVENT_TOPIC, "geronimo/microprofile/opentracing/zipkinSpan");
        final Collection<Event> events = new ArrayList<>();
        final ServiceRegistration<EventHandler> registration = bc.registerService(EventHandler.class, event -> {
            synchronized (events) {
                events.add(event);
            }
        }, props);
        // Finishing a span should publish exactly one event on the topic above.
        tracer.buildSpan("test").start().finish();
        registration.unregister();
        assertEquals(1, events.size());
    }
}
| 6,290 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/test/java/org/apache/geronimo/microprofile/opentracing/osgi | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/test/java/org/apache/geronimo/microprofile/opentracing/osgi/endpoint/HiWorld.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.osgi.endpoint;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
/**
 * Minimal JAX-RS endpoint used by the integration tests: GET /hi answers "world".
 */
@Path("hi")
public class HiWorld {
    /** Fixed payload returned to every caller. */
    private static final String PAYLOAD = "world";

    /**
     * @return the constant greeting body
     */
    @GET
    public String get() {
        return PAYLOAD;
    }
}
| 6,291 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/main/java/org/apache/geronimo/microprofile/opentracing | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/main/java/org/apache/geronimo/microprofile/opentracing/osgi/OSGiContainer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.osgi;
import org.apache.geronimo.microprofile.opentracing.common.spi.Container;
/**
 * {@link Container} implementation backed by the service trackers registered in
 * {@link OpenTracingActivator#INSTANCES}: lookups resolve to the currently
 * tracked OSGi service instance.
 */
public class OSGiContainer implements Container {
    /**
     * Resolves the tracked service for the requested type.
     *
     * @param type the service type to look up
     * @param <T> the expected service type
     * @return the tracked instance, or {@code null} when the type is not tracked
     *         or no service instance is currently available
     */
    @Override
    public <T> T lookup(final Class<T> type) {
        // guard against types the activator does not track: the previous
        // unconditional INSTANCES.get(type).getInstance() threw a NullPointerException
        // there, while callers already handle a null return (they wrap it in Optional)
        final OpenTracingActivator.Tracked<?> tracked = OpenTracingActivator.INSTANCES.get(type);
        return tracked == null ? null : type.cast(tracked.getInstance());
    }
}
| 6,292 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/main/java/org/apache/geronimo/microprofile/opentracing | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/main/java/org/apache/geronimo/microprofile/opentracing/osgi/ConfigAdminOpenTracingConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.osgi;
import static java.util.Optional.ofNullable;
import java.io.IOException;
import org.apache.geronimo.microprofile.opentracing.common.config.GeronimoOpenTracingConfig;
import org.osgi.service.cm.Configuration;
import org.osgi.service.cm.ConfigurationAdmin;
/**
 * {@link GeronimoOpenTracingConfig} reading its values from the ConfigAdmin
 * configuration with PID {@code geronimo.opentracing}, when a
 * {@link ConfigurationAdmin} service is available.
 */
public class ConfigAdminOpenTracingConfig implements GeronimoOpenTracingConfig {
    // null when no ConfigurationAdmin service was tracked at construction time
    private volatile Configuration delegate;

    public ConfigAdminOpenTracingConfig() {
        ofNullable(OpenTracingActivator.INSTANCES.get(ConfigurationAdmin.class))
                .map(OpenTracingActivator.Tracked::getInstance)
                .map(ConfigurationAdmin.class::cast)
                .ifPresent(admin -> {
                    try {
                        delegate = admin.getConfiguration("geronimo.opentracing");
                    } catch (final IOException e) {
                        throw new IllegalArgumentException(e);
                    }
                });
    }

    /**
     * Reads a configuration entry.
     *
     * @param value the key to read
     * @param def the fallback returned when no value is available
     * @return the configured value, or {@code def} when there is no delegate,
     *         the configuration was never updated, or the key is absent
     */
    @Override
    public String read(final String value, final String def) {
        // Configuration#getProperties() returns null until the configuration is
        // first updated (OSGi CM spec) — the previous chain NPE'd on that case.
        return ofNullable(delegate)
                .map(Configuration::getProperties)
                .map(props -> props.get(value))
                .map(String::valueOf)
                .orElse(def);
    }
}
| 6,293 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/main/java/org/apache/geronimo/microprofile/opentracing | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/main/java/org/apache/geronimo/microprofile/opentracing/osgi/OpenTracingActivator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.osgi;
import static java.util.Collections.singletonMap;
import static java.util.Optional.ofNullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Dictionary;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;
import java.util.function.BiConsumer;
import javax.ws.rs.client.ClientRequestFilter;
import javax.ws.rs.client.ClientResponseFilter;
import javax.ws.rs.container.DynamicFeature;
import org.apache.geronimo.microprofile.opentracing.common.config.GeronimoOpenTracingConfig;
import org.apache.geronimo.microprofile.opentracing.common.impl.FinishedSpan;
import org.apache.geronimo.microprofile.opentracing.common.impl.GeronimoTracer;
import org.apache.geronimo.microprofile.opentracing.common.impl.IdGenerator;
import org.apache.geronimo.microprofile.opentracing.common.impl.ScopeManagerImpl;
import org.apache.geronimo.microprofile.opentracing.common.microprofile.client.OpenTracingClientRequestFilter;
import org.apache.geronimo.microprofile.opentracing.common.microprofile.client.OpenTracingClientResponseFilter;
import org.apache.geronimo.microprofile.opentracing.common.microprofile.server.GeronimoOpenTracingFeature;
import org.apache.geronimo.microprofile.opentracing.common.microprofile.zipkin.ZipkinConverter;
import org.apache.geronimo.microprofile.opentracing.common.microprofile.zipkin.ZipkinLogger;
import org.apache.geronimo.microprofile.opentracing.common.microprofile.zipkin.ZipkinSpan;
import org.apache.geronimo.microprofile.opentracing.common.spi.Container;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;
import org.osgi.framework.ServiceRegistration;
import org.osgi.service.cm.ConfigurationAdmin;
import org.osgi.service.event.Event;
import org.osgi.service.event.EventAdmin;
import org.osgi.service.event.EventConstants;
import org.osgi.service.event.EventHandler;
import org.osgi.util.tracker.ServiceTracker;
import org.osgi.util.tracker.ServiceTrackerCustomizer;
import io.opentracing.ScopeManager;
import io.opentracing.Tracer;
/**
 * Bundle activator wiring the Geronimo OpenTracing components together without CDI:
 * it instantiates the tracer stack manually, publishes each piece as an OSGi
 * service, and bridges finished spans to the EventAdmin bus.
 */
public class OpenTracingActivator implements BundleActivator {
    // Static registry backing OSGiContainer#lookup; presumably static because the
    // client-side lookups have no handle on the activator instance — confirm.
    static final Map<Class<?>, Tracked<?>> INSTANCES = new HashMap<>();

    // Everything registered in start(), unregistered symmetrically in stop().
    private final Collection<ServiceRegistration<?>> registrations = new ArrayList<>();

    // Kept as a field so stop() can call destroy() on it.
    private ZipkinLogger logger;

    /**
     * Creates the tracer stack (config, id generator, tracer, zipkin converter and
     * logger, JAX-RS filters/feature), registers each piece as an OSGi service and
     * adds two EventAdmin handlers forwarding span events to the converter and the
     * logger.
     *
     * @param context the bundle context used for service tracking and registration
     */
    @Override
    public void start(final BundleContext context) {
        // Track the services this bundle looks up through OSGiContainer.
        // NOTE(review): these trackers are never open()ed anywhere in this class,
        // so their customizer callbacks will not fire and the tracked instances
        // stay null — confirm whether open() happens elsewhere or is missing.
        INSTANCES.put(Container.class, new Tracked<>(context, Container.class, this::register));
        INSTANCES.put(Tracer.class, new Tracked<>(context, Tracer.class, this::register));
        INSTANCES.put(GeronimoOpenTracingConfig.class, new Tracked<>(context, GeronimoOpenTracingConfig.class, this::register));
        INSTANCES.put(ScopeManager.class, new Tracked<>(context, ScopeManager.class, this::register));
        INSTANCES.put(OpenTracingClientRequestFilter.class, new Tracked<>(context, OpenTracingClientRequestFilter.class, this::register));
        INSTANCES.put(OpenTracingClientResponseFilter.class, new Tracked<>(context, OpenTracingClientResponseFilter.class, this::register));
        INSTANCES.put(EventAdmin.class, new Tracked<>(context, EventAdmin.class, this::register));
        INSTANCES.put(ConfigurationAdmin.class, new Tracked<>(context, ConfigurationAdmin.class, this::register));

        // Manual wiring of the tracer stack (what CDI does in the cdi module).
        final OSGiContainer container = new OSGiContainer();
        final GeronimoOpenTracingConfig config = new ConfigAdminOpenTracingConfig();
        final ScopeManager scopeManager = new ScopeManagerImpl();
        final IdGenerator idGenerator = new IdGenerator();
        idGenerator.setConfig(config);
        idGenerator.init();
        final GeronimoTracer tracer = new GeronimoTracer();
        tracer.setConfig(config);
        tracer.setIdGenerator(idGenerator);
        tracer.setScopeManager(scopeManager);
        // Finished spans are broadcast on the EventAdmin bus; lookup is deferred so
        // a late-arriving EventAdmin is still honoured, and absent EventAdmin is a no-op.
        tracer.setFinishedSpanEvent(span -> ofNullable(container.lookup(EventAdmin.class)).ifPresent(ea ->
                ea.sendEvent(new Event("geronimo/microprofile/opentracing/finishedSpan", singletonMap("span", span)))));
        tracer.init();
        final ZipkinConverter zipkinConverter = new ZipkinConverter();
        zipkinConverter.setConfig(config);
        zipkinConverter.setIdGenerator(idGenerator);
        zipkinConverter.setZipkinSpanEvent(span -> ofNullable(container.lookup(EventAdmin.class)).ifPresent(ea ->
                ea.sendEvent(new Event("geronimo/microprofile/opentracing/zipkinSpan", singletonMap("span", span)))));
        zipkinConverter.init();
        logger = new ZipkinLogger();
        logger.setConfig(config);
        logger.init();
        final OpenTracingClientRequestFilter requestFilter = new OpenTracingClientRequestFilter();
        requestFilter.setTracer(tracer);
        requestFilter.setConfig(config);
        requestFilter.init();
        final OpenTracingClientResponseFilter responseFilter = new OpenTracingClientResponseFilter();
        final GeronimoOpenTracingFeature tracingFeature = new GeronimoOpenTracingFeature();
        tracingFeature.setConfig(config);
        tracingFeature.setContainer(container);
        tracingFeature.setTracer(tracer);

        // Publish the stack: plain services, JAX-RS whiteboard extensions, and the
        // two EventAdmin handlers chaining finishedSpan -> zipkinSpan -> logger.
        registrations.add(context.registerService(GeronimoOpenTracingConfig.class, config, new Hashtable<>()));
        registrations.add(context.registerService(Container.class, container, new Hashtable<>()));
        registrations.add(context.registerService(IdGenerator.class, idGenerator, new Hashtable<>()));
        registrations.add(context.registerService(ScopeManager.class, scopeManager, new Hashtable<>()));
        registrations.add(context.registerService(Tracer.class, tracer, new Hashtable<>()));
        registrations.add(context.registerService(ClientRequestFilter.class, requestFilter, newJaxRsExtensionProps()));
        registrations.add(context.registerService(ClientResponseFilter.class, responseFilter, newJaxRsExtensionProps()));
        registrations.add(context.registerService(DynamicFeature.class, tracingFeature, newJaxRsExtensionProps()));
        registrations.add(context.registerService(EventHandler.class,
                event -> zipkinConverter.onEvent(FinishedSpan.class.cast(event.getProperty("span"))),
                newEventHandlerProps("geronimo/microprofile/opentracing/finishedSpan")));
        registrations.add(context.registerService(EventHandler.class,
                event -> logger.onEvent(ZipkinSpan.class.cast(event.getProperty("span"))),
                newEventHandlerProps("geronimo/microprofile/opentracing/zipkinSpan")));
    }

    /**
     * Releases everything acquired in {@link #start}: closes the trackers, drops the
     * static registry, unregisters all services and shuts the zipkin logger down.
     *
     * @param context the bundle context (unused)
     */
    @Override
    public void stop(final BundleContext context) {
        INSTANCES.values().forEach(ServiceTracker::close);
        INSTANCES.clear();
        registrations.forEach(ServiceRegistration::unregister);
        // assumes start() completed, otherwise logger is null — the framework only
        // calls stop() after a successful start() so this should hold.
        logger.destroy();
    }

    /** Builds the properties subscribing an EventHandler to the given topic. */
    private Dictionary<String, Object> newEventHandlerProps(final String topic) {
        final Dictionary<String, Object> props = new Hashtable<>();
        props.put(EventConstants.EVENT_TOPIC, topic);
        return props;
    }

    /** Builds the properties marking a service as a JAX-RS whiteboard extension. */
    private Dictionary<String, Object> newJaxRsExtensionProps() {
        final Dictionary<String, Object> props = new Hashtable<>();
        props.put("osgi.jaxrs.extension", "true");
        return props;
    }

    /** Callback storing a freshly tracked service instance into the registry. */
    private void register(final Class<?> tClass, final Object t) {
        final Tracked tracked = INSTANCES.get(tClass);
        tracked.instance = t;
    }

    /**
     * ServiceTracker remembering the latest service instance for one type and
     * pushing it to a callback on every tracker event.
     *
     * NOTE(review): `implements ServiceTrackerCustomizer` looks redundant since
     * ServiceTracker already implements it, and both modifiedService and
     * removedService delegate to addingService — on removal this re-fetches the
     * (possibly gone) service instead of clearing it. Confirm intent.
     */
    public static class Tracked<T> extends ServiceTracker<T, T> implements ServiceTrackerCustomizer<T, T> {
        private volatile T instance;

        private Tracked(final BundleContext context, final Class<T> clazz, final BiConsumer<Class<T>, T> onInstance) {
            super(context, clazz, new ServiceTrackerCustomizer<T, T>() {
                @Override
                public T addingService(final ServiceReference<T> reference) {
                    final T service = context.getService(reference);
                    onInstance.accept(clazz, service);
                    return service;
                }

                @Override
                public void modifiedService(final ServiceReference<T> reference, final T service) {
                    addingService(reference);
                }

                @Override
                public void removedService(final ServiceReference<T> reference, final T service) {
                    addingService(reference);
                }
            });
        }

        T getInstance() {
            return instance;
        }
    }
}
| 6,294 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/main/java/org/apache/geronimo/microprofile/opentracing | Create_ds/geronimo-opentracing/geronimo-opentracing-osgi/src/main/java/org/apache/geronimo/microprofile/opentracing/osgi/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.osgi;
| 6,295 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-common/src/test/java/org/apache/geronimo/microprofile/opentracing/common | Create_ds/geronimo-opentracing/geronimo-opentracing-common/src/test/java/org/apache/geronimo/microprofile/opentracing/common/impl/IdGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.common.impl;
import static org.testng.Assert.assertEquals;
import org.testng.annotations.Test;
/**
 * Checks the hexadecimal strategy of {@link IdGenerator}.
 */
public class IdGeneratorTest {
    /**
     * With every config key answered by "hex", the generator must produce
     * identifiers that are exactly 16 characters long.
     */
    @Test
    public void hex() {
        final IdGenerator generator = new IdGenerator();
        generator.setConfig((value, def) -> "hex");
        generator.init();
        assertEquals(generator.next().toString().length(), 16);
    }
}
| 6,296 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-common/src/main/java/org/apache/geronimo/microprofile/opentracing | Create_ds/geronimo-opentracing/geronimo-opentracing-common/src/main/java/org/apache/geronimo/microprofile/opentracing/common/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// give a handle to the package, no other usage
package org.apache.geronimo.microprofile.opentracing.common; | 6,297 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-common/src/main/java/org/apache/geronimo/microprofile/opentracing/common | Create_ds/geronimo-opentracing/geronimo-opentracing-common/src/main/java/org/apache/geronimo/microprofile/opentracing/common/impl/ServletHeaderTextMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.common.impl;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import io.opentracing.propagation.TextMap;
/**
 * OpenTracing {@link TextMap} carrier bridging the servlet API: span context is
 * extracted from the request headers and injected into the response headers.
 */
public class ServletHeaderTextMap implements TextMap {
    private final HttpServletRequest request;
    private final HttpServletResponse response;

    public ServletHeaderTextMap(final HttpServletRequest request, final HttpServletResponse response) {
        this.request = request;
        this.response = response;
    }

    public HttpServletRequest getRequest() {
        return request;
    }

    /**
     * Iterates over the request header names, exposing each one as a read-only
     * name/value entry.
     */
    @Override
    public Iterator<Map.Entry<String, String>> iterator() {
        final Enumeration<String> names = request.getHeaderNames();
        return new Iterator<Map.Entry<String, String>>() {
            @Override
            public boolean hasNext() {
                return names.hasMoreElements();
            }

            @Override
            public Map.Entry<String, String> next() {
                return headerEntry(names.nextElement());
            }
        };
    }

    /** Writes an injected span context entry into the response headers. */
    @Override
    public void put(final String key, final String value) {
        this.response.setHeader(key, value);
    }

    // Read-only view over a single header; the value is resolved lazily from the
    // request on each getValue() call, as in the original inline implementation.
    private Map.Entry<String, String> headerEntry(final String name) {
        return new Map.Entry<String, String>() {
            @Override
            public String getKey() {
                return name;
            }

            @Override
            public String getValue() {
                return String.valueOf(request.getHeader(name));
            }

            @Override
            public String setValue(final String value) {
                throw new UnsupportedOperationException();
            }
        };
    }
}
| 6,298 |
0 | Create_ds/geronimo-opentracing/geronimo-opentracing-common/src/main/java/org/apache/geronimo/microprofile/opentracing/common | Create_ds/geronimo-opentracing/geronimo-opentracing-common/src/main/java/org/apache/geronimo/microprofile/opentracing/common/impl/IdGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.opentracing.common.impl;
import java.util.Locale;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import javax.annotation.PostConstruct;
import org.apache.geronimo.microprofile.opentracing.common.config.GeronimoOpenTracingConfig;
// @ApplicationScoped
// @ApplicationScoped
/**
 * Produces span/trace identifiers using the strategy selected by the
 * "id.generator" configuration entry: "counter" (default), "uuid" or "hex".
 */
public class IdGenerator {
    protected GeronimoOpenTracingConfig config;

    private Supplier<Object> delegate;
    private boolean counter;

    /**
     * Reads the configuration and installs the matching generation strategy.
     * Must be called once before {@link #next()}.
     */
    public void init() {
        // Normalize once so the flag and the switch agree on the strategy:
        // previously "Counter" set the flag (equalsIgnoreCase) but fell through
        // the case-sensitive switch into the hex branch.
        final String type = config.read("id.generator", "counter").toLowerCase(Locale.ROOT);
        counter = "counter".equals(type);
        switch (type) {
            case "counter":
                delegate = new Supplier<Object>() {
                    private final AtomicLong counter = new AtomicLong();

                    @Override
                    public Object get() {
                        return counter.incrementAndGet();
                    }
                };
                break;
            case "uuid":
                delegate = () -> UUID.randomUUID().toString();
                break;
            case "hex": // limited to 16 for the length cause of zipkin (see span decoder)
            default:
                delegate = new Supplier<Object>() {
                    private final Random random = new Random(System.nanoTime());
                    private final char[] hexDigits = "0123456789abcdef".toCharArray();
                    // assumes the configured prefix is at most 16 chars, otherwise the
                    // id exceeds the zipkin length limit — TODO confirm/validate
                    private final String constantPart = config.read("id.generator.hex.prefix", "");

                    @Override
                    public Object get() {
                        // fixed total length of 16: prefix plus random hex digits
                        final StringBuilder sb = new StringBuilder(16).append(constantPart);
                        for (int i = 0; i < 16 - constantPart.length(); i++) {
                            sb.append(hexDigits[random.nextInt(16)]);
                        }
                        return sb.toString();
                    }
                };
        }
    }

    public void setConfig(final GeronimoOpenTracingConfig config) {
        this.config = config;
    }

    /** @return true when the active strategy is the incrementing counter */
    public boolean isCounter() {
        return counter;
    }

    /** @return the next identifier; its type depends on the configured strategy */
    public Object next() {
        return delegate.get();
    }
}
| 6,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.