index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kinesis/CachingPartitionerSerializerDelegateTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.fail;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.flink.statefun.sdk.kinesis.egress.EgressRecord;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressSerializer;
import org.junit.Test;
public class CachingPartitionerSerializerDelegateTest {
private static final String TEST_INPUT = "input";
private static final String TEST_STREAM = "stream";
private static final String TEST_PARTITION_KEY = "partition-key";
private static final String TEST_EXPLICIT_HASH_KEY = "explicit-hash-key";
@Test
public void noDuplicateSerialization() {
final CachingPartitionerSerializerDelegate<String> cachingDelegate =
new CachingPartitionerSerializerDelegate<>(new DuplicateSerializationDetectingSerializer());
cachingDelegate.serialize(TEST_INPUT);
// these throw if the wrapped serializer is used multiple times
cachingDelegate.getTargetStream(TEST_INPUT);
cachingDelegate.getPartitionId(TEST_INPUT);
cachingDelegate.getExplicitHashKey(TEST_INPUT);
}
@Test
public void serialize() {
final CachingPartitionerSerializerDelegate<String> cachingDelegate =
new CachingPartitionerSerializerDelegate<>(new DuplicateSerializationDetectingSerializer());
assertThat(
cachingDelegate.serialize(TEST_INPUT),
is(ByteBuffer.wrap(TEST_INPUT.getBytes(StandardCharsets.UTF_8))));
}
@Test
public void targetStream() {
final CachingPartitionerSerializerDelegate<String> cachingDelegate =
new CachingPartitionerSerializerDelegate<>(new DuplicateSerializationDetectingSerializer());
assertThat(cachingDelegate.getTargetStream(TEST_INPUT), is(TEST_STREAM));
}
@Test
public void partitionId() {
final CachingPartitionerSerializerDelegate<String> cachingDelegate =
new CachingPartitionerSerializerDelegate<>(new DuplicateSerializationDetectingSerializer());
assertThat(cachingDelegate.getPartitionId(TEST_INPUT), is(TEST_PARTITION_KEY));
}
@Test
public void explicitHashKey() {
final CachingPartitionerSerializerDelegate<String> cachingDelegate =
new CachingPartitionerSerializerDelegate<>(new DuplicateSerializationDetectingSerializer());
assertThat(cachingDelegate.getExplicitHashKey(TEST_INPUT), is(TEST_EXPLICIT_HASH_KEY));
}
private static class DuplicateSerializationDetectingSerializer
implements KinesisEgressSerializer<String> {
private boolean isInvoked;
@Override
public EgressRecord serialize(String value) {
if (isInvoked) {
fail("Duplicate serialization detected.");
}
isInvoked = true;
return EgressRecord.newBuilder()
.withData(value.getBytes(StandardCharsets.UTF_8))
.withStream(TEST_STREAM)
.withPartitionKey(TEST_PARTITION_KEY)
.withExplicitHashKey(TEST_EXPLICIT_HASH_KEY)
.build();
}
}
}
| 6,000 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kinesis/KinesisSourceProviderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsCredentials;
import org.apache.flink.statefun.sdk.kinesis.ingress.IngressRecord;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressBuilder;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressDeserializer;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressSpec;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
import org.junit.Test;
public class KinesisSourceProviderTest {
private static final IngressIdentifier<String> ID =
new IngressIdentifier<>(String.class, "namespace", "name");
private static final String STREAM_NAME = "test-stream";
@Test
public void exampleUsage() {
final KinesisIngressSpec<String> kinesisIngressSpec =
KinesisIngressBuilder.forIdentifier(ID)
.withAwsRegion("us-west-1")
.withAwsCredentials(AwsCredentials.basic("access-key-id", "secret-access-key"))
.withDeserializer(TestDeserializer.class)
.withStream(STREAM_NAME)
.build();
final KinesisSourceProvider provider = new KinesisSourceProvider();
final SourceFunction<String> source = provider.forSpec(kinesisIngressSpec);
assertThat(source, instanceOf(FlinkKinesisConsumer.class));
}
private static final class TestDeserializer implements KinesisIngressDeserializer<String> {
private static final long serialVersionUID = 1L;
@Override
public String deserialize(IngressRecord ingressRecord) {
return null;
}
}
}
| 6,001 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress/v1/GenericKinesisEgressBinderV1Test.java | package org.apache.flink.statefun.flink.io.kinesis.binders.egress.v1;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertThat;
import java.net.URL;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.io.testutils.TestModuleBinder;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressSpec;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
import org.junit.Test;
public class GenericKinesisEgressBinderV1Test {
private static final ObjectMapper OBJ_MAPPER = new ObjectMapper(new YAMLFactory());
private static final String SPEC_YAML_PATH = "kinesis-io-binders/generic-kinesis-egress-v1.yaml";
@Test
public void exampleUsage() throws Exception {
final ComponentJsonObject component = loadComponentJsonObject(SPEC_YAML_PATH);
final TestModuleBinder testModuleBinder = new TestModuleBinder();
GenericKinesisEgressBinderV1.INSTANCE.bind(component, testModuleBinder);
final EgressIdentifier<TypedValue> expectedEgressId =
new EgressIdentifier<>("com.foo.bar", "test-egress", TypedValue.class);
assertThat(testModuleBinder.getEgress(expectedEgressId), instanceOf(KinesisEgressSpec.class));
}
private static ComponentJsonObject loadComponentJsonObject(String yamlPath) throws Exception {
final URL url = GenericKinesisEgressBinderV1Test.class.getClassLoader().getResource(yamlPath);
final ObjectNode componentObject = OBJ_MAPPER.readValue(url, ObjectNode.class);
return new ComponentJsonObject(componentObject);
}
}
| 6,002 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress/v1/RoutableKinesisIngressBinderV1Test.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.ingress.v1;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertThat;
import com.google.protobuf.Message;
import java.net.URL;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.io.common.AutoRoutableProtobufRouter;
import org.apache.flink.statefun.flink.io.testutils.TestModuleBinder;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressSpec;
import org.junit.Test;
public class RoutableKinesisIngressBinderV1Test {
private static final ObjectMapper OBJ_MAPPER = new ObjectMapper(new YAMLFactory());
private static final String SPEC_YAML_PATH =
"kinesis-io-binders/routable-kinesis-ingress-v1.yaml";
@Test
public void exampleUsage() throws Exception {
final ComponentJsonObject component = loadComponentJsonObject(SPEC_YAML_PATH);
final TestModuleBinder testModuleBinder = new TestModuleBinder();
RoutableKinesisIngressBinderV1.INSTANCE.bind(component, testModuleBinder);
final IngressIdentifier<Message> expectedIngressId =
new IngressIdentifier<>(Message.class, "com.foo.bar", "test-ingress");
assertThat(
testModuleBinder.getIngress(expectedIngressId), instanceOf(KinesisIngressSpec.class));
assertThat(
testModuleBinder.getRouters(expectedIngressId),
hasItem(instanceOf(AutoRoutableProtobufRouter.class)));
}
private static ComponentJsonObject loadComponentJsonObject(String yamlPath) throws Exception {
final URL url = RoutableKinesisIngressBinderV1Test.class.getClassLoader().getResource(yamlPath);
final ObjectNode componentObject = OBJ_MAPPER.readValue(url, ObjectNode.class);
return new ComponentJsonObject(componentObject);
}
}
| 6,003 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/common/ReflectionUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.common;
import static org.hamcrest.MatcherAssert.assertThat;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressDeserializer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.hamcrest.CoreMatchers;
import org.junit.Test;
public class ReflectionUtilTest {
private static final class Serializer implements KafkaIngressDeserializer<String> {
private static final long serialVersionUID = 1;
@Override
public String deserialize(ConsumerRecord<byte[], byte[]> input) {
return null;
}
}
@Test
public void example() {
Serializer serializer = ReflectionUtil.instantiate(Serializer.class);
assertThat(serializer, CoreMatchers.notNullValue());
}
}
| 6,004 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kafka/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kafka/binders/egress/v1/GenericKafkaEgressBinderV1Test.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.egress.v1;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertThat;
import java.net.URL;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.io.testutils.TestModuleBinder;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.kafka.KafkaEgressSpec;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
import org.junit.Test;
public class GenericKafkaEgressBinderV1Test {
private static final ObjectMapper OBJ_MAPPER = new ObjectMapper(new YAMLFactory());
private static final String SPEC_YAML_PATH = "kafka-io-binders/generic-kafka-egress-v1.yaml";
@Test
public void exampleUsage() throws Exception {
final ComponentJsonObject component = loadComponentJsonObject(SPEC_YAML_PATH);
final TestModuleBinder testModuleBinder = new TestModuleBinder();
GenericKafkaEgressBinderV1.INSTANCE.bind(component, testModuleBinder);
final EgressIdentifier<TypedValue> expectedEgressId =
new EgressIdentifier<>("com.foo.bar", "test-egress", TypedValue.class);
assertThat(testModuleBinder.getEgress(expectedEgressId), instanceOf(KafkaEgressSpec.class));
}
private static ComponentJsonObject loadComponentJsonObject(String yamlPath) throws Exception {
final URL url = GenericKafkaEgressBinderV1Test.class.getClassLoader().getResource(yamlPath);
final ObjectNode componentObject = OBJ_MAPPER.readValue(url, ObjectNode.class);
return new ComponentJsonObject(componentObject);
}
}
| 6,005 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress/v1/RoutableKafkaIngressBinderV1Test.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.ingress.v1;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertThat;
import com.google.protobuf.Message;
import java.net.URL;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.io.common.AutoRoutableProtobufRouter;
import org.apache.flink.statefun.flink.io.testutils.TestModuleBinder;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressSpec;
import org.junit.Test;
public class RoutableKafkaIngressBinderV1Test {
private static final ObjectMapper OBJ_MAPPER = new ObjectMapper(new YAMLFactory());
private static final String SPEC_YAML_PATH = "kafka-io-binders/routable-kafka-ingress-v1.yaml";
@Test
public void exampleUsage() throws Exception {
final ComponentJsonObject component = loadComponentJsonObject(SPEC_YAML_PATH);
final TestModuleBinder testModuleBinder = new TestModuleBinder();
RoutableKafkaIngressBinderV1.INSTANCE.bind(component, testModuleBinder);
final IngressIdentifier<Message> expectedIngressId =
new IngressIdentifier<>(Message.class, "com.foo.bar", "test-ingress");
assertThat(testModuleBinder.getIngress(expectedIngressId), instanceOf(KafkaIngressSpec.class));
assertThat(
testModuleBinder.getRouters(expectedIngressId),
hasItem(instanceOf(AutoRoutableProtobufRouter.class)));
}
private static ComponentJsonObject loadComponentJsonObject(String yamlPath) throws Exception {
final URL url = RoutableKafkaIngressBinderV1Test.class.getClassLoader().getResource(yamlPath);
final ObjectNode componentObject = OBJ_MAPPER.readValue(url, ObjectNode.class);
return new ComponentJsonObject(componentObject);
}
}
| 6,006 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/testutils/TestModuleBinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.testutils;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.FunctionTypeNamespaceMatcher;
import org.apache.flink.statefun.sdk.StatefulFunctionProvider;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.io.EgressSpec;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
import org.apache.flink.statefun.sdk.io.IngressSpec;
import org.apache.flink.statefun.sdk.io.Router;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
public final class TestModuleBinder implements StatefulFunctionModule.Binder {
private final Map<IngressIdentifier<?>, IngressSpec<?>> ingress = new HashMap<>();
private final Map<EgressIdentifier<?>, EgressSpec<?>> egress = new HashMap<>();
private final Map<IngressIdentifier<?>, List<Router<?>>> routers = new HashMap<>();
private final Map<FunctionType, StatefulFunctionProvider> specificFunctionProviders =
new HashMap<>();
private final Map<String, StatefulFunctionProvider> namespaceFunctionProviders = new HashMap<>();
@Override
public <T> void bindIngress(IngressSpec<T> spec) {
Objects.requireNonNull(spec);
IngressIdentifier<T> id = spec.id();
ingress.put(id, spec);
}
@Override
public <T> void bindIngressRouter(IngressIdentifier<T> ingressIdentifier, Router<T> router) {
Objects.requireNonNull(ingressIdentifier);
Objects.requireNonNull(router);
List<Router<?>> ingressRouters =
routers.computeIfAbsent(ingressIdentifier, unused -> new ArrayList<>());
ingressRouters.add(router);
}
@Override
public <T> void bindEgress(EgressSpec<T> spec) {
Objects.requireNonNull(spec);
EgressIdentifier<T> id = spec.id();
egress.put(id, spec);
}
@Override
public void bindFunctionProvider(FunctionType functionType, StatefulFunctionProvider provider) {
Objects.requireNonNull(functionType);
Objects.requireNonNull(provider);
specificFunctionProviders.put(functionType, provider);
}
@Override
public void bindFunctionProvider(
FunctionTypeNamespaceMatcher namespaceMatcher, StatefulFunctionProvider provider) {
Objects.requireNonNull(namespaceMatcher);
Objects.requireNonNull(provider);
namespaceFunctionProviders.put(namespaceMatcher.targetNamespace(), provider);
}
@SuppressWarnings("unchecked")
public <T> IngressSpec<T> getIngress(IngressIdentifier<T> ingressIdentifier) {
return (IngressSpec<T>) ingress.get(ingressIdentifier);
}
public <T> List<Router<?>> getRouters(IngressIdentifier<T> ingressIdentifier) {
return routers.get(ingressIdentifier);
}
@SuppressWarnings("unchecked")
public <T> EgressSpec<T> getEgress(EgressIdentifier<T> egressIdentifier) {
return (EgressSpec<T>) egress.get(egressIdentifier);
}
}
| 6,007 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/test/java/org/apache/flink/statefun/flink/io/testutils/YamlUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.testutils;
import java.io.IOException;
import java.net.URL;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
public final class YamlUtils {
private YamlUtils() {}
public static JsonNode loadAsJsonFromClassResource(ClassLoader classLoader, String pathToYaml) {
URL moduleUrl = classLoader.getResource(pathToYaml);
ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
try {
return mapper.readTree(moduleUrl);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| 6,008 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/CachingPartitionerSerializerDelegate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis;
import java.nio.ByteBuffer;
import java.util.Objects;
import javax.annotation.concurrent.NotThreadSafe;
import org.apache.flink.statefun.sdk.kinesis.egress.EgressRecord;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressSerializer;
import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
import org.apache.flink.streaming.connectors.kinesis.KinesisPartitioner;
import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisSerializationSchema;
/**
* An implementation of a {@link KinesisPartitioner} and {@link KinesisSerializationSchema}, that
* delegates partitioning and serialization to a wrapped {@link KinesisEgressSerializer}, while also
* caching already processed element objects to avoid duplicate serialization.
*
* <p>To avoid duplicate serialization, a shared instance of this is used as both the partitioner
* and the serialization schema within a single subtask of a {@link FlinkKinesisProducer}.
*
* <p>Note that this class is not thread-safe, and should not be accessed concurrently.
*
* @param <T>
*/
@NotThreadSafe
final class CachingPartitionerSerializerDelegate<T> extends KinesisPartitioner<T>
implements KinesisSerializationSchema<T> {
private static final long serialVersionUID = 1L;
private final KinesisEgressSerializer<T> delegate;
private transient T lastProcessedElement;
private transient EgressRecord lastSerializedRecord;
CachingPartitionerSerializerDelegate(KinesisEgressSerializer<T> delegate) {
this.delegate = Objects.requireNonNull(delegate);
}
@Override
public ByteBuffer serialize(T element) {
return ByteBuffer.wrap(getLastOrCreateNewSerializedRecord(element).getData());
}
@Override
public String getTargetStream(T element) {
return getLastOrCreateNewSerializedRecord(element).getStream();
}
@Override
public String getPartitionId(T element) {
return getLastOrCreateNewSerializedRecord(element).getPartitionKey();
}
@Override
public String getExplicitHashKey(T element) {
return getLastOrCreateNewSerializedRecord(element).getExplicitHashKey();
}
private EgressRecord getLastOrCreateNewSerializedRecord(T element) {
if (element == lastProcessedElement) {
return lastSerializedRecord;
}
lastProcessedElement = element;
lastSerializedRecord = delegate.serialize(element);
return lastSerializedRecord;
}
}
| 6,009 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/KinesisDeserializationSchemaDelegate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis;
import java.io.IOException;
import java.util.Objects;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.statefun.flink.common.UnimplementedTypeInfo;
import org.apache.flink.statefun.sdk.kinesis.ingress.IngressRecord;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressDeserializer;
import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
/**
 * Adapts a StateFun {@link KinesisIngressDeserializer} to the Flink connector's {@link
 * KinesisDeserializationSchema} interface by repackaging the raw consumer callback arguments into
 * an {@link IngressRecord} before delegating.
 */
final class KinesisDeserializationSchemaDelegate<T> implements KinesisDeserializationSchema<T> {
  private static final long serialVersionUID = 1L;

  // Placeholder type information; presumably replaced/ignored by the runtime — see
  // UnimplementedTypeInfo. NOTE(review): confirm the runtime never relies on this for serialization.
  private final TypeInformation<T> producedTypeInfo = new UnimplementedTypeInfo<>();

  private final KinesisIngressDeserializer<T> delegate;

  KinesisDeserializationSchemaDelegate(KinesisIngressDeserializer<T> delegate) {
    this.delegate = Objects.requireNonNull(delegate);
  }

  @Override
  public T deserialize(
      byte[] recordValue,
      String partitionKey,
      String seqNum,
      long approxArrivalTimestamp,
      String stream,
      String shardId)
      throws IOException {
    final IngressRecord record =
        IngressRecord.newBuilder()
            .withStream(stream)
            .withShardId(shardId)
            .withData(recordValue)
            .withPartitionKey(partitionKey)
            .withSequenceNumber(seqNum)
            .withApproximateArrivalTimestamp(approxArrivalTimestamp)
            .build();
    return delegate.deserialize(record);
  }

  @Override
  public TypeInformation<T> getProducedType() {
    return producedTypeInfo;
  }
}
| 6,010 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/KinesisFlinkIOModule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.flink.io.spi.FlinkIoModule;
import org.apache.flink.statefun.sdk.kinesis.KinesisIOTypes;
@AutoService(FlinkIoModule.class)
public final class KinesisFlinkIOModule implements FlinkIoModule {

  /**
   * Registers the Kinesis source and sink providers under their universal ingress/egress type
   * identifiers so specs of those types can be translated into Flink sources and sinks.
   */
  @Override
  public void configure(Map<String, String> globalConfiguration, Binder binder) {
    binder.bindSinkProvider(KinesisIOTypes.UNIVERSAL_EGRESS_TYPE, new KinesisSinkProvider());
    binder.bindSourceProvider(KinesisIOTypes.UNIVERSAL_INGRESS_TYPE, new KinesisSourceProvider());
  }
}
| 6,011 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/AwsAuthConfigProperties.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis;
import java.net.URI;
import java.util.Locale;
import java.util.Properties;
import org.apache.flink.kinesis.shaded.com.amazonaws.regions.DefaultAwsRegionProviderChain;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsCredentials;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsRegion;
import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
/**
 * Utility that translates SDK-level AWS region/credentials configuration into the {@link
 * Properties} formats expected by the Flink Kinesis consumer and producer.
 */
final class AwsAuthConfigProperties {

  private AwsAuthConfigProperties() {}

  /** Region properties for the Flink Kinesis consumer. */
  static Properties forAwsRegionConsumerProps(AwsRegion awsRegion) {
    final Properties properties = new Properties();
    if (awsRegion.isDefault() || awsRegion.isId()) {
      properties.setProperty(AWSConfigConstants.AWS_REGION, plainRegionId(awsRegion));
    } else if (awsRegion.isCustomEndpoint()) {
      final AwsRegion.CustomEndpointAwsRegion customEndpoint = awsRegion.asCustomEndpoint();
      properties.setProperty(AWSConfigConstants.AWS_ENDPOINT, customEndpoint.serviceEndpoint());
      properties.setProperty(AWSConfigConstants.AWS_REGION, customEndpoint.regionId());
    } else {
      throw new IllegalStateException("Unrecognized AWS region configuration type: " + awsRegion);
    }
    return properties;
  }

  /**
   * Region properties for the Flink Kinesis producer. Unlike the consumer, the producer takes a
   * custom endpoint as separate host ({@code KinesisEndpoint}) and port ({@code KinesisPort})
   * settings rather than a single endpoint URL.
   */
  static Properties forAwsRegionProducerProps(AwsRegion awsRegion) {
    final Properties properties = new Properties();
    if (awsRegion.isDefault() || awsRegion.isId()) {
      properties.setProperty(AWSConfigConstants.AWS_REGION, plainRegionId(awsRegion));
    } else if (awsRegion.isCustomEndpoint()) {
      final AwsRegion.CustomEndpointAwsRegion customEndpoint = awsRegion.asCustomEndpoint();
      final URI uri = URI.create(customEndpoint.serviceEndpoint());
      properties.setProperty("KinesisEndpoint", uri.getHost());
      properties.setProperty(AWSConfigConstants.AWS_REGION, customEndpoint.regionId());
      final int port = uri.getPort();
      // URI.getPort() returns -1 when no port is present in the endpoint URL
      if (port != -1) {
        properties.setProperty("KinesisPort", String.valueOf(port));
      }
    } else {
      throw new IllegalStateException("Unrecognized AWS region configuration type: " + awsRegion);
    }
    return properties;
  }

  /** Credential-provider properties, shared by both consumer and producer. */
  static Properties forAwsCredentials(AwsCredentials awsCredentials) {
    final Properties properties = new Properties();
    if (awsCredentials.isDefault()) {
      properties.setProperty(
          AWSConfigConstants.AWS_CREDENTIALS_PROVIDER,
          AWSConfigConstants.CredentialProvider.AUTO.name());
    } else if (awsCredentials.isBasic()) {
      properties.setProperty(
          AWSConfigConstants.AWS_CREDENTIALS_PROVIDER,
          AWSConfigConstants.CredentialProvider.BASIC.name());
      final AwsCredentials.BasicAwsCredentials basicCredentials = awsCredentials.asBasic();
      properties.setProperty(
          AWSConfigConstants.accessKeyId(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER),
          basicCredentials.accessKeyId());
      properties.setProperty(
          AWSConfigConstants.secretKey(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER),
          basicCredentials.secretAccessKey());
    } else if (awsCredentials.isProfile()) {
      properties.setProperty(
          AWSConfigConstants.AWS_CREDENTIALS_PROVIDER,
          AWSConfigConstants.CredentialProvider.PROFILE.name());
      final AwsCredentials.ProfileAwsCredentials profileCredentials = awsCredentials.asProfile();
      properties.setProperty(
          AWSConfigConstants.profileName(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER),
          profileCredentials.name());
      // the profile path is optional; only set the property when one was configured
      profileCredentials
          .path()
          .ifPresent(
              path ->
                  properties.setProperty(
                      AWSConfigConstants.profilePath(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER),
                      path));
    } else {
      throw new IllegalStateException(
          "Unrecognized AWS credentials configuration type: " + awsCredentials);
    }
    return properties;
  }

  /**
   * Resolves the region id for the non-custom-endpoint cases (default provider chain or an
   * explicitly configured id). Extracted to remove the duplication between the consumer and
   * producer property builders.
   */
  private static String plainRegionId(AwsRegion awsRegion) {
    return awsRegion.isDefault() ? regionFromDefaultProviderChain() : awsRegion.asId().id();
  }

  private static String regionFromDefaultProviderChain() {
    return new DefaultAwsRegionProviderChain().getRegion().toLowerCase(Locale.ENGLISH);
  }
}
| 6,012 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/KinesisSourceProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Properties;
import org.apache.flink.statefun.flink.io.spi.SourceProvider;
import org.apache.flink.statefun.sdk.io.IngressSpec;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressSpec;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressStartupPosition;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
/** Translates a {@link KinesisIngressSpec} into a {@link FlinkKinesisConsumer} source. */
public final class KinesisSourceProvider implements SourceProvider {

  @Override
  public <T> SourceFunction<T> forSpec(IngressSpec<T> spec) {
    final KinesisIngressSpec<T> kinesisIngressSpec = asKinesisSpec(spec);
    return new FlinkKinesisConsumer<>(
        kinesisIngressSpec.streams(),
        deserializationSchemaFromSpec(kinesisIngressSpec),
        propertiesFromSpec(kinesisIngressSpec));
  }

  private static <T> KinesisIngressSpec<T> asKinesisSpec(IngressSpec<T> spec) {
    if (spec instanceof KinesisIngressSpec) {
      return (KinesisIngressSpec<T>) spec;
    }
    if (spec == null) {
      throw new NullPointerException("Unable to translate a NULL spec");
    }
    throw new IllegalArgumentException(String.format("Wrong type %s", spec.type()));
  }

  private static <T> KinesisDeserializationSchema<T> deserializationSchemaFromSpec(
      KinesisIngressSpec<T> spec) {
    return new KinesisDeserializationSchemaDelegate<>(spec.deserializer());
  }

  /**
   * Builds the consumer {@link Properties}: user-supplied properties first, then region,
   * credentials, and startup-position settings derived from the spec layered on top.
   */
  private static Properties propertiesFromSpec(KinesisIngressSpec<?> spec) {
    final Properties properties = new Properties();
    properties.putAll(resolveProperties(spec.properties()));
    spec.awsRegion()
        .transformPropertiesIfPresent(
            properties,
            ConsumerConfigConstants.AWS_REGION,
            (props, region) ->
                properties.putAll(AwsAuthConfigProperties.forAwsRegionConsumerProps(region)));
    spec.awsCredentials()
        .transformPropertiesIfPresent(
            properties,
            ConsumerConfigConstants.AWS_CREDENTIALS_PROVIDER,
            (props, credentials) ->
                properties.putAll(AwsAuthConfigProperties.forAwsCredentials(credentials)));
    setStartupPositionProperties(properties, spec.startupPosition());
    return properties;
  }

  /**
   * Keys already namespaced with {@code flink.} or {@code aws.} pass through unchanged; every
   * other key is treated as an AWS client setting and prefixed accordingly.
   */
  private static Properties resolveProperties(Properties properties) {
    final Properties resolvedProps = new Properties();
    for (String property : properties.stringPropertyNames()) {
      if (property.startsWith("flink.") || property.startsWith("aws.")) {
        resolvedProps.setProperty(property, properties.getProperty(property));
      } else {
        // all other configs are assumed to be AWS configs
        resolvedProps.setProperty(
            asAwsClientPropertyKey(property), properties.getProperty(property));
      }
    }
    return resolvedProps;
  }

  /** Maps the spec's startup position onto the connector's initial-position properties. */
  private static void setStartupPositionProperties(
      Properties properties, KinesisIngressStartupPosition startupPosition) {
    if (startupPosition.isEarliest()) {
      properties.setProperty(
          ConsumerConfigConstants.STREAM_INITIAL_POSITION,
          ConsumerConfigConstants.InitialPosition.TRIM_HORIZON.name());
    } else if (startupPosition.isLatest()) {
      properties.setProperty(
          ConsumerConfigConstants.STREAM_INITIAL_POSITION,
          ConsumerConfigConstants.InitialPosition.LATEST.name());
    } else if (startupPosition.isDate()) {
      properties.setProperty(
          ConsumerConfigConstants.STREAM_INITIAL_POSITION,
          ConsumerConfigConstants.InitialPosition.AT_TIMESTAMP.name());
      final ZonedDateTime startupDate = startupPosition.asDate().date();
      final DateTimeFormatter formatter =
          DateTimeFormatter.ofPattern(ConsumerConfigConstants.DEFAULT_STREAM_TIMESTAMP_DATE_FORMAT);
      properties.setProperty(
          ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP, startupDate.format(formatter));
    } else {
      throw new IllegalStateException(
          "Unrecognized ingress startup position type: " + startupPosition);
    }
  }

  private static String asAwsClientPropertyKey(String key) {
    return AWSUtil.AWS_CLIENT_CONFIG_PREFIX + lowercaseFirstLetter(key);
  }

  /**
   * Lower-cases the first character of {@code string}. An empty string is returned unchanged;
   * previously this indexed {@code chars[0]} unconditionally and threw
   * ArrayIndexOutOfBoundsException for an empty property key.
   */
  private static String lowercaseFirstLetter(String string) {
    if (string.isEmpty()) {
      return string;
    }
    final char[] chars = string.toCharArray();
    chars[0] = Character.toLowerCase(chars[0]);
    return new String(chars);
  }
}
| 6,013 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/KinesisSinkProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis;
import java.util.Properties;
import org.apache.flink.statefun.flink.io.common.ReflectionUtil;
import org.apache.flink.statefun.flink.io.spi.SinkProvider;
import org.apache.flink.statefun.sdk.io.EgressSpec;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressSerializer;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressSpec;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
/** Translates a {@link KinesisEgressSpec} into a {@link FlinkKinesisProducer} sink. */
public final class KinesisSinkProvider implements SinkProvider {

  @Override
  public <T> SinkFunction<T> forSpec(EgressSpec<T> spec) {
    final KinesisEgressSpec<T> kinesisEgressSpec = asKinesisSpec(spec);
    // the same delegate serves as both serialization schema and custom partitioner, so each
    // record is serialized only once
    final CachingPartitionerSerializerDelegate<T> delegate =
        new CachingPartitionerSerializerDelegate<>(serializerInstanceFromSpec(kinesisEgressSpec));
    final FlinkKinesisProducer<T> producer =
        new FlinkKinesisProducer<>(delegate, propertiesFromSpec(kinesisEgressSpec));
    producer.setCustomPartitioner(delegate);
    producer.setQueueLimit(kinesisEgressSpec.maxOutstandingRecords());
    // set fail on error, for at-least-once delivery semantics to Kinesis
    producer.setFailOnError(true);
    return producer;
  }

  private static Properties propertiesFromSpec(KinesisEgressSpec<?> spec) {
    final Properties properties = new Properties();
    // layering order matters: region and credential settings go in after client config
    properties.putAll(spec.clientConfigurationProperties());
    properties.putAll(AwsAuthConfigProperties.forAwsRegionProducerProps(spec.awsRegion()));
    properties.putAll(AwsAuthConfigProperties.forAwsCredentials(spec.awsCredentials()));
    return properties;
  }

  private static <T> KinesisEgressSpec<T> asKinesisSpec(EgressSpec<T> spec) {
    if (spec == null) {
      throw new NullPointerException("Unable to translate a NULL spec");
    }
    if (spec instanceof KinesisEgressSpec) {
      return (KinesisEgressSpec<T>) spec;
    }
    throw new IllegalArgumentException(String.format("Wrong type %s", spec.type()));
  }

  private static <T> KinesisEgressSerializer<T> serializerInstanceFromSpec(
      KinesisEgressSpec<T> spec) {
    return ReflectionUtil.instantiate(spec.serializerClass());
  }
}
| 6,014 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/AwsRegionJsonDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsRegion;
/**
 * Jackson deserializer for {@link AwsRegion}. The JSON object's {@code type} field selects one of
 * three region styles: the default provider chain, an explicit region id, or a custom endpoint.
 */
public final class AwsRegionJsonDeserializer extends JsonDeserializer<AwsRegion> {
  private static final String DEFAULT_TYPE = "default";
  private static final String SPECIFIED_ID_TYPE = "specific";
  private static final String CUSTOM_ENDPOINT_TYPE = "custom-endpoint";

  @Override
  public AwsRegion deserialize(JsonParser jsonParser, DeserializationContext deserializationContext)
      throws IOException {
    final ObjectNode awsRegionNode = jsonParser.readValueAs(ObjectNode.class);
    final String typeString = awsRegionNode.get("type").asText();
    if (DEFAULT_TYPE.equals(typeString)) {
      return AwsRegion.fromDefaultProviderChain();
    }
    if (SPECIFIED_ID_TYPE.equals(typeString)) {
      return AwsRegion.ofId(awsRegionNode.get("id").asText());
    }
    if (CUSTOM_ENDPOINT_TYPE.equals(typeString)) {
      return AwsRegion.ofCustomEndpoint(
          awsRegionNode.get("endpoint").asText(), awsRegionNode.get("id").asText());
    }
    throw new IllegalArgumentException(
        "Invalid AWS region type: "
            + typeString
            + "; valid values are ["
            + String.join(", ", DEFAULT_TYPE, SPECIFIED_ID_TYPE, CUSTOM_ENDPOINT_TYPE)
            + "]");
  }
}
| 6,015 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/AwsCredentialsJsonDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsCredentials;
/**
 * Jackson deserializer for {@link AwsCredentials}. The JSON object's {@code type} field selects
 * one of three credential styles: the default provider chain, a basic access/secret key pair, or a
 * named AWS profile (with an optional credentials-file path).
 */
public final class AwsCredentialsJsonDeserializer extends JsonDeserializer<AwsCredentials> {
  private static final String DEFAULT_TYPE = "default";
  private static final String BASIC_TYPE = "basic";
  private static final String PROFILE_TYPE = "profile";

  @Override
  public AwsCredentials deserialize(
      JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
    final ObjectNode awsCredentialsNode = jsonParser.readValueAs(ObjectNode.class);
    final String typeString = awsCredentialsNode.get("type").asText();
    if (DEFAULT_TYPE.equals(typeString)) {
      return AwsCredentials.fromDefaultProviderChain();
    }
    if (BASIC_TYPE.equals(typeString)) {
      return AwsCredentials.basic(
          awsCredentialsNode.get("accessKeyId").asText(),
          awsCredentialsNode.get("secretAccessKey").asText());
    }
    if (PROFILE_TYPE.equals(typeString)) {
      final String profileName = awsCredentialsNode.get("profileName").asText();
      final JsonNode pathNode = awsCredentialsNode.get("profilePath");
      // the profile path is optional; fall back to the name-only factory when it is absent
      return (pathNode == null)
          ? AwsCredentials.profile(profileName)
          : AwsCredentials.profile(profileName, pathNode.asText());
    }
    throw new IllegalArgumentException(
        "Invalid AWS credential type: "
            + typeString
            + "; valid values are ["
            + String.join(", ", DEFAULT_TYPE, BASIC_TYPE, PROFILE_TYPE)
            + "]");
  }
}
| 6,016 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress/v1/Module.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.egress.v1;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.extensions.ExtensionModule;
@AutoService(ExtensionModule.class)
public final class Module implements ExtensionModule {

  /**
   * Registers the version-1 generic Kinesis egress component binder under its kind typename so
   * that egress components of that kind can be bound from module YAML.
   */
  @Override
  public void configure(Map<String, String> globalConfigurations, Binder universeBinder) {
    universeBinder.bindExtension(
        GenericKinesisEgressBinderV1.KIND_TYPE, GenericKinesisEgressBinderV1.INSTANCE);
  }
}
| 6,017 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress/v1/GenericKinesisEgressSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.egress.v1;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.flink.statefun.flink.common.types.TypedValueUtil;
import org.apache.flink.statefun.sdk.egress.generated.KinesisEgressRecord;
import org.apache.flink.statefun.sdk.kinesis.egress.EgressRecord;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressSerializer;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
/**
 * {@link KinesisEgressSerializer} for the generic Kinesis egress: unpacks an incoming {@link
 * TypedValue} carrying a {@link KinesisEgressRecord} and converts it into an {@link EgressRecord}.
 */
public final class GenericKinesisEgressSerializer implements KinesisEgressSerializer<TypedValue> {
  private static final long serialVersionUID = 1L;

  @Override
  public EgressRecord serialize(TypedValue value) {
    final KinesisEgressRecord record = asKinesisEgressRecord(value);
    final EgressRecord.Builder builder =
        EgressRecord.newBuilder()
            .withStream(record.getStream())
            .withPartitionKey(record.getPartitionKey())
            .withData(record.getValueBytes().toByteArray());
    // the explicit hash key is optional; only forward it when actually set
    final String explicitHashKey = record.getExplicitHashKey();
    if (explicitHashKey != null && !explicitHashKey.isEmpty()) {
      builder.withExplicitHashKey(explicitHashKey);
    }
    return builder.build();
  }

  /** Validates the typename of {@code message} and parses its payload bytes. */
  private static KinesisEgressRecord asKinesisEgressRecord(TypedValue message) {
    if (!TypedValueUtil.isProtobufTypeOf(message, KinesisEgressRecord.getDescriptor())) {
      throw new IllegalStateException(
          "The generic Kinesis egress expects only messages of type "
              + KinesisEgressRecord.class.getName());
    }
    try {
      return KinesisEgressRecord.parseFrom(message.getValue());
    } catch (InvalidProtocolBufferException e) {
      throw new RuntimeException(
          "Unable to unpack message as a " + KinesisEgressRecord.class.getName(), e);
    }
  }
}
| 6,018 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress/v1/GenericKinesisEgressSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.egress.v1;
import java.util.Objects;
import java.util.Properties;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
import org.apache.flink.statefun.flink.io.common.json.EgressIdentifierJsonDeserializer;
import org.apache.flink.statefun.flink.io.common.json.PropertiesJsonDeserializer;
import org.apache.flink.statefun.flink.io.kinesis.binders.AwsCredentialsJsonDeserializer;
import org.apache.flink.statefun.flink.io.kinesis.binders.AwsRegionJsonDeserializer;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsCredentials;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsRegion;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressBuilder;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressSpec;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
/**
 * JSON-deserializable spec for the generic Kinesis egress, convertible into the universal {@link
 * KinesisEgressSpec} consumed by the Kinesis sink provider.
 */
@JsonDeserialize(builder = GenericKinesisEgressSpec.Builder.class)
public final class GenericKinesisEgressSpec {

  private final EgressIdentifier<TypedValue> id;
  private final AwsRegion awsRegion;
  private final AwsCredentials awsCredentials;
  private final int maxOutstandingRecords;
  private final Properties properties;

  private GenericKinesisEgressSpec(
      EgressIdentifier<TypedValue> id,
      AwsRegion awsRegion,
      AwsCredentials awsCredentials,
      int maxOutstandingRecords,
      Properties properties) {
    this.id = Objects.requireNonNull(id);
    this.awsRegion = Objects.requireNonNull(awsRegion);
    this.awsCredentials = Objects.requireNonNull(awsCredentials);
    // a primitive int can never be null; the former Objects.requireNonNull here only autoboxed
    this.maxOutstandingRecords = maxOutstandingRecords;
    this.properties = Objects.requireNonNull(properties);
  }

  /** Converts this spec into the universal Kinesis egress spec with the generic serializer. */
  public KinesisEgressSpec<TypedValue> toUniversalKinesisEgressSpec() {
    final KinesisEgressBuilder<TypedValue> builder =
        KinesisEgressBuilder.forIdentifier(id)
            .withAwsRegion(awsRegion)
            .withAwsCredentials(awsCredentials)
            .withMaxOutstandingRecords(maxOutstandingRecords)
            .withProperties(properties)
            .withSerializer(GenericKinesisEgressSerializer.class);
    return builder.build();
  }

  public EgressIdentifier<TypedValue> id() {
    return id;
  }

  /** Jackson POJO builder; every field except {@code id} has a default. */
  @JsonPOJOBuilder
  public static class Builder {
    private final EgressIdentifier<TypedValue> id;
    private AwsRegion awsRegion = AwsRegion.fromDefaultProviderChain();
    private AwsCredentials awsCredentials = AwsCredentials.fromDefaultProviderChain();
    private int maxOutstandingRecords = 1000;
    private Properties properties = new Properties();

    @JsonCreator
    private Builder(
        @JsonProperty("id") @JsonDeserialize(using = EgressIdentifierJsonDeserializer.class)
            EgressIdentifier<TypedValue> id) {
      this.id = Objects.requireNonNull(id);
    }

    @JsonProperty("awsRegion")
    @JsonDeserialize(using = AwsRegionJsonDeserializer.class)
    public Builder withAwsRegion(AwsRegion awsRegion) {
      this.awsRegion = Objects.requireNonNull(awsRegion);
      return this;
    }

    @JsonProperty("awsCredentials")
    @JsonDeserialize(using = AwsCredentialsJsonDeserializer.class)
    public Builder withAwsCredentials(AwsCredentials awsCredentials) {
      this.awsCredentials = Objects.requireNonNull(awsCredentials);
      return this;
    }

    @JsonProperty("maxOutstandingRecords")
    public Builder withMaxOutstandingRecords(int maxOutstandingRecords) {
      this.maxOutstandingRecords = maxOutstandingRecords;
      return this;
    }

    @JsonProperty("clientConfigProperties")
    @JsonDeserialize(using = PropertiesJsonDeserializer.class)
    public Builder withProperties(Properties properties) {
      this.properties = Objects.requireNonNull(properties);
      return this;
    }

    public GenericKinesisEgressSpec build() {
      return new GenericKinesisEgressSpec(
          id, awsRegion, awsCredentials, maxOutstandingRecords, properties);
    }
  }
}
| 6,019 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/egress/v1/GenericKinesisEgressBinderV1.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.egress.v1;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.statefun.extensions.ComponentBinder;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.egress.generated.KinesisEgressRecord;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
/**
* Version 1 {@link ComponentBinder} for binding a Kinesis egress which expects {@link
* KinesisEgressRecord} as input, and writes the wrapped value bytes to Kinesis. Corresponding
* {@link TypeName} is {@code io.statefun.kinesis.v1/egress}.
*
* <p>Below is an example YAML document of the {@link ComponentJsonObject} recognized by this
* binder, with the expected types of each field:
*
* <pre>
* kind: io.statefun.kinesis.v1/egress (typename)
* spec: (object)
* id: com.foo.bar/my-egress (typename)
* awsRegion: (object, optional)
* type: specific (string)
* id: us-west-2 (string)
* awsCredentials: (object, optional)
* type: basic (string)
* accessKeyId: my_access_key_id (string)
* secretAccessKey: my_secret_access_key (string)
* maxOutstandingRecords: 9999 (int, optional)
* clientConfigProperties: (array, optional)
* - SocketTimeout: 9999 (string)
* - MaxConnections: 15 (string)
* - ...
* </pre>
*
* <p>The {@code awsRegion} and {@code awsCredentials} options all have multiple options to choose
* from. Please see {@link GenericKinesisEgressSpec} for further details.
*/
final class GenericKinesisEgressBinderV1 implements ComponentBinder {

  /** Singleton instance registered with the extension module. */
  static final GenericKinesisEgressBinderV1 INSTANCE = new GenericKinesisEgressBinderV1();

  /** Kind typename a component must declare to be handled by this binder. */
  static final TypeName KIND_TYPE = TypeName.parseFrom("io.statefun.kinesis.v1/egress");

  private static final ObjectMapper SPEC_OBJ_MAPPER = StateFunObjectMapper.create();

  private GenericKinesisEgressBinderV1() {}

  /** Parses the component's spec node and binds the resulting universal Kinesis egress. */
  @Override
  public void bind(
      ComponentJsonObject component, StatefulFunctionModule.Binder remoteModuleBinder) {
    checkBinderKind(component);
    final GenericKinesisEgressSpec egressSpec = parseSpec(component.specJsonNode());
    remoteModuleBinder.bindEgress(egressSpec.toUniversalKinesisEgressSpec());
  }

  /** Rejects components that were routed to this binder under the wrong kind typename. */
  private static void checkBinderKind(ComponentJsonObject component) {
    final TypeName declaredKind = component.binderTypename();
    if (!KIND_TYPE.equals(declaredKind)) {
      throw new IllegalStateException(
          "Received unexpected ModuleComponent to bind: " + component);
    }
  }

  private static GenericKinesisEgressSpec parseSpec(JsonNode specJsonNode) {
    try {
      return SPEC_OBJ_MAPPER.treeToValue(specJsonNode, GenericKinesisEgressSpec.class);
    } catch (JsonProcessingException e) {
      throw new RuntimeException("Error parsing a GenericKinesisEgressSpec.", e);
    }
  }
}
| 6,020 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress/v1/RoutableKinesisIngressSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.ingress.v1;
import com.google.protobuf.Message;
import java.io.IOException;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.io.common.json.IngressIdentifierJsonDeserializer;
import org.apache.flink.statefun.flink.io.common.json.PropertiesJsonDeserializer;
import org.apache.flink.statefun.flink.io.generated.RoutingConfig;
import org.apache.flink.statefun.flink.io.generated.TargetFunctionType;
import org.apache.flink.statefun.flink.io.kinesis.binders.AwsCredentialsJsonDeserializer;
import org.apache.flink.statefun.flink.io.kinesis.binders.AwsRegionJsonDeserializer;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsCredentials;
import org.apache.flink.statefun.sdk.kinesis.auth.AwsRegion;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressBuilder;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressBuilderApiExtension;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressSpec;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressStartupPosition;
/**
 * JSON-deserializable spec for a routable Kinesis ingress (binder version 1).
 *
 * <p>Deserialized from the component's spec node via the nested {@link Builder} (see the
 * {@code @JsonProperty} annotations there for the recognized field names), and converted into a
 * universal {@link KinesisIngressSpec} whose deserializer wraps each consumed record in an
 * {@code AutoRoutable} carrying the per-stream routing config.
 */
@JsonDeserialize(builder = RoutableKinesisIngressSpec.Builder.class)
public class RoutableKinesisIngressSpec {

  private final IngressIdentifier<Message> id;
  private final AwsRegion awsRegion;
  private final AwsCredentials awsCredentials;
  private final KinesisIngressStartupPosition startupPosition;
  // Keyed by stream name; each value describes the payload type and target functions.
  private final Map<String, RoutingConfig> streamRoutings;
  private final Properties properties;

  private RoutableKinesisIngressSpec(
      IngressIdentifier<Message> id,
      AwsRegion awsRegion,
      AwsCredentials awsCredentials,
      KinesisIngressStartupPosition startupPosition,
      Map<String, RoutingConfig> streamRoutings,
      Properties properties) {
    this.id = Objects.requireNonNull(id);
    this.awsRegion = Objects.requireNonNull(awsRegion);
    this.awsCredentials = Objects.requireNonNull(awsCredentials);
    this.startupPosition = Objects.requireNonNull(startupPosition);
    this.streamRoutings = Objects.requireNonNull(streamRoutings);
    this.properties = Objects.requireNonNull(properties);
  }

  /**
   * Converts this spec into a universal {@link KinesisIngressSpec}, subscribing to every
   * configured stream and installing a {@link RoutableKinesisIngressDeserializer} for routing.
   */
  public KinesisIngressSpec<Message> toUniversalKinesisIngressSpec() {
    final KinesisIngressBuilder<Message> builder =
        KinesisIngressBuilder.forIdentifier(id)
            .withAwsRegion(awsRegion)
            .withAwsCredentials(awsCredentials)
            .withStartupPosition(startupPosition)
            .withProperties(properties);
    streamRoutings.keySet().forEach(builder::withStream);
    KinesisIngressBuilderApiExtension.withDeserializer(
        builder, new RoutableKinesisIngressDeserializer(streamRoutings));
    return builder.build();
  }

  /** Returns the identifier of this ingress. */
  public IngressIdentifier<Message> id() {
    return id;
  }

  /** Jackson POJO builder; all fields except {@code id} are optional and have defaults. */
  @JsonPOJOBuilder
  public static class Builder {

    private final IngressIdentifier<Message> id;
    private AwsRegion awsRegion = AwsRegion.fromDefaultProviderChain();
    private AwsCredentials awsCredentials = AwsCredentials.fromDefaultProviderChain();
    private KinesisIngressStartupPosition startupPosition =
        KinesisIngressStartupPosition.fromLatest();
    private Map<String, RoutingConfig> streamRoutings = new HashMap<>();
    private Properties properties = new Properties();

    @JsonCreator
    private Builder(
        @JsonProperty("id") @JsonDeserialize(using = IngressIdentifierJsonDeserializer.class)
            IngressIdentifier<Message> id) {
      this.id = Objects.requireNonNull(id);
    }

    /** Sets the AWS region; defaults to the default provider chain. */
    @JsonProperty("awsRegion")
    @JsonDeserialize(using = AwsRegionJsonDeserializer.class)
    public Builder withAwsRegion(AwsRegion awsRegion) {
      this.awsRegion = Objects.requireNonNull(awsRegion);
      return this;
    }

    /** Sets the AWS credentials; defaults to the default provider chain. */
    @JsonProperty("awsCredentials")
    @JsonDeserialize(using = AwsCredentialsJsonDeserializer.class)
    public Builder withAwsCredentials(AwsCredentials awsCredentials) {
      this.awsCredentials = Objects.requireNonNull(awsCredentials);
      return this;
    }

    /** Sets the startup position; defaults to {@code latest}. */
    @JsonProperty("startupPosition")
    @JsonDeserialize(using = StartupPositionJsonDeserializer.class)
    public Builder withStartupPosition(KinesisIngressStartupPosition startupPosition) {
      this.startupPosition = Objects.requireNonNull(startupPosition);
      return this;
    }

    /** Sets the per-stream routing configuration, parsed from the "streams" array. */
    @JsonProperty("streams")
    @JsonDeserialize(using = StreamRoutingsJsonDeserializer.class)
    public Builder withStreamRoutings(Map<String, RoutingConfig> streamRoutings) {
      this.streamRoutings = Objects.requireNonNull(streamRoutings);
      return this;
    }

    /** Sets arbitrary Kinesis client configuration properties. */
    @JsonProperty("clientConfigProperties")
    @JsonDeserialize(using = PropertiesJsonDeserializer.class)
    public Builder withProperties(Properties properties) {
      this.properties = Objects.requireNonNull(properties);
      return this;
    }

    public RoutableKinesisIngressSpec build() {
      return new RoutableKinesisIngressSpec(
          id, awsRegion, awsCredentials, startupPosition, streamRoutings, properties);
    }
  }

  /**
   * Parses the "streams" array: each element is an object with "stream" (name), "valueType"
   * (payload typename) and "targets" (list of target function typenames).
   */
  private static class StreamRoutingsJsonDeserializer
      extends JsonDeserializer<Map<String, RoutingConfig>> {
    @Override
    public Map<String, RoutingConfig> deserialize(
        JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
      final ObjectNode[] routingJsonNodes = jsonParser.readValueAs(ObjectNode[].class);
      final Map<String, RoutingConfig> result = new HashMap<>(routingJsonNodes.length);
      for (ObjectNode routingJsonNode : routingJsonNodes) {
        final RoutingConfig routingConfig =
            RoutingConfig.newBuilder()
                .setTypeUrl(routingJsonNode.get("valueType").textValue())
                .addAllTargetFunctionTypes(parseTargetFunctions(routingJsonNode))
                .build();
        result.put(routingJsonNode.get("stream").asText(), routingConfig);
      }
      return result;
    }
  }

  /**
   * Parses the "startupPosition" object; the "type" field selects one of: earliest, latest, or
   * date (the latter requiring a "date" field matching {@link #DATE_PATTERN}).
   */
  private static class StartupPositionJsonDeserializer
      extends JsonDeserializer<KinesisIngressStartupPosition> {
    private static final String EARLIEST_TYPE = "earliest";
    private static final String LATEST_TYPE = "latest";
    private static final String DATE_TYPE = "date";
    private static final String DATE_PATTERN = "yyyy-MM-dd HH:mm:ss.SSS Z";
    // DateTimeFormatter is immutable and thread-safe, so it is cached statically.
    private static final DateTimeFormatter DATE_FORMATTER =
        DateTimeFormatter.ofPattern(DATE_PATTERN);

    @Override
    public KinesisIngressStartupPosition deserialize(
        JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
      final ObjectNode startupPositionNode = jsonParser.readValueAs(ObjectNode.class);
      final String startupTypeString = startupPositionNode.get("type").asText();
      switch (startupTypeString) {
        case EARLIEST_TYPE:
          return KinesisIngressStartupPosition.fromEarliest();
        case LATEST_TYPE:
          return KinesisIngressStartupPosition.fromLatest();
        case DATE_TYPE:
          return KinesisIngressStartupPosition.fromDate(parseStartupDate(startupPositionNode));
        default:
          final List<String> validValues = Arrays.asList(EARLIEST_TYPE, LATEST_TYPE, DATE_TYPE);
          throw new IllegalArgumentException(
              "Invalid startup position type: "
                  + startupTypeString
                  + "; valid values are ["
                  + String.join(", ", validValues)
                  + "]");
      }
    }
  }

  /** Extracts the list of target function types from a routing entry's "targets" array. */
  private static List<TargetFunctionType> parseTargetFunctions(JsonNode routingJsonNode) {
    final Iterable<JsonNode> targetFunctionNodes = routingJsonNode.get("targets");
    return StreamSupport.stream(targetFunctionNodes.spliterator(), false)
        .map(RoutableKinesisIngressSpec::parseTargetFunctionType)
        .collect(Collectors.toList());
  }

  /** Converts a single "namespace/name" typename string into a {@link TargetFunctionType}. */
  private static TargetFunctionType parseTargetFunctionType(JsonNode targetFunctionNode) {
    final TypeName targetType = TypeName.parseFrom(targetFunctionNode.asText());
    return TargetFunctionType.newBuilder()
        .setNamespace(targetType.namespace())
        .setType(targetType.name())
        .build();
  }

  /** Parses the "date" field of a date-typed startup position, failing with a helpful message. */
  private static ZonedDateTime parseStartupDate(ObjectNode startupPositionNode) {
    final String dateString = startupPositionNode.get("date").asText();
    try {
      return ZonedDateTime.parse(dateString, StartupPositionJsonDeserializer.DATE_FORMATTER);
    } catch (DateTimeParseException e) {
      throw new IllegalArgumentException(
          "Unable to parse date string for startup position: "
              + dateString
              + "; the date should conform to the pattern "
              + StartupPositionJsonDeserializer.DATE_PATTERN,
          e);
    }
  }
}
| 6,021 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress/v1/Module.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.ingress.v1;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.extensions.ExtensionModule;
@AutoService(ExtensionModule.class)
public final class Module implements ExtensionModule {

  /** Registers the v1 routable Kinesis ingress binder under its kind typename. */
  @Override
  public void configure(Map<String, String> globalConfigurations, Binder universeBinder) {
    final RoutableKinesisIngressBinderV1 binder = RoutableKinesisIngressBinderV1.INSTANCE;
    universeBinder.bindExtension(RoutableKinesisIngressBinderV1.KIND_TYPE, binder);
  }
}
| 6,022 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress/v1/RoutableKinesisIngressBinderV1.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.ingress.v1;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.statefun.extensions.ComponentBinder;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.apache.flink.statefun.flink.io.common.AutoRoutableProtobufRouter;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
/**
* Version 1 {@link ComponentBinder} for binding a Kinesis ingress which automatically routes
* records to target functions using the record key as the function id. Corresponding {@link
* TypeName} is {@code io.statefun.kinesis.v1/ingress}.
*
* <p>Below is an example YAML document of the {@link ComponentJsonObject} recognized by this
* binder, with the expected types of each field:
*
* <pre>
* kind: io.statefun.kinesis.v1/ingress (typename)
* spec: (object)
* id: com.foo.bar/my-ingress (typename)
* awsRegion: (object, optional)
* type: specific (string)
* id: us-west-2 (string)
* awsCredentials: (object, optional)
* type: basic (string)
* accessKeyId: my_access_key_id (string)
* secretAccessKey: my_secret_access_key (string)
* startupPosition: (object, optional)
* type: earliest (string)
* streams: (array)
* - stream: stream-1 (string)
* valueType: com.foo.bar/my-type-1 (typename)
* targets: (array)
* - com.mycomp.foo/function-1 (typename)
* - ...
* - ...
* clientConfigProperties: (array, optional)
* - SocketTimeout: 9999 (string)
* - MaxConnections: 15 (string)
* - ...
* </pre>
*
* <p>The {@code awsRegion}, {@code awsCredentials}, {@code startupPosition} options all have
* multiple options to choose from. Please see {@link RoutableKinesisIngressSpec} for further
* details.
*/
final class RoutableKinesisIngressBinderV1 implements ComponentBinder {
  private static final ObjectMapper SPEC_OBJ_MAPPER = StateFunObjectMapper.create();

  /** Singleton instance registered with the extension module. */
  static final RoutableKinesisIngressBinderV1 INSTANCE = new RoutableKinesisIngressBinderV1();

  /** Kind typename a component must declare to be handled by this binder. */
  static final TypeName KIND_TYPE = TypeName.parseFrom("io.statefun.kinesis.v1/ingress");

  private RoutableKinesisIngressBinderV1() {}

  /**
   * Parses the component's spec node and binds both the universal Kinesis ingress and a router
   * that forwards each consumed record to its configured target functions.
   */
  @Override
  public void bind(
      ComponentJsonObject component, StatefulFunctionModule.Binder remoteModuleBinder) {
    validateComponent(component);
    final JsonNode specJsonNode = component.specJsonNode();
    final RoutableKinesisIngressSpec spec = parseSpec(specJsonNode);
    remoteModuleBinder.bindIngress(spec.toUniversalKinesisIngressSpec());
    remoteModuleBinder.bindIngressRouter(spec.id(), new AutoRoutableProtobufRouter());
  }

  /** Rejects components that were routed to this binder under the wrong kind typename. */
  private static void validateComponent(ComponentJsonObject componentJsonObject) {
    final TypeName targetBinderType = componentJsonObject.binderTypename();
    if (!targetBinderType.equals(KIND_TYPE)) {
      throw new IllegalStateException(
          "Received unexpected ModuleComponent to bind: " + componentJsonObject);
    }
  }

  private static RoutableKinesisIngressSpec parseSpec(JsonNode specJsonNode) {
    try {
      return SPEC_OBJ_MAPPER.treeToValue(specJsonNode, RoutableKinesisIngressSpec.class);
    } catch (JsonProcessingException e) {
      // Fix: the previous message referenced a non-existent "AutoRoutableKinesisIngressSpec";
      // the spec class actually parsed here is RoutableKinesisIngressSpec.
      throw new RuntimeException("Error parsing a RoutableKinesisIngressSpec.", e);
    }
  }
}
| 6,023 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kinesis/binders/ingress/v1/RoutableKinesisIngressDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.binders.ingress.v1;
import com.google.protobuf.Message;
import com.google.protobuf.MoreByteStrings;
import java.util.Map;
import org.apache.flink.statefun.flink.io.generated.AutoRoutable;
import org.apache.flink.statefun.flink.io.generated.RoutingConfig;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.kinesis.ingress.IngressRecord;
import org.apache.flink.statefun.sdk.kinesis.ingress.KinesisIngressDeserializer;
public final class RoutableKinesisIngressDeserializer
    implements KinesisIngressDeserializer<Message> {

  private static final long serialVersionUID = 1L;

  /** Routing configuration per stream name; validated non-empty at construction. */
  private final Map<String, RoutingConfig> routingConfigs;

  public RoutableKinesisIngressDeserializer(Map<String, RoutingConfig> routingConfigs) {
    if (routingConfigs == null || routingConfigs.isEmpty()) {
      throw new IllegalArgumentException(
          "Routing config for routable Kinesis ingress cannot be empty.");
    }
    this.routingConfigs = routingConfigs;
  }

  /**
   * Wraps a consumed stream record into an {@code AutoRoutable}, using the record's partition
   * key as the target function id and the stream's routing config for dispatch.
   */
  @Override
  public Message deserialize(IngressRecord ingressRecord) {
    final String sourceStream = ingressRecord.getStream();
    final String functionId = requireNonNullKey(ingressRecord.getPartitionKey());

    final RoutingConfig config = routingConfigs.get(sourceStream);
    if (config == null) {
      throw new IllegalStateException(
          "Consumed a record from stream [" + sourceStream + "], but no routing config was specified.");
    }

    return AutoRoutable.newBuilder()
        .setConfig(config)
        .setId(functionId)
        .setPayloadBytes(MoreByteStrings.wrap(ingressRecord.getData()))
        .build();
  }

  /** Fails fast on records without a partition key, since it doubles as the function id. */
  private static String requireNonNullKey(String partitionKey) {
    if (partitionKey != null) {
      return partitionKey;
    }
    final TypeName kind = RoutableKinesisIngressBinderV1.KIND_TYPE;
    throw new IllegalStateException(
        "The "
            + kind.namespace()
            + "/"
            + kind.name()
            + " ingress requires a UTF-8 partition key set for each stream record.");
  }
}
| 6,024 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/common/AutoRoutableProtobufRouter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.common;
import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import org.apache.flink.statefun.flink.io.generated.AutoRoutable;
import org.apache.flink.statefun.flink.io.generated.RoutingConfig;
import org.apache.flink.statefun.flink.io.generated.TargetFunctionType;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.io.Router;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
/**
* A {@link Router} that recognizes messages of type {@link AutoRoutable}.
*
* <p>For each incoming {@code AutoRoutable}, this router forwards the wrapped payload to the
* configured target addresses as a {@link TypedValue} message.
*/
public final class AutoRoutableProtobufRouter implements Router<Message> {

  /**
   * Note: while the input and type of this method is both {@link Message}, we actually do a
   * conversion here. The input {@link Message} is an {@link AutoRoutable}, which gets converted to
   * a {@link TypedValue} as the output after slicing the target address and actual payload.
   *
   * <p>The same payload is forwarded once per configured target function type, using the
   * routable's id as the target function id.
   */
  @Override
  public void route(Message message, Downstream<Message> downstream) {
    final AutoRoutable routable = asAutoRoutable(message);
    final RoutingConfig config = routable.getConfig();
    for (TargetFunctionType targetFunction : config.getTargetFunctionTypesList()) {
      downstream.forward(
          sdkFunctionType(targetFunction),
          routable.getId(),
          typedValuePayload(config.getTypeUrl(), routable.getPayloadBytes()));
    }
  }

  /** Narrows the message to {@link AutoRoutable}, with a descriptive error otherwise. */
  private static AutoRoutable asAutoRoutable(Message message) {
    try {
      return (AutoRoutable) message;
    } catch (ClassCastException e) {
      throw new RuntimeException(
          "This router only expects messages of type " + AutoRoutable.class.getName(), e);
    }
  }

  // Made static for consistency with the other helpers; the class holds no instance state.
  private static FunctionType sdkFunctionType(TargetFunctionType targetFunctionType) {
    return new FunctionType(targetFunctionType.getNamespace(), targetFunctionType.getType());
  }

  /** Packs the payload bytes and their type URL into a {@link TypedValue} envelope. */
  private static TypedValue typedValuePayload(String typeUrl, ByteString payloadBytes) {
    return TypedValue.newBuilder()
        .setTypename(typeUrl)
        .setHasValue(true)
        .setValue(payloadBytes)
        .build();
  }
}
| 6,025 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/common/ReflectionUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.common;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import org.apache.flink.annotation.Internal;
@Internal
public final class ReflectionUtil {
  private ReflectionUtil() {}

  /**
   * Creates an instance of {@code type} via its (possibly non-public) no-argument constructor.
   *
   * @param type the class to instantiate.
   * @param <T> the type of the created instance.
   * @return a new instance of {@code type}.
   * @throws IllegalStateException if the class has no default constructor, or instantiation fails.
   */
  public static <T> T instantiate(Class<T> type) {
    try {
      Constructor<T> defaultConstructor = type.getDeclaredConstructor();
      defaultConstructor.setAccessible(true);
      return defaultConstructor.newInstance();
    } catch (NoSuchMethodException e) {
      // Fix: previous message read "... of <type> has no default constructor" (broken grammar).
      throw new IllegalStateException(
          "Unable to create an instance of "
              + type.getName()
              + "; it has no default constructor",
          e);
    } catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
      throw new IllegalStateException("Unable to create an instance of " + type.getName(), e);
    }
  }
}
| 6,026 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/common | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/common/json/EgressIdentifierJsonDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.common.json;
import java.io.IOException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
public final class EgressIdentifierJsonDeserializer
    extends JsonDeserializer<EgressIdentifier<TypedValue>> {

  /** Parses a "namespace/name" typename string into a {@link TypedValue} egress identifier. */
  @Override
  public EgressIdentifier<TypedValue> deserialize(
      JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
    final String typenameString = jsonParser.getText();
    final TypeName typename = TypeName.parseFrom(typenameString);
    return new EgressIdentifier<>(typename.namespace(), typename.name(), TypedValue.class);
  }
}
| 6,027 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/common | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/common/json/IngressIdentifierJsonDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.common.json;
import com.google.protobuf.Message;
import java.io.IOException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
public final class IngressIdentifierJsonDeserializer
    extends JsonDeserializer<IngressIdentifier<Message>> {

  /** Parses a "namespace/name" typename string into a protobuf-typed ingress identifier. */
  @Override
  public IngressIdentifier<Message> deserialize(
      JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
    final String typenameString = jsonParser.getText();
    final TypeName typename = TypeName.parseFrom(typenameString);
    return new IngressIdentifier<>(Message.class, typename.namespace(), typename.name());
  }
}
| 6,028 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/common | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/common/json/PropertiesJsonDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.common.json;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
/**
 * Jackson deserializer that reads an array of single-entry JSON objects, e.g. {@code
 * [{"foo.config": "bar"}, ...]}, into a {@link Properties} instance.
 */
public final class PropertiesJsonDeserializer extends JsonDeserializer<Properties> {
  @Override
  public Properties deserialize(
      JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
    final JsonNode propertyNodes = jsonParser.readValueAs(JsonNode.class);
    final Properties parsedProperties = new Properties();
    // Only the first key/value entry of each element object is consumed.
    for (JsonNode propertyNode : propertyNodes) {
      final Map.Entry<String, JsonNode> entry = propertyNode.fields().next();
      parsedProperties.setProperty(entry.getKey(), entry.getValue().asText());
    }
    return parsedProperties;
  }
}
| 6,029 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/KafkaFlinkIoModule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.flink.io.spi.FlinkIoModule;
import org.apache.flink.statefun.sdk.kafka.Constants;
/**
 * A {@link FlinkIoModule} that registers the Kafka source and sink providers under the
 * well-known Kafka ingress/egress type names, so Kafka-typed specs resolve to Flink Kafka
 * connectors at runtime.
 */
@AutoService(FlinkIoModule.class)
public final class KafkaFlinkIoModule implements FlinkIoModule {
  @Override
  public void configure(Map<String, String> globalConfiguration, Binder binder) {
    // globalConfiguration is not consulted here; the providers read everything from the specs.
    binder.bindSourceProvider(Constants.KAFKA_INGRESS_TYPE, new KafkaSourceProvider());
    binder.bindSinkProvider(Constants.KAFKA_EGRESS_TYPE, new KafkaSinkProvider());
  }
}
| 6,030 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/KafkaDeserializationSchemaDelegate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka;
import java.util.Objects;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.statefun.flink.common.UnimplementedTypeInfo;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressDeserializer;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * A {@link KafkaDeserializationSchema} that forwards each consumed record to a user-provided
 * {@link KafkaIngressDeserializer}.
 *
 * <p>The produced {@link TypeInformation} is deliberately an {@link UnimplementedTypeInfo}; see
 * {@link #getProducedType()} for why this is safe.
 */
final class KafkaDeserializationSchemaDelegate<T> implements KafkaDeserializationSchema<T> {
  private static final long serialVersionUID = 1;
  // Placeholder type information; replaced during translation (see getProducedType()).
  private final TypeInformation<T> producedTypeInfo;
  // The user-supplied deserializer that performs the actual record decoding.
  private final KafkaIngressDeserializer<T> delegate;
  KafkaDeserializationSchemaDelegate(KafkaIngressDeserializer<T> delegate) {
    this.producedTypeInfo = new UnimplementedTypeInfo<>();
    this.delegate = Objects.requireNonNull(delegate);
  }
  @Override
  public boolean isEndOfStream(T t) {
    // Kafka ingresses are unbounded; no element ever terminates the stream.
    return false;
  }
  @Override
  public T deserialize(ConsumerRecord<byte[], byte[]> consumerRecord) {
    return delegate.deserialize(consumerRecord);
  }
  @Override
  public TypeInformation<T> getProducedType() {
    // this would never be actually used, it would be replaced during translation with the type
    // information of IngressIdentifier's producedType.
    // see: Sources#setOutputType.
    // if this invariant would not hold in the future, this type information would produce a
    // serializer that fails immediately.
    return producedTypeInfo;
  }
}
| 6,031 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/KafkaSerializationSchemaDelegate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka;
import java.util.Objects;
import javax.annotation.Nullable;
import org.apache.flink.statefun.sdk.kafka.KafkaEgressSerializer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerRecord;
/**
 * A {@link KafkaSerializationSchema} that forwards serialization of each outgoing element to a
 * user-provided {@link KafkaEgressSerializer}.
 */
final class KafkaSerializationSchemaDelegate<T> implements KafkaSerializationSchema<T> {
  private static final long serialVersionUID = 1L;
  private final KafkaEgressSerializer<T> delegate;
  KafkaSerializationSchemaDelegate(KafkaEgressSerializer<T> delegate) {
    this.delegate = Objects.requireNonNull(delegate);
  }
  @Override
  public ProducerRecord<byte[], byte[]> serialize(T element, @Nullable Long timestamp) {
    // The timestamp hint is ignored; the delegate fully determines the produced record.
    return delegate.serialize(element);
  }
}
| 6,032 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/KafkaSinkProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka;
import static org.apache.flink.util.StringUtils.generateRandomAlphanumericString;
import java.util.Properties;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.flink.statefun.flink.io.common.ReflectionUtil;
import org.apache.flink.statefun.flink.io.spi.SinkProvider;
import org.apache.flink.statefun.sdk.io.EgressSpec;
import org.apache.flink.statefun.sdk.kafka.KafkaEgressSerializer;
import org.apache.flink.statefun.sdk.kafka.KafkaEgressSpec;
import org.apache.flink.statefun.sdk.kafka.KafkaProducerSemantic;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.Semantic;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerConfig;
/**
 * A {@link SinkProvider} that translates a {@link KafkaEgressSpec} into a configured
 * {@link FlinkKafkaProducer}.
 */
public class KafkaSinkProvider implements SinkProvider {
  @Override
  public <T> SinkFunction<T> forSpec(EgressSpec<T> egressSpec) {
    final KafkaEgressSpec<T> kafkaSpec = asSpec(egressSpec);
    final Properties producerProps = new Properties();
    producerProps.putAll(kafkaSpec.properties());
    // The spec's address always wins over any bootstrap.servers entry in the raw properties.
    producerProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaSpec.kafkaAddress());
    final Semantic producerSemantic = semanticFromSpec(kafkaSpec);
    if (producerSemantic == Semantic.EXACTLY_ONCE) {
      // Exactly-once uses Kafka transactions; propagate the configured transaction timeout.
      final long timeoutMillis =
          kafkaSpec.semantic().asExactlyOnceSemantic().transactionTimeout().toMillis();
      producerProps.setProperty(
          ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(timeoutMillis));
    }
    return new FlinkKafkaProducer<>(
        randomKafkaTopic(),
        serializerFromSpec(kafkaSpec),
        producerProps,
        producerSemantic,
        kafkaSpec.kafkaProducerPoolSize());
  }
  /** Instantiates the user-declared serializer class and wraps it for the Flink connector. */
  private <T> KafkaSerializationSchema<T> serializerFromSpec(KafkaEgressSpec<T> spec) {
    return new KafkaSerializationSchemaDelegate<>(
        ReflectionUtil.instantiate(spec.serializerClass()));
  }
  /** Maps the SDK producer semantic onto the Flink Kafka producer {@link Semantic}. */
  private static <T> Semantic semanticFromSpec(KafkaEgressSpec<T> spec) {
    final KafkaProducerSemantic semantic = spec.semantic();
    if (semantic.isExactlyOnceSemantic()) {
      return Semantic.EXACTLY_ONCE;
    }
    if (semantic.isAtLeastOnceSemantic()) {
      return Semantic.AT_LEAST_ONCE;
    }
    if (semantic.isNoSemantic()) {
      return Semantic.NONE;
    }
    throw new IllegalArgumentException("Unknown producer semantic " + semantic.getClass());
  }
  /** Narrows a generic {@link EgressSpec} to a {@link KafkaEgressSpec}, rejecting other types. */
  private static <T> KafkaEgressSpec<T> asSpec(EgressSpec<T> spec) {
    if (spec == null) {
      throw new NullPointerException("Unable to translate a NULL spec");
    }
    if (spec instanceof KafkaEgressSpec) {
      return (KafkaEgressSpec<T>) spec;
    }
    throw new IllegalArgumentException(String.format("Wrong type %s", spec.type()));
  }
  /**
   * The target topic is carried per-record by the serializer; the producer still requires a
   * default topic, so a random one is generated.
   */
  private static String randomKafkaTopic() {
    return "__stateful_functions_random_topic_"
        + generateRandomAlphanumericString(ThreadLocalRandom.current(), 16);
  }
}
| 6,033 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/KafkaSourceProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka;
import java.util.HashMap;
import java.util.Map;
import org.apache.flink.statefun.flink.io.spi.SourceProvider;
import org.apache.flink.statefun.sdk.io.IngressSpec;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressSpec;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressStartupPosition;
import org.apache.flink.statefun.sdk.kafka.KafkaTopicPartition;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
/**
 * A {@link SourceProvider} that translates a {@link KafkaIngressSpec} into a configured
 * {@link FlinkKafkaConsumer}.
 */
public class KafkaSourceProvider implements SourceProvider {
  @Override
  public <T> SourceFunction<T> forSpec(IngressSpec<T> ingressSpec) {
    final KafkaIngressSpec<T> kafkaSpec = asKafkaSpec(ingressSpec);
    final FlinkKafkaConsumer<T> consumer =
        new FlinkKafkaConsumer<>(
            kafkaSpec.topics(), deserializationSchemaFromSpec(kafkaSpec), kafkaSpec.properties());
    configureStartupPosition(consumer, kafkaSpec.startupPosition());
    return consumer;
  }
  /** Narrows a generic {@link IngressSpec} to a {@link KafkaIngressSpec}, rejecting other types. */
  private static <T> KafkaIngressSpec<T> asKafkaSpec(IngressSpec<T> ingressSpec) {
    if (ingressSpec == null) {
      throw new NullPointerException("Unable to translate a NULL spec");
    }
    if (ingressSpec instanceof KafkaIngressSpec) {
      return (KafkaIngressSpec<T>) ingressSpec;
    }
    throw new IllegalArgumentException(String.format("Wrong type %s", ingressSpec.type()));
  }
  /** Applies the spec's declared startup position to the Flink Kafka consumer. */
  private static <T> void configureStartupPosition(
      FlinkKafkaConsumer<T> consumer, KafkaIngressStartupPosition startupPosition) {
    if (startupPosition.isGroupOffsets()) {
      consumer.setStartFromGroupOffsets();
      return;
    }
    if (startupPosition.isEarliest()) {
      consumer.setStartFromEarliest();
      return;
    }
    if (startupPosition.isLatest()) {
      consumer.setStartFromLatest();
      return;
    }
    if (startupPosition.isSpecificOffsets()) {
      final KafkaIngressStartupPosition.SpecificOffsetsPosition offsetsPosition =
          startupPosition.asSpecificOffsets();
      consumer.setStartFromSpecificOffsets(
          convertKafkaTopicPartitionMap(offsetsPosition.specificOffsets()));
      return;
    }
    if (startupPosition.isDate()) {
      consumer.setStartFromTimestamp(startupPosition.asDate().epochMilli());
      return;
    }
    throw new IllegalStateException("Safe guard; should not occur");
  }
  private <T> KafkaDeserializationSchema<T> deserializationSchemaFromSpec(
      KafkaIngressSpec<T> spec) {
    return new KafkaDeserializationSchemaDelegate<>(spec.deserializer());
  }
  /** Converts SDK partition keys to Flink's internal {@code KafkaTopicPartition} keys. */
  private static Map<
          org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition, Long>
      convertKafkaTopicPartitionMap(Map<KafkaTopicPartition, Long> offsets) {
    final Map<org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition, Long>
        converted = new HashMap<>(offsets.size());
    offsets.forEach(
        (partition, offset) -> converted.put(convertKafkaTopicPartition(partition), offset));
    return converted;
  }
  private static org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition
      convertKafkaTopicPartition(KafkaTopicPartition partition) {
    return new org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition(
        partition.topic(), partition.partition());
  }
}
| 6,034 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/egress/v1/GenericKafkaEgressSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.egress.v1;
import java.io.IOException;
import java.time.Duration;
import java.util.Objects;
import java.util.Optional;
import java.util.Properties;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.io.common.json.EgressIdentifierJsonDeserializer;
import org.apache.flink.statefun.flink.io.common.json.PropertiesJsonDeserializer;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.kafka.KafkaEgressBuilder;
import org.apache.flink.statefun.sdk.kafka.KafkaEgressSpec;
import org.apache.flink.statefun.sdk.kafka.KafkaProducerSemantic;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
import org.apache.flink.util.TimeUtils;
/**
 * JSON-deserializable spec for a generic Kafka egress, as declared in a module component.
 * Parsed by Jackson via the nested {@link Builder}, then converted to a universal
 * {@link KafkaEgressSpec} with {@link #toUniversalKafkaEgressSpec()}.
 */
@JsonDeserialize(builder = GenericKafkaEgressSpec.Builder.class)
final class GenericKafkaEgressSpec {
  // Identifier of the egress ("namespace/name" typename).
  private final EgressIdentifier<TypedValue> id;
  // Kafka bootstrap address; optional — may instead be supplied via raw properties.
  private final Optional<String> address;
  // Delivery guarantee for the producer (at-least-once, exactly-once, or none).
  private final KafkaProducerSemantic producerSemantic;
  // Raw Kafka producer properties passed through to the connector.
  private final Properties properties;
  private GenericKafkaEgressSpec(
      EgressIdentifier<TypedValue> id,
      Optional<String> address,
      KafkaProducerSemantic producerSemantic,
      Properties properties) {
    this.id = Objects.requireNonNull(id);
    this.address = Objects.requireNonNull(address);
    this.producerSemantic = Objects.requireNonNull(producerSemantic);
    this.properties = Objects.requireNonNull(properties);
  }
  /**
   * Converts this parsed spec into a {@link KafkaEgressSpec}, always using
   * {@link GenericKafkaEgressSerializer} as the record serializer.
   */
  public KafkaEgressSpec<TypedValue> toUniversalKafkaEgressSpec() {
    final KafkaEgressBuilder<TypedValue> builder = KafkaEgressBuilder.forIdentifier(id);
    address.ifPresent(builder::withKafkaAddress);
    builder.withProducerSemantic(producerSemantic);
    builder.withProperties(properties);
    builder.withSerializer(GenericKafkaEgressSerializer.class);
    return builder.build();
  }
  /** Jackson POJO builder; method names below are bound via their {@code @JsonProperty} keys. */
  @JsonPOJOBuilder
  public static class Builder {
    private final EgressIdentifier<TypedValue> id;
    private Optional<String> kafkaAddress = Optional.empty();
    // Default delivery semantic when the YAML omits "deliverySemantic".
    private KafkaProducerSemantic producerSemantic = KafkaProducerSemantic.atLeastOnce();
    private Properties properties = new Properties();
    @JsonCreator
    private Builder(
        @JsonProperty("id") @JsonDeserialize(using = EgressIdentifierJsonDeserializer.class)
            EgressIdentifier<TypedValue> id) {
      this.id = Objects.requireNonNull(id);
    }
    @JsonProperty("address")
    public Builder withKafkaAddress(String address) {
      Objects.requireNonNull(address);
      this.kafkaAddress = Optional.of(address);
      return this;
    }
    @JsonProperty("deliverySemantic")
    @JsonDeserialize(using = ProducerSemanticJsonDeserializer.class)
    public Builder withDeliverySemantic(KafkaProducerSemantic producerSemantic) {
      this.producerSemantic = Objects.requireNonNull(producerSemantic);
      return this;
    }
    @JsonProperty("properties")
    @JsonDeserialize(using = PropertiesJsonDeserializer.class)
    public Builder withProperties(Properties properties) {
      this.properties = Objects.requireNonNull(properties);
      return this;
    }
    public GenericKafkaEgressSpec build() {
      return new GenericKafkaEgressSpec(id, kafkaAddress, producerSemantic, properties);
    }
  }
  /**
   * Deserializes the {@code deliverySemantic} object, e.g.
   * {@code {type: exactly-once, transactionTimeout: 15min}}.
   */
  private static class ProducerSemanticJsonDeserializer
      extends JsonDeserializer<KafkaProducerSemantic> {
    @Override
    public KafkaProducerSemantic deserialize(
        JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
      final ObjectNode producerSemanticNode = jsonParser.readValueAs(ObjectNode.class);
      final String semanticTypeString = producerSemanticNode.get("type").asText();
      switch (semanticTypeString) {
        case "at-least-once":
          return KafkaProducerSemantic.atLeastOnce();
        case "exactly-once":
          // Exactly-once additionally requires a transaction timeout.
          return KafkaProducerSemantic.exactlyOnce(parseTransactionTimeout(producerSemanticNode));
        case "none":
          return KafkaProducerSemantic.none();
        default:
          throw new IllegalArgumentException(
              "Invalid delivery semantic type: "
                  + semanticTypeString
                  + "; valid types are [at-least-once, exactly-once, none]");
      }
    }
  }
  /**
   * Reads the transaction timeout for exactly-once delivery, accepting either the deprecated
   * {@code transactionTimeoutMillis} (long) or {@code transactionTimeout} (duration string).
   */
  private static Duration parseTransactionTimeout(ObjectNode producerSemanticNode) {
    // Prefer deprecated millis based timeout for backwards compatibility
    // then fallback to duration based configuration.
    final JsonNode deprecatedTransactionTimeoutMillisNode =
        producerSemanticNode.get("transactionTimeoutMillis");
    if (deprecatedTransactionTimeoutMillisNode != null) {
      return Duration.ofMillis(deprecatedTransactionTimeoutMillisNode.asLong());
    }
    return TimeUtils.parseDuration(producerSemanticNode.get("transactionTimeout").asText());
  }
}
| 6,035 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/egress/v1/Module.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.egress.v1;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.extensions.ExtensionModule;
/**
 * An {@link ExtensionModule} that registers the version 1 generic Kafka egress component binder
 * under its well-known kind typename ({@code io.statefun.kafka.v1/egress}).
 */
@AutoService(ExtensionModule.class)
public final class Module implements ExtensionModule {
  @Override
  public void configure(Map<String, String> globalConfigurations, Binder universeBinder) {
    // globalConfigurations is not consulted; the binder is configured per-component via YAML.
    universeBinder.bindExtension(
        GenericKafkaEgressBinderV1.KIND_TYPE, GenericKafkaEgressBinderV1.INSTANCE);
  }
}
| 6,036 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/egress/v1/GenericKafkaEgressSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.egress.v1;
import com.google.protobuf.InvalidProtocolBufferException;
import java.nio.charset.StandardCharsets;
import org.apache.flink.statefun.flink.common.types.TypedValueUtil;
import org.apache.flink.statefun.sdk.egress.generated.KafkaProducerRecord;
import org.apache.flink.statefun.sdk.kafka.KafkaEgressSerializer;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
import org.apache.kafka.clients.producer.ProducerRecord;
/**
 * A {@link KafkaEgressSerializer} used solely by Kafka egresses bound via {@link
 * GenericKafkaEgressBinderV1}.
 *
 * <p>Every incoming message must be a Protobuf {@link KafkaProducerRecord}; its contents are
 * unpacked and translated into a Kafka {@link ProducerRecord}.
 */
public final class GenericKafkaEgressSerializer implements KafkaEgressSerializer<TypedValue> {
  private static final long serialVersionUID = 1L;
  @Override
  public ProducerRecord<byte[], byte[]> serialize(TypedValue message) {
    return toProducerRecord(asKafkaProducerRecord(message));
  }
  /** Unpacks the {@link TypedValue} payload as a {@link KafkaProducerRecord}, or fails loudly. */
  private static KafkaProducerRecord asKafkaProducerRecord(TypedValue message) {
    if (!TypedValueUtil.isProtobufTypeOf(message, KafkaProducerRecord.getDescriptor())) {
      throw new IllegalStateException(
          "The generic Kafka egress expects only messages of type "
              + KafkaProducerRecord.class.getName());
    }
    try {
      return KafkaProducerRecord.parseFrom(message.getValue());
    } catch (InvalidProtocolBufferException e) {
      throw new RuntimeException(
          "Unable to unpack message as a " + KafkaProducerRecord.class.getName(), e);
    }
  }
  /** Builds a Kafka record; a key is attached only when one was set (non-null, non-empty). */
  private static ProducerRecord<byte[], byte[]> toProducerRecord(
      KafkaProducerRecord protobufProducerRecord) {
    final String recordKey = protobufProducerRecord.getKey();
    final String targetTopic = protobufProducerRecord.getTopic();
    final byte[] payload = protobufProducerRecord.getValueBytes().toByteArray();
    if (recordKey != null && !recordKey.isEmpty()) {
      return new ProducerRecord<>(
          targetTopic, recordKey.getBytes(StandardCharsets.UTF_8), payload);
    }
    return new ProducerRecord<>(targetTopic, payload);
  }
}
| 6,037 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/egress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/egress/v1/GenericKafkaEgressBinderV1.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.egress.v1;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.statefun.extensions.ComponentBinder;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.egress.generated.KafkaProducerRecord;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
/**
 * Version 1 {@link ComponentBinder} for binding a Kafka egress which expects {@link
 * KafkaProducerRecord} as input, and writes the wrapped value bytes to Kafka. Corresponding {@link
 * TypeName} is {@code io.statefun.kafka.v1/egress}.
 *
 * <p>Below is an example YAML document of the {@link ComponentJsonObject} recognized by this
 * binder, with the expected types of each field:
 *
 * <pre>
 * kind: io.statefun.kafka.v1/egress (typename)
 * spec: (object)
 *   id: com.foo.bar/my-ingress (typename)
 *   address: kafka-broker:9092 (string, optional)
 *   deliverySemantic: (object, optional)
 *     type: exactly-once (string)
 *     transactionTimeout: 15min (duration)
 *   properties: (array)
 *     - foo.config: bar (string)
 * </pre>
 *
 * <p>The {@code deliverySemantic} can be one of the following options: {@code exactly-once}, {@code
 * at-least-once}, or {@code none}.
 *
 * <p>Please see {@link GenericKafkaEgressSpec} for further details.
 */
final class GenericKafkaEgressBinderV1 implements ComponentBinder {
  static final GenericKafkaEgressBinderV1 INSTANCE = new GenericKafkaEgressBinderV1();
  static final TypeName KIND_TYPE = TypeName.parseFrom("io.statefun.kafka.v1/egress");
  private static final ObjectMapper SPEC_OBJ_MAPPER = StateFunObjectMapper.create();
  private GenericKafkaEgressBinderV1() {}
  @Override
  public void bind(
      ComponentJsonObject component, StatefulFunctionModule.Binder remoteModuleBinder) {
    validateComponent(component);
    final GenericKafkaEgressSpec parsedSpec = parseSpec(component.specJsonNode());
    remoteModuleBinder.bindEgress(parsedSpec.toUniversalKafkaEgressSpec());
  }
  /** Rejects components that were routed here with a kind other than {@link #KIND_TYPE}. */
  private static void validateComponent(ComponentJsonObject componentJsonObject) {
    final TypeName targetBinderType = componentJsonObject.binderTypename();
    if (!targetBinderType.equals(KIND_TYPE)) {
      throw new IllegalStateException(
          "Received unexpected ModuleComponent to bind: " + componentJsonObject);
    }
  }
  /** Deserializes the component's {@code spec} node into a {@link GenericKafkaEgressSpec}. */
  private static GenericKafkaEgressSpec parseSpec(JsonNode specJsonNode) {
    try {
      return SPEC_OBJ_MAPPER.treeToValue(specJsonNode, GenericKafkaEgressSpec.class);
    } catch (JsonProcessingException e) {
      throw new RuntimeException("Error parsing a GenericKafkaEgressSpec.", e);
    }
  }
}
| 6,038 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress/v1/RoutableKafkaIngressSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.ingress.v1;
import com.google.protobuf.Message;
import java.io.IOException;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Properties;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParser;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonDeserializer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.io.common.json.IngressIdentifierJsonDeserializer;
import org.apache.flink.statefun.flink.io.common.json.PropertiesJsonDeserializer;
import org.apache.flink.statefun.flink.io.generated.RoutingConfig;
import org.apache.flink.statefun.flink.io.generated.TargetFunctionType;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressAutoResetPosition;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressBuilder;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressBuilderApiExtension;
import org.apache.flink.statefun.sdk.kafka.KafkaIngressStartupPosition;
import org.apache.flink.statefun.sdk.kafka.KafkaTopicPartition;
/**
 * JSON-deserializable spec for the routable Kafka ingress (binder kind {@code
 * io.statefun.kafka.v1/ingress}).
 *
 * <p>Instances are materialized by Jackson through the nested {@link Builder} and then translated
 * into a universal Kafka ingress spec via {@link #toUniversalKafkaIngressSpec()}.
 */
@JsonDeserialize(builder = RoutableKafkaIngressSpec.Builder.class)
final class RoutableKafkaIngressSpec {
  // Identifier under which this ingress is bound in the module.
  private final IngressIdentifier<Message> id;
  // Kafka bootstrap address ("address" in the spec); optional in the JSON document.
  private final Optional<String> kafkaAddress;
  // Kafka consumer group id ("consumerGroupId" in the spec); optional in the JSON document.
  private final Optional<String> consumerGroupId;
  // Routing configuration per topic name, parsed from the "topics" array.
  private final Map<String, RoutingConfig> topicRoutings;
  // Offset reset behavior when no committed offset exists; defaults to LATEST (see Builder).
  private final KafkaIngressAutoResetPosition autoOffsetResetPosition;
  // Where consumption starts; defaults to fromLatest() (see Builder).
  private final KafkaIngressStartupPosition startupPosition;
  // Additional raw Kafka consumer properties.
  private final Properties properties;
  private RoutableKafkaIngressSpec(
      IngressIdentifier<Message> id,
      Optional<String> kafkaAddress,
      Optional<String> consumerGroupId,
      Map<String, RoutingConfig> topicRoutings,
      KafkaIngressAutoResetPosition autoOffsetResetPosition,
      KafkaIngressStartupPosition startupPosition,
      Properties properties) {
    this.id = id;
    this.kafkaAddress = kafkaAddress;
    this.consumerGroupId = consumerGroupId;
    this.topicRoutings = topicRoutings;
    this.autoOffsetResetPosition = autoOffsetResetPosition;
    this.startupPosition = startupPosition;
    this.properties = properties;
  }
  /** Returns the identifier under which this ingress is bound. */
  public IngressIdentifier<Message> id() {
    return id;
  }
  /**
   * Translates this spec into a universal {@code KafkaIngressSpec} by applying each configured
   * value to a {@link KafkaIngressBuilder} and installing a {@link RoutableKafkaIngressDeserializer}
   * that routes records according to {@link #topicRoutings}.
   */
  public org.apache.flink.statefun.sdk.kafka.KafkaIngressSpec toUniversalKafkaIngressSpec() {
    final KafkaIngressBuilder<Message> builder = KafkaIngressBuilder.forIdentifier(id);
    kafkaAddress.ifPresent(builder::withKafkaAddress);
    consumerGroupId.ifPresent(builder::withConsumerGroupId);
    // Subscribe to every topic that has a routing configuration.
    topicRoutings.keySet().forEach(builder::withTopic);
    builder.withAutoResetPosition(autoOffsetResetPosition);
    builder.withStartupPosition(startupPosition);
    builder.withProperties(properties);
    // The deserializer setter is package-restricted; it is reached via the extension helper.
    KafkaIngressBuilderApiExtension.withDeserializer(
        builder, new RoutableKafkaIngressDeserializer(topicRoutings));
    return builder.build();
  }
  /**
   * Jackson builder for {@link RoutableKafkaIngressSpec}. All fields except {@code id} are
   * optional and fall back to the defaults initialized below.
   */
  @JsonPOJOBuilder
  public static class Builder {
    private final IngressIdentifier<Message> id;
    private Optional<String> kafkaAddress = Optional.empty();
    private Optional<String> consumerGroupId = Optional.empty();
    private Map<String, RoutingConfig> topicRoutings = new HashMap<>();
    private KafkaIngressAutoResetPosition autoOffsetResetPosition =
        KafkaIngressAutoResetPosition.LATEST;
    private KafkaIngressStartupPosition startupPosition = KafkaIngressStartupPosition.fromLatest();
    private Properties properties = new Properties();
    @JsonCreator
    private Builder(
        @JsonProperty("id") @JsonDeserialize(using = IngressIdentifierJsonDeserializer.class)
            IngressIdentifier<Message> id) {
      this.id = Objects.requireNonNull(id);
    }
    @JsonProperty("address")
    public Builder withKafkaAddress(String address) {
      Objects.requireNonNull(address);
      this.kafkaAddress = Optional.of(address);
      return this;
    }
    @JsonProperty("consumerGroupId")
    public Builder withConsumerGroupId(String consumerGroupId) {
      Objects.requireNonNull(consumerGroupId);
      this.consumerGroupId = Optional.of(consumerGroupId);
      return this;
    }
    @JsonProperty("topics")
    @JsonDeserialize(using = TopicRoutingsJsonDeserializer.class)
    public Builder withTopicRoutings(Map<String, RoutingConfig> topicRoutings) {
      this.topicRoutings = Objects.requireNonNull(topicRoutings);
      return this;
    }
    @JsonProperty("autoOffsetResetPosition")
    @JsonDeserialize(using = AutoOffsetResetPositionJsonDeserializer.class)
    public Builder withAutoOffsetResetPosition(
        KafkaIngressAutoResetPosition autoOffsetResetPosition) {
      this.autoOffsetResetPosition = Objects.requireNonNull(autoOffsetResetPosition);
      return this;
    }
    @JsonProperty("startupPosition")
    @JsonDeserialize(using = StartupPositionJsonDeserializer.class)
    public Builder withStartupPosition(KafkaIngressStartupPosition startupPosition) {
      this.startupPosition = Objects.requireNonNull(startupPosition);
      return this;
    }
    @JsonProperty("properties")
    @JsonDeserialize(using = PropertiesJsonDeserializer.class)
    public Builder withProperties(Properties properties) {
      this.properties = Objects.requireNonNull(properties);
      return this;
    }
    public RoutableKafkaIngressSpec build() {
      return new RoutableKafkaIngressSpec(
          id,
          kafkaAddress,
          consumerGroupId,
          topicRoutings,
          autoOffsetResetPosition,
          startupPosition,
          properties);
    }
  }
  /**
   * Deserializes the "topics" array into a map of topic name to {@link RoutingConfig}. Each array
   * element is an object with "topic", "valueType", and "targets" fields.
   */
  private static class TopicRoutingsJsonDeserializer
      extends JsonDeserializer<Map<String, RoutingConfig>> {
    @Override
    public Map<String, RoutingConfig> deserialize(
        JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
      final ObjectNode[] routingJsonNodes = jsonParser.readValueAs(ObjectNode[].class);
      final Map<String, RoutingConfig> result = new HashMap<>(routingJsonNodes.length);
      for (ObjectNode routingJsonNode : routingJsonNodes) {
        // NOTE(review): "valueType" and "topic" are read without null checks; a missing field
        // would surface as a NullPointerException — TODO confirm validation happens upstream.
        final RoutingConfig routingConfig =
            RoutingConfig.newBuilder()
                .setTypeUrl(routingJsonNode.get("valueType").textValue())
                .addAllTargetFunctionTypes(parseTargetFunctions(routingJsonNode))
                .build();
        result.put(routingJsonNode.get("topic").asText(), routingConfig);
      }
      return result;
    }
  }
  /**
   * Deserializes "autoOffsetResetPosition" (case-insensitively) into a
   * {@link KafkaIngressAutoResetPosition} enum value.
   */
  private static class AutoOffsetResetPositionJsonDeserializer
      extends JsonDeserializer<KafkaIngressAutoResetPosition> {
    @Override
    public KafkaIngressAutoResetPosition deserialize(
        JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
      try {
        // Locale.ENGLISH keeps the upper-casing independent of the JVM's default locale.
        return KafkaIngressAutoResetPosition.valueOf(
            jsonParser.getText().toUpperCase(Locale.ENGLISH));
      } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(
            "Invalid autoOffsetResetPosition: "
                + jsonParser.getText()
                + "; valid values are "
                + Arrays.toString(KafkaIngressAutoResetPosition.values()),
            e);
      }
    }
  }
  /**
   * Deserializes the "startupPosition" object. Its "type" field selects one of: group-offsets,
   * earliest, latest, specific-offsets (with an "offsets" array), or date (with a "date" string
   * in the pattern "yyyy-MM-dd HH:mm:ss.SSS Z").
   */
  private static class StartupPositionJsonDeserializer
      extends JsonDeserializer<KafkaIngressStartupPosition> {
    private static final String STARTUP_DATE_PATTERN = "yyyy-MM-dd HH:mm:ss.SSS Z";
    // DateTimeFormatter is immutable and thread-safe, so it is cached as a constant.
    private static final DateTimeFormatter STARTUP_DATE_FORMATTER =
        DateTimeFormatter.ofPattern(STARTUP_DATE_PATTERN);
    @Override
    public KafkaIngressStartupPosition deserialize(
        JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
      final ObjectNode startupPositionNode = jsonParser.readValueAs(ObjectNode.class);
      final String startupTypeString = startupPositionNode.get("type").asText();
      switch (startupTypeString) {
        case "group-offsets":
          return KafkaIngressStartupPosition.fromGroupOffsets();
        case "earliest":
          return KafkaIngressStartupPosition.fromEarliest();
        case "latest":
          return KafkaIngressStartupPosition.fromLatest();
        case "specific-offsets":
          return KafkaIngressStartupPosition.fromSpecificOffsets(
              parseSpecificStartupOffsetsMap(startupPositionNode));
        case "date":
          return KafkaIngressStartupPosition.fromDate(parseStartupDate(startupPositionNode));
        default:
          throw new IllegalArgumentException(
              "Invalid startup position type: "
                  + startupTypeString
                  + "; valid values are [group-offsets, earliest, latest, specific-offsets, date]");
      }
    }
  }
  /** Parses the "targets" array of a routing entry into {@link TargetFunctionType}s. */
  private static List<TargetFunctionType> parseTargetFunctions(JsonNode routingJsonNode) {
    final Iterable<JsonNode> targetFunctionNodes = routingJsonNode.get("targets");
    return StreamSupport.stream(targetFunctionNodes.spliterator(), false)
        .map(RoutableKafkaIngressSpec::parseTargetFunctionType)
        .collect(Collectors.toList());
  }
  /** Parses a single "namespace/type" typename string into a {@link TargetFunctionType}. */
  private static TargetFunctionType parseTargetFunctionType(JsonNode targetFunctionNode) {
    final TypeName targetType = TypeName.parseFrom(targetFunctionNode.asText());
    return TargetFunctionType.newBuilder()
        .setNamespace(targetType.namespace())
        .setType(targetType.name())
        .build();
  }
  /**
   * Parses the "offsets" array of a specific-offsets startup position; each element is a
   * single-entry object mapping a "topic/partition" string to an offset.
   */
  private static Map<KafkaTopicPartition, Long> parseSpecificStartupOffsetsMap(
      ObjectNode startupPositionNode) {
    final Iterable<JsonNode> offsetNodes = startupPositionNode.get("offsets");
    final Map<KafkaTopicPartition, Long> offsets = new HashMap<>();
    offsetNodes.forEach(
        jsonNode -> {
          // Each element is expected to contain exactly one field: <topic-partition> -> <offset>.
          Map.Entry<String, JsonNode> offsetNode = jsonNode.fields().next();
          offsets.put(
              KafkaTopicPartition.fromString(offsetNode.getKey()), offsetNode.getValue().asLong());
        });
    return offsets;
  }
  /**
   * Parses the "date" field of a date startup position using {@code STARTUP_DATE_PATTERN};
   * throws {@link IllegalArgumentException} with the expected pattern on malformed input.
   */
  private static ZonedDateTime parseStartupDate(ObjectNode startupPositionNode) {
    final String dateString = startupPositionNode.get("date").asText();
    try {
      return ZonedDateTime.parse(
          dateString, StartupPositionJsonDeserializer.STARTUP_DATE_FORMATTER);
    } catch (DateTimeParseException e) {
      throw new IllegalArgumentException(
          "Unable to parse date string for startup position: "
              + dateString
              + "; the date should conform to the pattern "
              + StartupPositionJsonDeserializer.STARTUP_DATE_PATTERN,
          e);
    }
  }
}
| 6,039 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress/v1/RoutableKafkaIngressDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.ingress.v1;
import com.google.protobuf.Message;
import com.google.protobuf.MoreByteStrings;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.flink.statefun.flink.io.generated.AutoRoutable;
import org.apache.flink.statefun.flink.io.generated.RoutingConfig;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * A {@code KafkaIngressDeserializer} that wraps each consumed Kafka record in an {@link
 * AutoRoutable} message, using the record's UTF-8 key as the routing id and the per-topic {@link
 * RoutingConfig} to determine the target functions.
 */
public final class RoutableKafkaIngressDeserializer
    implements org.apache.flink.statefun.sdk.kafka.KafkaIngressDeserializer<Message> {
  private static final long serialVersionUID = 1L;

  /** Routing configuration keyed by Kafka topic name; never null or empty. */
  private final Map<String, RoutingConfig> routingConfigs;

  /**
   * @param routingConfigs routing configuration keyed by topic name.
   * @throws IllegalArgumentException if {@code routingConfigs} is null or empty.
   */
  public RoutableKafkaIngressDeserializer(Map<String, RoutingConfig> routingConfigs) {
    if (routingConfigs == null || routingConfigs.isEmpty()) {
      throw new IllegalArgumentException(
          "Routing config for routable Kafka ingress cannot be empty.");
    }
    this.routingConfigs = routingConfigs;
  }

  /**
   * Wraps the given Kafka record in an {@link AutoRoutable}.
   *
   * @throws IllegalStateException if the record has no key, has no value, or originates from a
   *     topic without a routing config.
   */
  @Override
  public Message deserialize(ConsumerRecord<byte[], byte[]> input) {
    final String topic = input.topic();
    final byte[] payload = requireNonNullValue(input.value(), topic);
    final byte[] key = requireNonNullKey(input.key());
    final String id = new String(key, StandardCharsets.UTF_8);

    final RoutingConfig routingConfig = routingConfigs.get(topic);
    if (routingConfig == null) {
      throw new IllegalStateException(
          "Consumed a record from topic [" + topic + "], but no routing config was specified.");
    }

    return AutoRoutable.newBuilder()
        .setConfig(routingConfig)
        .setId(id)
        .setPayloadBytes(MoreByteStrings.wrap(payload))
        .build();
  }

  // static: the check does not depend on instance state.
  private static byte[] requireNonNullKey(byte[] key) {
    if (key == null) {
      TypeName tpe = RoutableKafkaIngressBinderV1.KIND_TYPE;
      throw new IllegalStateException(
          "The "
              + tpe.namespace()
              + "/"
              + tpe.name()
              + " ingress requires a UTF-8 key set for each record.");
    }
    return key;
  }

  // Rejects tombstone records (Kafka records with a null value) with a clear diagnostic,
  // instead of failing later with an obscure NullPointerException when wrapping the payload.
  private static byte[] requireNonNullValue(byte[] value, String topic) {
    if (value == null) {
      TypeName tpe = RoutableKafkaIngressBinderV1.KIND_TYPE;
      throw new IllegalStateException(
          "The "
              + tpe.namespace()
              + "/"
              + tpe.name()
              + " ingress requires a non-null value for each record, but a record with a null"
              + " value (tombstone) was consumed from topic ["
              + topic
              + "].");
    }
    return value;
  }
}
| 6,040 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress/v1/Module.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.ingress.v1;
import com.google.auto.service.AutoService;
import java.util.Map;
import org.apache.flink.statefun.extensions.ExtensionModule;
/**
 * Extension module that registers the version 1 routable Kafka ingress binder under its kind
 * typename ({@code io.statefun.kafka.v1/ingress}). Discovered at runtime via {@link AutoService}.
 */
@AutoService(ExtensionModule.class)
public final class Module implements ExtensionModule {
  @Override
  public void configure(Map<String, String> globalConfigurations, Binder universeBinder) {
    // globalConfigurations is not consulted; the binder is registered unconditionally.
    universeBinder.bindExtension(
        RoutableKafkaIngressBinderV1.KIND_TYPE, RoutableKafkaIngressBinderV1.INSTANCE);
  }
}
| 6,041 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/flink/io/kafka/binders/ingress/v1/RoutableKafkaIngressBinderV1.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kafka.binders.ingress.v1;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.statefun.extensions.ComponentBinder;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.apache.flink.statefun.flink.io.common.AutoRoutableProtobufRouter;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
/**
* Version 1 {@link ComponentBinder} for binding a Kafka ingress which automatically routes records
* to target functions using the record key as the function id. Corresponding {@link TypeName} is
* {@code io.statefun.kafka.v1/ingress}.
*
* <p>Below is an example YAML document of the {@link ComponentJsonObject} recognized by this
* binder, with the expected types of each field:
*
* <pre>
* kind: io.statefun.kafka.v1/ingress (typename)
* spec: (object)
* id: com.foo.bar/my-ingress (typename)
* address: kafka-broker:9092 (string, optional)
* consumerGroupId: my-group-id (string, optional)
* topics: (array)
* - topic: topic-1 (string)
* valueType: com.foo.bar/my-type-1 (typename)
* targets: (array)
* - com.mycomp.foo/function-1 (typename)
* - ...
* - ...
* autoOffsetResetPosition: earliest (string, optional)
* startupPosition: (object)
* type: earliest (string)
* properties: (array, optional)
* - foo.config: bar (string)
* </pre>
*
* <p>The {@code autoOffsetResetPosition} can be one of the following options: {@code earliest} or
* {@code latest}.
*
* <p>Furthermore, the {@code startupPosition} can be of one of the following options: {@code
* earliest}, {@code latest}, {@code group-offsets}, {@code specific-offsets}, or {@code date}.
* Please see {@link RoutableKafkaIngressSpec} for further details.
*/
final class RoutableKafkaIngressBinderV1 implements ComponentBinder {
  private static final ObjectMapper SPEC_OBJ_MAPPER = StateFunObjectMapper.create();

  /** Singleton instance; the binder holds no mutable state. */
  static final RoutableKafkaIngressBinderV1 INSTANCE = new RoutableKafkaIngressBinderV1();

  /** The component kind handled by this binder. */
  static final TypeName KIND_TYPE = TypeName.parseFrom("io.statefun.kafka.v1/ingress");

  private RoutableKafkaIngressBinderV1() {}

  /**
   * Parses the component's spec into a {@link RoutableKafkaIngressSpec} and binds both the
   * resulting universal Kafka ingress and a router that forwards each consumed record to its
   * target functions.
   *
   * @throws IllegalStateException if the component's binder kind is not {@link #KIND_TYPE}.
   * @throws RuntimeException if the spec JSON cannot be parsed.
   */
  @Override
  public void bind(
      ComponentJsonObject component, StatefulFunctionModule.Binder remoteModuleBinder) {
    validateComponent(component);

    final JsonNode specJsonNode = component.specJsonNode();
    final RoutableKafkaIngressSpec spec = parseSpec(specJsonNode);

    remoteModuleBinder.bindIngress(spec.toUniversalKafkaIngressSpec());
    remoteModuleBinder.bindIngressRouter(spec.id(), new AutoRoutableProtobufRouter());
  }

  /** Rejects components whose binder kind does not match {@link #KIND_TYPE}. */
  private static void validateComponent(ComponentJsonObject componentJsonObject) {
    final TypeName targetBinderType = componentJsonObject.binderTypename();
    if (!targetBinderType.equals(KIND_TYPE)) {
      throw new IllegalStateException(
          "Received unexpected ModuleComponent to bind: " + componentJsonObject);
    }
  }

  private static RoutableKafkaIngressSpec parseSpec(JsonNode specJsonNode) {
    try {
      return SPEC_OBJ_MAPPER.treeToValue(specJsonNode, RoutableKafkaIngressSpec.class);
    } catch (JsonProcessingException e) {
      // Name the actual spec class in the message (the previous text referenced a non-existent
      // "AutoRoutableKafkaIngressSpec"), matching the convention of the other binders.
      throw new RuntimeException("Error parsing a RoutableKafkaIngressSpec.", e);
    }
  }
}
| 6,042 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/sdk/kinesis | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/sdk/kinesis/ingress/KinesisIngressBuilderApiExtension.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.sdk.kinesis.ingress;
/**
 * Helper for applying a {@link KinesisIngressDeserializer} to a {@link KinesisIngressBuilder}
 * from outside the builder itself.
 */
public final class KinesisIngressBuilderApiExtension {
  /** Non-instantiable utility class. */
  private KinesisIngressBuilderApiExtension() {}
  public static <T> void withDeserializer(
      KinesisIngressBuilder<T> kinesisIngressBuilder, KinesisIngressDeserializer<T> deserializer) {
    kinesisIngressBuilder.withDeserializer(deserializer);
  }
}
| 6,043 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/sdk | Create_ds/flink-statefun/statefun-flink/statefun-flink-io-bundle/src/main/java/org/apache/flink/statefun/sdk/kafka/KafkaIngressBuilderApiExtension.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.sdk.kafka;
/**
 * Helper for applying a {@link KafkaIngressDeserializer} to a {@link KafkaIngressBuilder} from
 * outside the builder itself.
 */
public final class KafkaIngressBuilderApiExtension {

  // Utility class: final with a private constructor, consistent with
  // KinesisIngressBuilderApiExtension.
  private KafkaIngressBuilderApiExtension() {}

  public static <T> void withDeserializer(
      KafkaIngressBuilder<T> kafkaIngressBuilder, KafkaIngressDeserializer<T> deserializer) {
    kafkaIngressBuilder.withDeserializer(deserializer);
  }
}
| 6,044 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/test/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/test/java/org/apache/flink/statefun/flink/datastream/RequestReplyFunctionBuilderTest.java | package org.apache.flink.statefun.flink.datastream;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.Assert.assertEquals;
import java.net.URI;
import java.net.URISyntaxException;
import java.time.Duration;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.core.httpfn.DefaultHttpRequestReplyClientSpec;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionEndpointSpec;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientConstants;
import org.apache.flink.statefun.sdk.FunctionType;
import org.junit.Test;
/** Tests for {@link RequestReplyFunctionBuilder} (synchronous OkHttp transport). */
public class RequestReplyFunctionBuilderTest {

  /** Test that a synchronous client spec can be created specifying all values. */
  @Test
  public void clientSpecCanBeCreatedWithAllValues()
      throws URISyntaxException, JsonProcessingException {
    final FunctionType functionType = new FunctionType("foobar", "barfoo");
    final URI uri = new URI("foobar");
    final int maxNumBatchRequests = 100;
    final Duration connectTimeout = Duration.ofSeconds(16);
    final Duration callTimeout = Duration.ofSeconds(21);
    final Duration readTimeout = Duration.ofSeconds(11);
    final Duration writeTimeout = Duration.ofSeconds(12);

    final RequestReplyFunctionBuilder builder =
        StatefulFunctionBuilder.requestReplyFunctionBuilder(functionType, uri)
            .withMaxNumBatchRequests(maxNumBatchRequests)
            .withMaxRequestDuration(callTimeout)
            .withConnectTimeout(connectTimeout)
            .withReadTimeout(readTimeout)
            .withWriteTimeout(writeTimeout);

    HttpFunctionEndpointSpec spec = builder.spec();
    assertThat(spec, notNullValue());
    assertEquals(maxNumBatchRequests, spec.maxNumBatchRequests());
    assertEquals(functionType, spec.targetFunctions().asSpecificFunctionType());
    // assertEquals takes (expected, actual): the expected constant goes first so failure
    // messages are not inverted.
    assertEquals(
        TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE, spec.transportClientFactoryType());
    assertEquals(uri, spec.urlPathTemplate().apply(functionType));

    ObjectNode transportClientProperties = spec.transportClientProperties();
    DefaultHttpRequestReplyClientSpec clientSpec =
        StatefulFunctionBuilder.CLIENT_SPEC_OBJ_MAPPER.treeToValue(
            transportClientProperties, DefaultHttpRequestReplyClientSpec.class);
    assertThat(clientSpec, notNullValue());

    DefaultHttpRequestReplyClientSpec.Timeouts timeouts = clientSpec.getTimeouts();
    assertEquals(callTimeout, timeouts.getCallTimeout());
    assertEquals(connectTimeout, timeouts.getConnectTimeout());
    assertEquals(readTimeout, timeouts.getReadTimeout());
    assertEquals(writeTimeout, timeouts.getWriteTimeout());
  }

  /**
   * Test that a synchronous client spec can be created specifying some values, using defaults for
   * others.
   */
  @Test
  public void clientSpecCanBeCreatedWithSomeValues()
      throws URISyntaxException, JsonProcessingException {
    final FunctionType functionType = new FunctionType("foobar", "barfoo");
    final URI uri = new URI("foobar");
    final int maxNumBatchRequests = 100;
    final Duration connectTimeout = Duration.ofSeconds(16);
    final Duration callTimeout = Duration.ofSeconds(21);

    final RequestReplyFunctionBuilder builder =
        StatefulFunctionBuilder.requestReplyFunctionBuilder(functionType, uri)
            .withMaxNumBatchRequests(maxNumBatchRequests)
            .withMaxRequestDuration(callTimeout)
            .withConnectTimeout(connectTimeout);

    HttpFunctionEndpointSpec spec = builder.spec();
    assertThat(spec, notNullValue());
    assertEquals(maxNumBatchRequests, spec.maxNumBatchRequests());
    assertEquals(functionType, spec.targetFunctions().asSpecificFunctionType());
    // assertEquals takes (expected, actual): the expected constant goes first.
    assertEquals(
        TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE, spec.transportClientFactoryType());
    assertEquals(uri, spec.urlPathTemplate().apply(functionType));

    ObjectNode transportClientProperties = spec.transportClientProperties();
    DefaultHttpRequestReplyClientSpec clientSpec =
        StatefulFunctionBuilder.CLIENT_SPEC_OBJ_MAPPER.treeToValue(
            transportClientProperties, DefaultHttpRequestReplyClientSpec.class);
    assertThat(clientSpec, notNullValue());

    // Unspecified timeouts must fall back to the documented defaults.
    DefaultHttpRequestReplyClientSpec.Timeouts timeouts = clientSpec.getTimeouts();
    assertEquals(callTimeout, timeouts.getCallTimeout());
    assertEquals(connectTimeout, timeouts.getConnectTimeout());
    assertEquals(
        DefaultHttpRequestReplyClientSpec.Timeouts.DEFAULT_HTTP_READ_TIMEOUT,
        timeouts.getReadTimeout());
    assertEquals(
        DefaultHttpRequestReplyClientSpec.Timeouts.DEFAULT_HTTP_WRITE_TIMEOUT,
        timeouts.getWriteTimeout());
  }

  /** Test that a synchronous client spec can be created via the deprecated method. */
  @Test
  public void clientSpecCanBeCreatedViaDeprecatedMethod() throws URISyntaxException {
    final RequestReplyFunctionBuilder requestReplyFunctionBuilder =
        RequestReplyFunctionBuilder.requestReplyFunctionBuilder(
            new FunctionType("foobar", "barfoo"), new URI("foobar"));
    assertThat(requestReplyFunctionBuilder.spec(), notNullValue());
  }
}
| 6,045 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/test/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/test/java/org/apache/flink/statefun/flink/datastream/AsyncRequestReplyFunctionBuilderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.datastream;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.Assert.assertEquals;
import java.net.URI;
import java.net.URISyntaxException;
import java.time.Duration;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionEndpointSpec;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientConstants;
import org.apache.flink.statefun.flink.core.nettyclient.NettyRequestReplySpec;
import org.apache.flink.statefun.sdk.FunctionType;
import org.junit.Test;
/** Tests for {@link AsyncRequestReplyFunctionBuilder} (asynchronous Netty transport). */
public class AsyncRequestReplyFunctionBuilderTest {

  /** Test that an asynchronous client spec can be created specifying all values */
  @Test
  public void asyncClientSpecCanBeCreatedWithAllValues()
      throws URISyntaxException, JsonProcessingException {
    final FunctionType functionType = new FunctionType("foobar", "barfoo");
    final URI uri = new URI("foobar");
    final int maxNumBatchRequests = 100;
    final Duration connectTimeout = Duration.ofSeconds(1);
    final Duration callTimeout = Duration.ofSeconds(2);
    final Duration pooledConnectionTTL = Duration.ofSeconds(3);
    final int connectionPoolMaxSize = 10;
    final int maxRequestOrResponseSizeInBytes = 10000;

    final AsyncRequestReplyFunctionBuilder builder =
        StatefulFunctionBuilder.asyncRequestReplyFunctionBuilder(functionType, uri)
            .withMaxNumBatchRequests(maxNumBatchRequests)
            .withMaxRequestDuration(callTimeout)
            .withConnectTimeout(connectTimeout)
            .withPooledConnectionTTL(pooledConnectionTTL)
            .withConnectionPoolMaxSize(connectionPoolMaxSize)
            .withMaxRequestOrResponseSizeInBytes(maxRequestOrResponseSizeInBytes);

    HttpFunctionEndpointSpec spec = builder.spec();
    assertThat(spec, notNullValue());
    assertEquals(maxNumBatchRequests, spec.maxNumBatchRequests());
    assertEquals(functionType, spec.targetFunctions().asSpecificFunctionType());
    // assertEquals takes (expected, actual): the expected constant goes first so failure
    // messages are not inverted.
    assertEquals(
        TransportClientConstants.ASYNC_CLIENT_FACTORY_TYPE, spec.transportClientFactoryType());
    assertEquals(uri, spec.urlPathTemplate().apply(functionType));

    ObjectNode transportClientProperties = spec.transportClientProperties();
    NettyRequestReplySpec nettySpec =
        StatefulFunctionBuilder.CLIENT_SPEC_OBJ_MAPPER.treeToValue(
            transportClientProperties, NettyRequestReplySpec.class);
    assertThat(nettySpec, notNullValue());
    assertEquals(callTimeout, nettySpec.callTimeout);
    assertEquals(connectTimeout, nettySpec.connectTimeout);
    assertEquals(pooledConnectionTTL, nettySpec.pooledConnectionTTL);
    assertEquals(connectionPoolMaxSize, nettySpec.connectionPoolMaxSize);
    assertEquals(maxRequestOrResponseSizeInBytes, nettySpec.maxRequestOrResponseSizeInBytes);
  }

  /**
   * Test that an asynchronous client spec can be created specifying some values, using defaults for
   * others.
   */
  @Test
  public void asyncClientSpecCanBeCreatedWithSomeValues()
      throws URISyntaxException, JsonProcessingException {
    final FunctionType functionType = new FunctionType("foobar", "barfoo");
    final URI uri = new URI("foobar");
    final int maxNumBatchRequests = 100;
    final Duration callTimeout = Duration.ofSeconds(2);
    final Duration pooledConnectionTTL = Duration.ofSeconds(3);
    final int maxRequestOrResponseSizeInBytes = 10000;

    final AsyncRequestReplyFunctionBuilder builder =
        StatefulFunctionBuilder.asyncRequestReplyFunctionBuilder(functionType, uri)
            .withMaxNumBatchRequests(maxNumBatchRequests)
            .withMaxRequestDuration(callTimeout)
            .withPooledConnectionTTL(pooledConnectionTTL)
            .withMaxRequestOrResponseSizeInBytes(maxRequestOrResponseSizeInBytes);

    HttpFunctionEndpointSpec spec = builder.spec();
    assertThat(spec, notNullValue());
    assertEquals(maxNumBatchRequests, spec.maxNumBatchRequests());
    assertEquals(functionType, spec.targetFunctions().asSpecificFunctionType());
    // assertEquals takes (expected, actual): the expected constant goes first.
    assertEquals(
        TransportClientConstants.ASYNC_CLIENT_FACTORY_TYPE, spec.transportClientFactoryType());
    assertEquals(uri, spec.urlPathTemplate().apply(functionType));

    ObjectNode transportClientProperties = spec.transportClientProperties();
    NettyRequestReplySpec nettySpec =
        StatefulFunctionBuilder.CLIENT_SPEC_OBJ_MAPPER.treeToValue(
            transportClientProperties, NettyRequestReplySpec.class);
    assertThat(nettySpec, notNullValue());
    assertEquals(callTimeout, nettySpec.callTimeout);
    // Unspecified values must fall back to the spec's documented defaults.
    assertEquals(NettyRequestReplySpec.DEFAULT_CONNECT_TIMEOUT, nettySpec.connectTimeout);
    assertEquals(pooledConnectionTTL, nettySpec.pooledConnectionTTL);
    assertEquals(
        NettyRequestReplySpec.DEFAULT_CONNECTION_POOL_MAX_SIZE, nettySpec.connectionPoolMaxSize);
    assertEquals(maxRequestOrResponseSizeInBytes, nettySpec.maxRequestOrResponseSizeInBytes);
  }
}
| 6,046 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/test/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/test/java/org/apache/flink/statefun/flink/datastream/SerializableHttpFunctionProviderTest.java | package org.apache.flink.statefun.flink.datastream;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.junit.Assert.assertEquals;
import org.apache.flink.statefun.flink.core.httpfn.DefaultHttpRequestReplyClientFactory;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientConstants;
import org.apache.flink.statefun.flink.core.nettyclient.NettyRequestReplyClientFactory;
import org.junit.Test;
public class SerializableHttpFunctionProviderTest {
  /** Validate the mapping from transport type to client-factory type. */
  @Test
  public void functionProviderShouldUseProperClientFactory() {
    // the synchronous (OkHttp-based) transport type must resolve to the default client factory
    assertEquals(
        DefaultHttpRequestReplyClientFactory.INSTANCE,
        SerializableHttpFunctionProvider.getClientFactory(
            TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE));
    // the asynchronous transport type must resolve to the Netty-based client factory
    assertEquals(
        NettyRequestReplyClientFactory.INSTANCE,
        SerializableHttpFunctionProvider.getClientFactory(
            TransportClientConstants.ASYNC_CLIENT_FACTORY_TYPE));
  }
}
| 6,047 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/resources/META-INF | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/resources/META-INF/licenses/LICENSE.protobuf-java | Copyright 2008 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Code generated by the Protocol Buffer compiler is owned by the owner
of the input file used when generating it. This code is not
standalone and requires a support library to be linked with it. This
support library is itself covered by the above license. | 6,048 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink/datastream/StatefulFunctionEgressStreams.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.datastream;
import java.util.Map;
import java.util.Objects;
import org.apache.flink.annotation.Internal;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.streaming.api.datastream.DataStream;
/**
* StatefulFunctionEgressStreams - this class holds a handle for every egress stream defined via
* {@link StatefulFunctionDataStreamBuilder#withEgressId(EgressIdentifier)}. see {@link
* #getDataStreamForEgressId(EgressIdentifier)}.
*/
public final class StatefulFunctionEgressStreams {

  /** Mapping from each registered egress id to the data stream carrying its messages. */
  private final Map<EgressIdentifier<?>, DataStream<?>> egresses;

  @Internal
  StatefulFunctionEgressStreams(Map<EgressIdentifier<?>, DataStream<?>> egresses) {
    this.egresses = Objects.requireNonNull(egresses);
  }

  /**
   * Returns the {@link DataStream} that represents a stateful functions egress for an {@link
   * EgressIdentifier}.
   *
   * <p>Messages that are sent to an egress with the supplied id, (via {@link
   * org.apache.flink.statefun.sdk.Context#send(EgressIdentifier, Object)}) would result in the
   * {@link DataStream} returned from that method.
   *
   * @param id the egress id, as provided to {@link
   *     StatefulFunctionDataStreamBuilder#withEgressId(EgressIdentifier)}.
   * @param <T> the egress message type.
   * @return a data stream that represents messages sent to the provided egress.
   */
  @SuppressWarnings("unchecked")
  public <T> DataStream<T> getDataStreamForEgressId(EgressIdentifier<T> id) {
    Objects.requireNonNull(id);
    final DataStream<?> stream = egresses.get(id);
    if (stream == null) {
      throw new IllegalArgumentException("Unknown data stream for egress " + id);
    }
    // safe by construction: the stream registered under an EgressIdentifier<T> carries T
    return (DataStream<T>) stream;
  }
}
| 6,049 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink/datastream/SerializableHttpFunctionProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.datastream;
import java.util.Objects;
import javax.annotation.Nullable;
import javax.annotation.concurrent.NotThreadSafe;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.statefun.flink.core.httpfn.DefaultHttpRequestReplyClientFactory;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionEndpointSpec;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionProvider;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientConstants;
import org.apache.flink.statefun.flink.core.nettyclient.NettyRequestReplyClientFactory;
import org.apache.flink.statefun.flink.core.reqreply.RequestReplyClientFactory;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.StatefulFunction;
import org.apache.flink.statefun.sdk.TypeName;
@NotThreadSafe
@Internal
final class SerializableHttpFunctionProvider implements SerializableStatefulFunctionProvider {
private static final long serialVersionUID = 1;
private final HttpFunctionEndpointSpec spec;
private transient @Nullable HttpFunctionProvider delegate;
SerializableHttpFunctionProvider(HttpFunctionEndpointSpec spec) {
this.spec = Objects.requireNonNull(spec);
}
@Override
public StatefulFunction functionOfType(FunctionType type) {
if (delegate == null) {
delegate =
new HttpFunctionProvider(spec, getClientFactory(spec.transportClientFactoryType()));
}
return delegate.functionOfType(type);
}
@VisibleForTesting
static RequestReplyClientFactory getClientFactory(TypeName factoryType) {
if (factoryType.equals(TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE)) {
return DefaultHttpRequestReplyClientFactory.INSTANCE;
} else if (factoryType.equals(TransportClientConstants.ASYNC_CLIENT_FACTORY_TYPE)) {
return NettyRequestReplyClientFactory.INSTANCE;
} else {
throw new UnsupportedOperationException(
String.format(
"Unsupported transport client factory type: %s",
factoryType.canonicalTypenameString()));
}
}
}
| 6,050 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink/datastream/SerializableStatefulFunctionProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.datastream;
import java.io.Serializable;
import org.apache.flink.statefun.sdk.StatefulFunctionProvider;
/**
 * A {@link StatefulFunctionProvider} that is also {@link Serializable}, so that instances can be
 * shipped as part of a Flink job graph.
 */
public interface SerializableStatefulFunctionProvider
    extends StatefulFunctionProvider, Serializable {}
| 6,051 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink/datastream/RequestReplyFunctionBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.datastream;
import java.net.URI;
import java.time.Duration;
import org.apache.flink.annotation.Internal;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.core.httpfn.*;
import org.apache.flink.statefun.sdk.FunctionType;
/** A builder for RequestReply remote function type. */
public class RequestReplyFunctionBuilder extends StatefulFunctionBuilder {

  /**
   * Create a new builder for a remote function with a given type and an endpoint.
   *
   * @deprecated Use {@link StatefulFunctionBuilder#requestReplyFunctionBuilder} instead.
   * @param functionType the function type that is served remotely.
   * @param endpoint the endpoint that serves that remote function.
   * @return a builder.
   */
  @Deprecated
  public static RequestReplyFunctionBuilder requestReplyFunctionBuilder(
      FunctionType functionType, URI endpoint) {
    return new RequestReplyFunctionBuilder(functionType, endpoint);
  }

  /** Timeout settings forwarded to the synchronous (OkHttp-based) transport client. */
  private final DefaultHttpRequestReplyClientSpec.Timeouts timeouts =
      new DefaultHttpRequestReplyClientSpec.Timeouts();

  /** Accumulates endpoint-level settings: target function, URL template and batching. */
  private final HttpFunctionEndpointSpec.Builder endpointBuilder;

  RequestReplyFunctionBuilder(FunctionType functionType, URI endpoint) {
    this.endpointBuilder =
        HttpFunctionEndpointSpec.builder(
            TargetFunctions.functionType(functionType),
            new UrlPathTemplate(endpoint.toASCIIString()));
  }

  /**
   * Set a maximum request duration. This duration spans the complete call, including connecting to
   * the function endpoint, writing the request, function processing, and reading the response.
   *
   * @param duration the duration after which the request is considered failed.
   * @return this builder.
   */
  public RequestReplyFunctionBuilder withMaxRequestDuration(Duration duration) {
    timeouts.setCallTimeout(duration);
    return this;
  }

  /**
   * Set a timeout for connecting to function endpoints.
   *
   * @param duration the duration after which a connect attempt is considered failed.
   * @return this builder.
   */
  public RequestReplyFunctionBuilder withConnectTimeout(Duration duration) {
    timeouts.setConnectTimeout(duration);
    return this;
  }

  /**
   * Set a timeout for individual read IO operations during a function invocation request.
   *
   * @param duration the duration after which a read IO operation is considered failed.
   * @return this builder.
   */
  public RequestReplyFunctionBuilder withReadTimeout(Duration duration) {
    timeouts.setReadTimeout(duration);
    return this;
  }

  /**
   * Set a timeout for individual write IO operations during a function invocation request.
   *
   * @param duration the duration after which a write IO operation is considered failed.
   * @return this builder.
   */
  public RequestReplyFunctionBuilder withWriteTimeout(Duration duration) {
    timeouts.setWriteTimeout(duration);
    return this;
  }

  /**
   * Sets the max messages to batch together for a specific address.
   *
   * @param maxNumBatchRequests the maximum number of requests to batch for an address.
   * @return this builder.
   */
  public RequestReplyFunctionBuilder withMaxNumBatchRequests(int maxNumBatchRequests) {
    endpointBuilder.withMaxNumBatchRequests(maxNumBatchRequests);
    return this;
  }

  /**
   * Create the endpoint spec for the function.
   *
   * @return The endpoint spec.
   */
  @Internal
  @Override
  HttpFunctionEndpointSpec spec() {
    endpointBuilder.withTransport(
        new TransportClientSpec(
            TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE, timeoutsAsObjectNode(timeouts)));
    return endpointBuilder.build();
  }

  /** Serializes the given timeout settings into the JSON shape the transport client expects. */
  private static ObjectNode timeoutsAsObjectNode(
      DefaultHttpRequestReplyClientSpec.Timeouts timeouts) {
    final DefaultHttpRequestReplyClientSpec clientSpec = new DefaultHttpRequestReplyClientSpec();
    clientSpec.setTimeouts(timeouts);
    return clientSpec.toJson(CLIENT_SPEC_OBJ_MAPPER);
  }
}
| 6,052 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink/datastream/AsyncRequestReplyFunctionBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.datastream;
import java.net.URI;
import java.time.Duration;
import org.apache.flink.annotation.Internal;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.core.httpfn.*;
import org.apache.flink.statefun.flink.core.nettyclient.NettyRequestReplySpec;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.util.TimeUtils;
/** A builder for async RequestReply remote function type. */
public class AsyncRequestReplyFunctionBuilder extends StatefulFunctionBuilder {

  /** Raw JSON properties handed to the asynchronous (Netty-based) transport client. */
  private final ObjectNode nettyProperties;

  /** Accumulates endpoint-level settings: target function, URL template and batching. */
  private final HttpFunctionEndpointSpec.Builder endpointBuilder;

  AsyncRequestReplyFunctionBuilder(FunctionType functionType, URI endpoint) {
    this.nettyProperties = CLIENT_SPEC_OBJ_MAPPER.createObjectNode();
    this.endpointBuilder =
        HttpFunctionEndpointSpec.builder(
            TargetFunctions.functionType(functionType),
            new UrlPathTemplate(endpoint.toASCIIString()));
  }

  /**
   * Set a maximum request duration. This duration spans the complete call, including connecting to
   * the function endpoint, writing the request, function processing, and reading the response.
   *
   * @param duration the duration after which the request is considered failed.
   * @return this builder.
   */
  public AsyncRequestReplyFunctionBuilder withMaxRequestDuration(Duration duration) {
    return putDuration(NettyRequestReplySpec.CALL_TIMEOUT_PROPERTY, duration);
  }

  /**
   * Set a timeout for connecting to function endpoints.
   *
   * @param duration the duration after which a connect attempt is considered failed.
   * @return this builder.
   */
  public AsyncRequestReplyFunctionBuilder withConnectTimeout(Duration duration) {
    return putDuration(NettyRequestReplySpec.CONNECT_TIMEOUT_PROPERTY, duration);
  }

  /**
   * The amount of time a connection will live in the connection pool. Set to zero to disable, the
   * connection will be evicted from the pool after that time.
   *
   * @param duration the duration after which a connection will be evicted from the pool.
   * @return this builder.
   */
  public AsyncRequestReplyFunctionBuilder withPooledConnectionTTL(Duration duration) {
    return putDuration(NettyRequestReplySpec.POOLED_CONNECTION_TTL_PROPERTY, duration);
  }

  /**
   * The maximum connection pool size.
   *
   * @param size the max size of the connection pool.
   * @return this builder.
   */
  public AsyncRequestReplyFunctionBuilder withConnectionPoolMaxSize(int size) {
    nettyProperties.put(NettyRequestReplySpec.CONNECTION_POOL_MAX_SIZE_PROPERTY, size);
    return this;
  }

  /**
   * The maximum size for a request or response payload.
   *
   * @param maxSizeInBytes the max size of the request or response payload.
   * @return this builder.
   */
  public AsyncRequestReplyFunctionBuilder withMaxRequestOrResponseSizeInBytes(int maxSizeInBytes) {
    nettyProperties.put(
        NettyRequestReplySpec.MAX_REQUEST_OR_RESPONSE_SIZE_IN_BYTES_PROPERTY, maxSizeInBytes);
    return this;
  }

  /**
   * Sets the max messages to batch together for a specific address.
   *
   * @param maxNumBatchRequests the maximum number of requests to batch for an address.
   * @return this builder.
   */
  public AsyncRequestReplyFunctionBuilder withMaxNumBatchRequests(int maxNumBatchRequests) {
    endpointBuilder.withMaxNumBatchRequests(maxNumBatchRequests);
    return this;
  }

  /**
   * Create the endpoint spec for the function.
   *
   * @return The endpoint spec.
   */
  @Internal
  @Override
  HttpFunctionEndpointSpec spec() {
    endpointBuilder.withTransport(
        new TransportClientSpec(
            TransportClientConstants.ASYNC_CLIENT_FACTORY_TYPE, nettyProperties));
    return endpointBuilder.build();
  }

  /** Stores a duration property in the human-readable form the Netty spec parser accepts. */
  private AsyncRequestReplyFunctionBuilder putDuration(String property, Duration duration) {
    nettyProperties.put(property, TimeUtils.formatWithHighestUnit(duration));
    return this;
  }
}
| 6,053 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink/datastream/StatefulFunctionDataStreamBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.datastream;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import javax.annotation.Nullable;
import org.apache.flink.statefun.flink.core.StatefulFunctionsConfig;
import org.apache.flink.statefun.flink.core.feedback.FeedbackKey;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionEndpointSpec;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.message.RoutableMessage;
import org.apache.flink.statefun.flink.core.translation.EmbeddedTranslator;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
/**
 * Builder for a Stateful Function Application.
 *
 * <p>This builder allows defining all the aspects of a stateful function application: input
 * streams as ingresses, function providers, and egress ids.
 */
public final class StatefulFunctionDataStreamBuilder {

  // distinguishes feedback (loop) edges when several pipelines are built in the same JVM
  private static final AtomicInteger FEEDBACK_INVOCATION_ID_SEQ = new AtomicInteger();

  /** Creates a {@code StatefulFunctionDataStreamBuilder}. */
  public static StatefulFunctionDataStreamBuilder builder(String pipelineName) {
    return new StatefulFunctionDataStreamBuilder(pipelineName);
  }

  private StatefulFunctionDataStreamBuilder(String pipelineName) {
    this.pipelineName = Objects.requireNonNull(pipelineName);
  }

  private final String pipelineName;
  private final List<DataStream<RoutableMessage>> definedIngresses = new ArrayList<>();
  private final Map<FunctionType, SerializableStatefulFunctionProvider> functionProviders =
      new HashMap<>();
  private final Map<FunctionType, HttpFunctionEndpointSpec> requestReplyFunctions = new HashMap<>();
  // LinkedHashSet keeps egress registration order deterministic
  private final Set<EgressIdentifier<?>> egressesIds = new LinkedHashSet<>();

  @Nullable private StatefulFunctionsConfig config;

  /**
   * Adds an ingress of incoming messages.
   *
   * @param ingress an incoming stream of messages.
   * @return this builder.
   */
  public StatefulFunctionDataStreamBuilder withDataStreamAsIngress(
      DataStream<RoutableMessage> ingress) {
    Objects.requireNonNull(ingress);
    definedIngresses.add(ingress);
    return this;
  }

  /**
   * Adds a function provider to this builder
   *
   * @param functionType the type of the function that this provider providers.
   * @param provider the stateful function provider.
   * @return this builder.
   * @throws IllegalStateException if a provider was already registered for {@code functionType}.
   */
  public StatefulFunctionDataStreamBuilder withFunctionProvider(
      FunctionType functionType, SerializableStatefulFunctionProvider provider) {
    Objects.requireNonNull(functionType);
    Objects.requireNonNull(provider);
    putAndThrowIfPresent(functionProviders, functionType, provider);
    return this;
  }

  /**
   * Adds a remote RequestReply type of function provider to this builder.
   *
   * @param builder an already configured {@code StatefulFunctionBuilder}.
   * @return this builder.
   * @throws IllegalStateException if a spec was already registered for the same function type.
   */
  public StatefulFunctionDataStreamBuilder withRequestReplyRemoteFunction(
      StatefulFunctionBuilder builder) {
    Objects.requireNonNull(builder);
    HttpFunctionEndpointSpec spec = builder.spec();
    putAndThrowIfPresent(
        requestReplyFunctions, spec.targetFunctions().asSpecificFunctionType(), spec);
    return this;
  }

  /**
   * Registers an {@link EgressIdentifier}.
   *
   * <p>See {@link StatefulFunctionEgressStreams#getDataStreamForEgressId(EgressIdentifier)}.
   *
   * @param egressId an ingress id
   * @return this builder.
   * @throws IllegalStateException if the egress id was already registered.
   */
  public StatefulFunctionDataStreamBuilder withEgressId(EgressIdentifier<?> egressId) {
    Objects.requireNonNull(egressId);
    putAndThrowIfPresent(egressesIds, egressId);
    return this;
  }

  /**
   * Set a stateful function configuration.
   *
   * @param configuration the stateful function configuration to set.
   * @return this builder.
   */
  public StatefulFunctionDataStreamBuilder withConfiguration(
      StatefulFunctionsConfig configuration) {
    Objects.requireNonNull(configuration);
    this.config = configuration;
    return this;
  }

  /**
   * Adds Stateful Functions operators into the topology.
   *
   * @param env the stream execution environment.
   */
  public StatefulFunctionEgressStreams build(StreamExecutionEnvironment env) {
    // fall back to the environment-derived configuration when none was set explicitly
    final StatefulFunctionsConfig config =
        Optional.ofNullable(this.config)
            .orElseGet(() -> StatefulFunctionsConfig.fromEnvironment(env));
    // wrap each remote (request-reply) spec so it can join the embedded provider map
    requestReplyFunctions.forEach(
        (type, spec) -> functionProviders.put(type, new SerializableHttpFunctionProvider(spec)));
    FeedbackKey<Message> key =
        new FeedbackKey<>(pipelineName, FEEDBACK_INVOCATION_ID_SEQ.incrementAndGet());
    EmbeddedTranslator embeddedTranslator = new EmbeddedTranslator(config, key);
    Map<EgressIdentifier<?>, DataStream<?>> sideOutputs =
        embeddedTranslator.translate(definedIngresses, egressesIds, functionProviders);
    return new StatefulFunctionEgressStreams(sideOutputs);
  }

  /** Inserts {@code key -> value} into {@code map}, failing fast on a duplicate key. */
  private static <K, V> void putAndThrowIfPresent(Map<K, V> map, K key, V value) {
    // check before inserting so a duplicate registration never clobbers the earlier binding
    // (the previous version overwrote the old value before throwing)
    if (map.containsKey(key)) {
      throw new IllegalStateException(
          String.format("A binding for the key %s was previously defined.", key));
    }
    map.put(key, value);
  }

  /** Adds {@code key} to {@code set}, failing fast if it was already present. */
  private static <K> void putAndThrowIfPresent(Set<K> set, K key) {
    if (set.add(key)) {
      return;
    }
    throw new IllegalStateException(
        String.format("A binding for the key %s was previously defined.", key));
  }
}
| 6,054 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-datastream/src/main/java/org/apache/flink/statefun/flink/datastream/StatefulFunctionBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.datastream;
import java.net.URI;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.apache.flink.statefun.flink.core.httpfn.HttpFunctionEndpointSpec;
import org.apache.flink.statefun.sdk.FunctionType;
/** Base class for statefun function builders. */
public abstract class StatefulFunctionBuilder {

  /**
   * Creates a function builder using the synchronous HTTP protocol.
   *
   * @param functionType the function type that is served remotely.
   * @param endpoint the endpoint that serves that remote function.
   * @return a builder.
   */
  public static RequestReplyFunctionBuilder requestReplyFunctionBuilder(
      FunctionType functionType, URI endpoint) {
    return new RequestReplyFunctionBuilder(functionType, endpoint);
  }

  /**
   * Creates a function builder using the asynchronous HTTP protocol.
   *
   * @param functionType the function type that is served remotely.
   * @param endpoint the endpoint that serves that remote function.
   * @return a builder.
   */
  public static AsyncRequestReplyFunctionBuilder asyncRequestReplyFunctionBuilder(
      FunctionType functionType, URI endpoint) {
    return new AsyncRequestReplyFunctionBuilder(functionType, endpoint);
  }

  /** The object mapper used to serialize the client spec object. */
  static final ObjectMapper CLIENT_SPEC_OBJ_MAPPER = StateFunObjectMapper.create();

  /**
   * Override to provide the endpoint spec.
   *
   * @return The endpoint spec.
   */
  abstract HttpFunctionEndpointSpec spec();
}
| 6,055 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/TestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import org.apache.flink.statefun.flink.core.generated.EnvelopeAddress;
import org.apache.flink.statefun.flink.core.message.MessageFactory;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.apache.flink.statefun.sdk.Address;
import org.apache.flink.statefun.sdk.FunctionType;
@SuppressWarnings("WeakerAccess")
public class TestUtils {

  /** A message factory producing Kryo-serialized payloads (no custom serializer class). */
  public static final MessageFactory ENVELOPE_FACTORY =
      MessageFactory.forKey(MessageFactoryKey.forType(MessageFactoryType.WITH_KRYO_PAYLOADS, null));

  /** A function type shared by the two test addresses below. */
  public static final FunctionType FUNCTION_TYPE = new FunctionType("test", "a");

  public static final Address FUNCTION_1_ADDR = new Address(FUNCTION_TYPE, "a-1");
  public static final Address FUNCTION_2_ADDR = new Address(FUNCTION_TYPE, "a-2");

  /** A small, fully-populated protobuf message usable as an arbitrary payload in tests. */
  public static final EnvelopeAddress DUMMY_PAYLOAD =
      EnvelopeAddress.newBuilder().setNamespace("com.foo").setType("greet").setId("user-1").build();

  /**
   * Opens a stream or throws an exception. Does *not* close the stream.
   *
   * @param url of the resource to open
   * @return opened input stream
   */
  public static InputStream openStreamOrThrow(URL url) {
    try {
      return url.openStream();
    } catch (IOException cause) {
      throw new IllegalStateException("Could not open " + url.getPath(), cause);
    }
  }
}
| 6,056 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/StatefulFunctionsUniverseTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import static org.hamcrest.core.IsSame.sameInstance;
import static org.junit.Assert.assertThat;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.apache.flink.statefun.sdk.TypeName;
import org.junit.Test;
public class StatefulFunctionsUniverseTest {

  // Binding an extension under a type name and resolving the same name must
  // yield the very same instance (identity, not equality).
  @Test
  public void testExtensions() {
    final StatefulFunctionsUniverse universe = emptyUniverse();
    final ExtensionImpl boundExtension = new ExtensionImpl();
    universe.bindExtension(TypeName.parseFrom("test.namespace/test.name"), boundExtension);

    final BaseExtension resolved =
        universe.resolveExtension(
            TypeName.parseFrom("test.namespace/test.name"), BaseExtension.class);
    assertThat(boundExtension, sameInstance(resolved));
  }

  /** A universe with protobuf payloads and no bound ingresses, egresses or functions. */
  private static StatefulFunctionsUniverse emptyUniverse() {
    return new StatefulFunctionsUniverse(
        MessageFactoryKey.forType(MessageFactoryType.WITH_PROTOBUF_PAYLOADS, null));
  }

  private interface BaseExtension {}

  private static class ExtensionImpl implements BaseExtension {}
}
| 6,057 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/StatefulFunctionsConfigTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import java.util.Arrays;
import java.util.Optional;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.statefun.flink.core.exceptions.StatefulFunctionsInvalidConfigException;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions;
import org.hamcrest.Matchers;
import org.junit.Assert;
import org.junit.Test;
public class StatefulFunctionsConfigTest {

  private final String serializerClassName = "com.sample.Serializer";

  // Every option set on the Flink Configuration must be reflected in the derived
  // StatefulFunctionsConfig. Note: JUnit's assertEquals takes (expected, actual) —
  // the arguments below are ordered accordingly so failure messages read correctly.
  @Test
  public void testSetConfigurations() {
    final String testName = "test-name";
    Configuration configuration = new Configuration();
    configuration.set(StatefulFunctionsConfig.FLINK_JOB_NAME, testName);
    configuration.set(
        StatefulFunctionsConfig.USER_MESSAGE_SERIALIZER, MessageFactoryType.WITH_CUSTOM_PAYLOADS);
    configuration.set(
        StatefulFunctionsConfig.USER_MESSAGE_CUSTOM_PAYLOAD_SERIALIZER_CLASS, serializerClassName);
    configuration.set(
        StatefulFunctionsConfig.TOTAL_MEMORY_USED_FOR_FEEDBACK_CHECKPOINTING,
        MemorySize.ofMebiBytes(100));
    configuration.set(StatefulFunctionsConfig.ASYNC_MAX_OPERATIONS_PER_TASK, 100);
    configuration.set(
        CoreOptions.ALWAYS_PARENT_FIRST_LOADER_PATTERNS_ADDITIONAL,
        Arrays.asList("org.apache.flink.statefun", "org.apache.kafka", "com.google.protobuf"));
    configuration.set(ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS, 1);
    // free-form global config entries are collected from the "statefun.module.global-config." prefix
    configuration.setString("statefun.module.global-config.key1", "value1");
    configuration.setString("statefun.module.global-config.key2", "value2");
    StatefulFunctionsConfig stateFunConfig =
        StatefulFunctionsConfig.fromFlinkConfiguration(configuration);
    Assert.assertEquals(testName, stateFunConfig.getFlinkJobName());
    Assert.assertEquals(
        MessageFactoryType.WITH_CUSTOM_PAYLOADS, stateFunConfig.getFactoryKey().getType());
    Assert.assertEquals(
        Optional.of(serializerClassName),
        stateFunConfig.getFactoryKey().getCustomPayloadSerializerClassName());
    Assert.assertEquals(MemorySize.ofMebiBytes(100), stateFunConfig.getFeedbackBufferSize());
    Assert.assertEquals(100, stateFunConfig.getMaxAsyncOperationsPerTask());
    Assert.assertThat(
        stateFunConfig.getGlobalConfigurations(), Matchers.hasEntry("key1", "value1"));
    Assert.assertThat(
        stateFunConfig.getGlobalConfigurations(), Matchers.hasEntry("key2", "value2"));
  }

  /** A valid configuration the negative tests below start from before making it invalid. */
  private static Configuration baseConfiguration() {
    Configuration configuration = new Configuration();
    configuration.set(StatefulFunctionsConfig.FLINK_JOB_NAME, "name");
    configuration.set(
        StatefulFunctionsConfig.USER_MESSAGE_SERIALIZER, MessageFactoryType.WITH_KRYO_PAYLOADS);
    configuration.set(
        StatefulFunctionsConfig.TOTAL_MEMORY_USED_FOR_FEEDBACK_CHECKPOINTING,
        MemorySize.ofMebiBytes(100));
    configuration.set(StatefulFunctionsConfig.ASYNC_MAX_OPERATIONS_PER_TASK, 100);
    configuration.set(
        CoreOptions.ALWAYS_PARENT_FIRST_LOADER_PATTERNS_ADDITIONAL,
        Arrays.asList("org.apache.flink.statefun", "org.apache.kafka", "com.google.protobuf"));
    configuration.set(ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS, 1);
    return configuration;
  }

  // Selecting the custom-payloads serializer without naming a serializer class is invalid.
  @Test(expected = StatefulFunctionsInvalidConfigException.class)
  public void invalidCustomSerializerThrows() {
    Configuration configuration = baseConfiguration();
    configuration.set(
        StatefulFunctionsConfig.USER_MESSAGE_SERIALIZER, MessageFactoryType.WITH_CUSTOM_PAYLOADS);
    StatefulFunctionsConfigValidator.validate(false, configuration);
  }

  // Naming a serializer class while a non-custom serializer is selected is also invalid.
  @Test(expected = StatefulFunctionsInvalidConfigException.class)
  public void invalidNonCustomSerializerThrows() {
    Configuration configuration = baseConfiguration();
    configuration.set(
        StatefulFunctionsConfig.USER_MESSAGE_SERIALIZER, MessageFactoryType.WITH_KRYO_PAYLOADS);
    configuration.set(
        StatefulFunctionsConfig.USER_MESSAGE_CUSTOM_PAYLOAD_SERIALIZER_CLASS, serializerClassName);
    StatefulFunctionsConfigValidator.validate(false, configuration);
  }
}
| 6,058 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/metrics/NonNegativeCounterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.metrics;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import org.apache.flink.metrics.Counter;
import org.junit.Test;
public class NonNegativeCounterTest {
  // Increments behave like a plain counter, but decrements that would take the
  // count below zero clamp the value at zero instead of going negative.
  @Test
  public void testNonNegativeCounter() throws Exception {
    Counter counter = new NonNegativeCounter();
    counter.inc();
    assertThat(counter.getCount(), is(1L));
    counter.inc(2);
    assertThat(counter.getCount(), is(3L));
    // dec(4) from 3 would be -1; the counter clamps to 0
    counter.dec(4);
    assertThat(counter.getCount(), is(0L));
    // a further decrement keeps the count at 0
    counter.dec();
    assertThat(counter.getCount(), is(0L));
  }
}
| 6,059 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/di/ObjectContainerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.di;
import static org.hamcrest.CoreMatchers.theInstance;
import static org.hamcrest.MatcherAssert.assertThat;
import org.junit.Test;
public class ObjectContainerTest {

  // Resolving a binding through an alias must yield the identical instance
  // that the original label resolves to.
  @Test
  public void addAliasTest() {
    final ObjectContainer container = new ObjectContainer();
    container.add("label-1", InterfaceA.class, TestClass.class);
    container.addAlias("label-2", InterfaceB.class, "label-1", InterfaceA.class);

    final InterfaceB viaAlias = container.get(InterfaceB.class, "label-2");
    final InterfaceA viaOriginalLabel = container.get(InterfaceA.class, "label-1");
    assertThat(viaAlias, theInstance(viaOriginalLabel));
  }

  private interface InterfaceA {}

  private interface InterfaceB {}

  private static class TestClass implements InterfaceA, InterfaceB {}
}
| 6,060 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/logger/MemorySegmentPoolTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.logger;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.junit.Assert.assertThat;
import org.apache.flink.core.memory.MemorySegment;
import org.hamcrest.CoreMatchers;
import org.junit.Test;
public class MemorySegmentPoolTest {

  @Test
  public void emptyMemorySegmentPoolDoesNotAllocateSegments() {
    final MemorySegmentPool emptyPool = new MemorySegmentPool(0);
    assertThat(emptyPool.nextSegment(), nullValue());
  }

  @Test
  public void emptyMemorySegmentPoolOverdraftsWhenAskedTo() {
    final MemorySegmentPool emptyPool = new MemorySegmentPool(0);
    emptyPool.ensureAtLeastOneSegmentPresent();
    assertThat(emptyPool.nextSegment(), notNullValue());
  }

  @Test
  public void emptyMemorySegmentPoolOverdraftsTemporally() {
    final MemorySegmentPool emptyPool = new MemorySegmentPool(0);
    emptyPool.ensureAtLeastOneSegmentPresent();
    final MemorySegment overdraftSegment = emptyPool.nextSegment();
    emptyPool.release(overdraftSegment);
    // a released overdraft segment is freed rather than pooled,
    // so the pool is back to having nothing to hand out
    assertThat(overdraftSegment, notNullValue());
    assertThat(overdraftSegment.isFreed(), is(true));
    assertThat(emptyPool.nextSegment(), nullValue());
  }

  @Test
  public void minimalAllocationUnitIsPageSize() {
    // one byte short of a full page: not enough budget for a single segment
    final MemorySegmentPool tooSmallPool = new MemorySegmentPool(MemorySegmentPool.PAGE_SIZE - 1);
    assertThat(tooSmallPool.nextSegment(), CoreMatchers.nullValue());
  }

  @Test
  public void poolIsAbleToAllocateTheRequiredNumberOfPages() {
    final int pageCount = 10;
    final MemorySegmentPool pool = new MemorySegmentPool(pageCount * MemorySegmentPool.PAGE_SIZE);
    for (int allocated = 0; allocated < pageCount; allocated++) {
      final MemorySegment segment = pool.nextSegment();
      assertThat(segment, notNullValue());
      assertThat(segment.size(), is(MemorySegmentPool.PAGE_SIZE));
    }
    // the budget is exhausted after exactly pageCount pages
    assertThat(pool.nextSegment(), nullValue());
  }

  @Test
  public void segmentsCanBeReturnedToThePool() {
    final MemorySegmentPool singlePagePool = new MemorySegmentPool(MemorySegmentPool.PAGE_SIZE);
    // the single budgeted segment can be taken out ...
    final MemorySegment firstSegment = singlePagePool.nextSegment();
    assertThat(firstSegment, notNullValue());
    // ... after which the pool is exhausted ...
    assertThat(singlePagePool.nextSegment(), nullValue());
    // ... until the segment is handed back ...
    singlePagePool.release(firstSegment);
    // ... making a usable (not freed) segment available again
    final MemorySegment reusedSegment = singlePagePool.nextSegment();
    assertThat(reusedSegment, notNullValue());
    assertThat(reusedSegment.isFreed(), is(false));
  }
}
| 6,061 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/logger/OneBytePerReadByteArrayInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.logger;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
/**
 * A {@link ByteArrayInputStream} that hands out at most one byte per bulk read, and always
 * returns 0 from {@link InputStream#available()}.
 *
 * <p>Tests use this stream to mimic an extreme-but-legal "real-life" input stream while staying
 * within the contracts of the {@link InputStream} methods:
 *
 * <ul>
 *   <li>{@link InputStream#read(byte[])} and {@link InputStream#read()} block until at least one
 *       byte can be returned, and always return at least one byte (until end of stream).
 *   <li>{@link InputStream#available()} returns 0, implying no bytes are immediately available
 *       and the next read would block.
 * </ul>
 */
final class OneBytePerReadByteArrayInputStream extends ByteArrayInputStream {

  OneBytePerReadByteArrayInputStream(byte[] byteBuffer) {
    super(byteBuffer);
  }

  /** Caps every bulk read at a single byte, no matter how many bytes were requested. */
  @Override
  public int read(byte[] b, int off, int len) {
    final int cappedLen = len < 1 ? len : 1;
    return super.read(b, off, cappedLen);
  }

  /** Reports no immediately-available bytes, as if the next read would block. */
  @Override
  public synchronized int available() {
    return 0;
  }
}
| 6,062 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/logger/UnboundedFeedbackLoggerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.logger;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import java.io.*;
import java.util.ArrayList;
import java.util.function.Function;
import java.util.stream.IntStream;
import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;
import org.apache.flink.statefun.flink.core.di.ObjectContainer;
import org.apache.flink.statefun.flink.core.logger.UnboundedFeedbackLogger.Header;
import org.hamcrest.Matchers;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
@SuppressWarnings("SameParameterValue")
public class UnboundedFeedbackLoggerTest {
  // Async I/O manager backing the spillable loggers; created once for the class
  // in beforeClass() and closed in afterClass().
  private static IOManagerAsync IO_MANAGER;
  @BeforeClass
  public static void beforeClass() {
    IO_MANAGER = new IOManagerAsync();
  }
  @AfterClass
  public static void afterClass() throws Exception {
    if (IO_MANAGER != null) {
      IO_MANAGER.close();
      IO_MANAGER = null;
    }
  }
  // Committing right after startLogging, with nothing appended, must still write
  // some bytes (the snapshot framing) to the output stream.
  @Test
  public void sanity() {
    UnboundedFeedbackLogger<Integer> logger = instanceUnderTest(128, 1);
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    logger.startLogging(output);
    logger.commit();
    assertThat(output.size(), Matchers.greaterThan(0));
  }
  @Test(expected = IllegalStateException.class)
  public void commitWithoutStartLoggingShouldBeIllegal() {
    UnboundedFeedbackLogger<Integer> logger = instanceUnderTest(128, 1);
    logger.commit();
  }
  @Test
  public void roundTrip() throws Exception {
    roundTrip(100, 1024);
  }
  @Test
  public void roundTripWithoutElements() throws Exception {
    roundTrip(0, 1024);
  }
  // Zero in-memory budget forces all appended elements to spill to disk.
  @Ignore
  @Test
  public void roundTripWithSpill() throws Exception {
    roundTrip(1_000_000, 0);
  }
  // skipHeaderSilently must consume the header and leave the payload readable.
  @Test
  public void roundTripWithHeader() throws IOException {
    DataOutputSerializer out = new DataOutputSerializer(32);
    Header.writeHeader(out);
    out.writeInt(123);
    out.writeInt(456);
    InputStream in = new RandomReadLengthByteArrayInputStream(out.getCopyOfBuffer());
    DataInputViewStreamWrapper view = new DataInputViewStreamWrapper(Header.skipHeaderSilently(in));
    assertThat(view.readInt(), is(123));
    assertThat(view.readInt(), is(456));
  }
  // A stream without a header (legacy format) must pass through unchanged.
  @Test
  public void roundTripWithoutHeader() throws IOException {
    DataOutputSerializer out = new DataOutputSerializer(32);
    out.writeInt(123);
    out.writeInt(456);
    InputStream in = new RandomReadLengthByteArrayInputStream(out.getCopyOfBuffer());
    DataInputViewStreamWrapper view = new DataInputViewStreamWrapper(Header.skipHeaderSilently(in));
    assertThat(view.readInt(), is(123));
    assertThat(view.readInt(), is(456));
  }
  // A header followed by no payload must read as an immediately-exhausted stream.
  @Test
  public void emptyKeyGroupWithHeader() throws IOException {
    DataOutputSerializer out = new DataOutputSerializer(32);
    Header.writeHeader(out);
    InputStream in = new RandomReadLengthByteArrayInputStream(out.getCopyOfBuffer());
    DataInputViewStreamWrapper view = new DataInputViewStreamWrapper(Header.skipHeaderSilently(in));
    assertThat(view.read(), is(-1));
  }
  // A completely empty stream must also read as immediately exhausted.
  @Test
  public void emptyKeyGroupWithoutHeader() throws IOException {
    InputStream in = new RandomReadLengthByteArrayInputStream(new byte[0]);
    DataInputViewStreamWrapper view = new DataInputViewStreamWrapper(Header.skipHeaderSilently(in));
    assertThat(view.read(), is(-1));
  }
  // Serializes numElements ints (via serializeKeyGroup), replays them through a fresh
  // logger, and checks that every element comes back in the order it was appended.
  private void roundTrip(int numElements, int maxMemoryInBytes) throws Exception {
    InputStream input = serializeKeyGroup(1, maxMemoryInBytes, numElements);
    ArrayList<Integer> messages = new ArrayList<>();
    UnboundedFeedbackLogger<Integer> loggerUnderTest = instanceUnderTest(1, 0);
    loggerUnderTest.replyLoggedEnvelops(input, messages::add);
    for (int i = 0; i < numElements; i++) {
      Integer message = messages.get(i);
      assertThat(message, is(i));
    }
  }
  // Appends the ints [0, numItems) to a fresh logger, commits, and returns the
  // serialized bytes as an input stream.
  private ByteArrayInputStream serializeKeyGroup(int maxParallelism, long maxMemory, int numItems) {
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    UnboundedFeedbackLogger<Integer> loggerUnderTest = instanceUnderTest(maxParallelism, maxMemory);
    loggerUnderTest.startLogging(output);
    for (int i = 0; i < numItems; i++) {
      loggerUnderTest.append(i);
    }
    loggerUnderTest.commit();
    return new ByteArrayInputStream(output.toByteArray());
  }
  // Builds an UnboundedFeedbackLogger wired with the shared IO_MANAGER, an
  // identity key function, and a no-op checkpoint stream implementation.
  @SuppressWarnings("unchecked")
  private UnboundedFeedbackLogger<Integer> instanceUnderTest(int maxParallelism, long totalMemory) {
    ObjectContainer container =
        Loggers.unboundedSpillableLoggerContainer(
            IO_MANAGER, maxParallelism, totalMemory, IntSerializer.INSTANCE, Function.identity());
    container.add(
        "checkpoint-stream-ops",
        CheckpointedStreamOperations.class,
        new NoopStreamOps(maxParallelism));
    return container.get(UnboundedFeedbackLoggerFactory.class).create();
  }
  // Minimal CheckpointedStreamOperations stub: no checkpoint bookkeeping, and a
  // key-group list that simply enumerates [0, maxParallelism).
  static final class NoopStreamOps implements CheckpointedStreamOperations {
    private final int maxParallelism;
    NoopStreamOps(int maxParallelism) {
      this.maxParallelism = maxParallelism;
    }
    @Override
    public void requireKeyedStateCheckpointed(OutputStream keyedStateCheckpointOutputStream) {
      // noop
    }
    @Override
    public Iterable<Integer> keyGroupList(OutputStream stream) {
      IntStream range = IntStream.range(0, maxParallelism);
      return range::iterator;
    }
    @Override
    public void startNewKeyGroup(OutputStream stream, int keyGroup) {}
    @Override
    public Closeable acquireLease(OutputStream keyedStateCheckpointOutputStream) {
      return () -> {}; // NOOP
    }
  }
}
| 6,063 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/logger/RandomReadLengthByteArrayInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.logger;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.Random;
/**
 * A {@link ByteArrayInputStream} that reads a random number of bytes per read (at least 1, when at
 * least 1 byte was requested) up to the requested amount, and always returns 0 on {@link
 * InputStream#available()}.
 *
 * <p>We use this input stream in our tests to mimic behaviour of "real-life" input streams, while
 * still adhering to the contracts of the {@link InputStream} methods:
 *
 * <ul>
 *   <li>For {@link InputStream#read(byte[])} and {@link InputStream#read()}: read methods always
 *       blocks until at least 1 byte is available from the stream; it always at least reads 1 byte.
 *   <li>For {@link InputStream#read(byte[], int, int)} with {@code len == 0}: returns 0, as
 *       required by the contract.
 *   <li>For {@link InputStream#available()}: always return 0, to imply that there are no bytes
 *       immediately available from the stream, and the next read will block.
 * </ul>
 */
final class RandomReadLengthByteArrayInputStream extends ByteArrayInputStream {
  private static final Random RANDOM = new Random();

  RandomReadLengthByteArrayInputStream(byte[] byteBuffer) {
    super(byteBuffer);
  }

  /** Reads a random number of bytes between 1 and {@code len} (inclusive). */
  @Override
  public int read(byte[] b, int off, int len) {
    if (len <= 0) {
      // Random.nextInt(bound) throws for non-positive bounds; delegate instead so that a
      // zero-length request returns 0 and invalid arguments surface as
      // IndexOutOfBoundsException, matching the InputStream contract.
      return super.read(b, off, len);
    }
    final int randomNumBytesToRead = RANDOM.nextInt(len) + 1;
    return super.read(b, off, randomNumBytesToRead);
  }

  /** Reports no immediately-available bytes, as if the next read would block. */
  @Override
  public synchronized int available() {
    return 0;
  }
}
| 6,064 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/logger/InputStreamUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.logger;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(Parameterized.class)
public final class InputStreamUtilsTest {

  /** The stream flavours every test is run against (one parameterized run per constant). */
  private enum InputStreamType {
    RANDOM_LENGTH_PER_READ,
    ONE_BYTE_PER_READ
  }

  private final InputStreamType testInputStreamType;

  public InputStreamUtilsTest(InputStreamType testInputStreamType) {
    this.testInputStreamType = testInputStreamType;
  }

  // Note: the previous version declared "throws IOException" here, but nothing in the
  // method body can throw it; the dead declaration has been removed.
  @Parameterized.Parameters(name = "{0}")
  public static Iterable<InputStreamType> testInputStreamTypes() {
    return Arrays.asList(InputStreamType.RANDOM_LENGTH_PER_READ, InputStreamType.ONE_BYTE_PER_READ);
  }

  // Happy path: a buffer sized exactly to the stream content is filled completely.
  @Test
  public void tryReadFullyExampleUsage() throws Exception {
    final byte[] testBytes = "test-data".getBytes();
    final byte[] readBuffer = new byte[testBytes.length];
    try (InputStream in = testInputStream(testBytes)) {
      final int numReadBytes = InputStreamUtils.tryReadFully(in, readBuffer);
      assertThat(numReadBytes, is(testBytes.length));
      assertThat(readBuffer, is(testBytes));
      assertThat(in.read(), is(-1));
    }
  }

  // An already-empty stream yields 0 read bytes and leaves the buffer untouched.
  @Test
  public void tryReadFullyEmptyInputStream() throws Exception {
    final byte[] testBytes = new byte[0];
    final byte[] readBuffer = new byte[10];
    try (InputStream in = testInputStream(testBytes)) {
      final int numReadBytes = InputStreamUtils.tryReadFully(in, readBuffer);
      assertThat(numReadBytes, is(0));
      assertThat(readBuffer, is(new byte[10]));
      assertThat(in.read(), is(-1));
    }
  }

  @Test
  public void tryReadFullyReadBufferSizeLargerThanInputStream() throws Exception {
    final byte[] testBytes = new byte[] {-91, 11, 8};
    // read buffer has larger size than the test data
    final byte[] readBuffer = new byte[testBytes.length + 20];
    try (InputStream in = testInputStream(testBytes)) {
      final int numReadBytes = InputStreamUtils.tryReadFully(in, readBuffer);
      assertThat(numReadBytes, is(testBytes.length));
      assertThat(readBuffer, is(Arrays.copyOf(testBytes, readBuffer.length)));
      assertThat(in.read(), is(-1));
    }
  }

  @Test
  public void tryReadFullyReadBufferSizeSmallerThanInputStream() throws Exception {
    final byte[] testBytes = new byte[] {-91, 11, 8, 53, 100, 5, -100, 102, 56, 95};
    // read buffer has smaller size than the test data
    final byte[] readBuffer = new byte[testBytes.length - 2];
    try (InputStream in = testInputStream(testBytes)) {
      final int numReadBytes = InputStreamUtils.tryReadFully(in, readBuffer);
      assertThat(numReadBytes, is(readBuffer.length));
      assertThat(readBuffer, is(Arrays.copyOfRange(testBytes, 0, readBuffer.length)));
      // assert that the input stream is not overly-read
      assertThat(in.read(), is(56));
      assertThat(in.read(), is(95));
      assertThat(in.read(), is(-1));
    }
  }

  @Test(expected = IllegalStateException.class)
  public void tryReadFullyEmptyReadBuffer() throws Exception {
    InputStreamUtils.tryReadFully(testInputStream("test-data".getBytes()), new byte[0]);
  }

  /** Wraps a defensive copy of {@code streamBytes} in the stream flavour under test. */
  private InputStream testInputStream(byte[] streamBytes) {
    switch (testInputStreamType) {
      case ONE_BYTE_PER_READ:
        return new OneBytePerReadByteArrayInputStream(
            Arrays.copyOf(streamBytes, streamBytes.length));
      default:
      case RANDOM_LENGTH_PER_READ:
        return new RandomReadLengthByteArrayInputStream(
            Arrays.copyOf(streamBytes, streamBytes.length));
    }
  }
}
| 6,065 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/cache/SingleThreadedLruCacheTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.cache;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.junit.Assert.assertThat;
import org.junit.Test;
/** Basic behavior tests for {@link SingleThreadedLruCache}: retrieval and LRU eviction. */
public class SingleThreadedLruCacheTest {
  @Test
  public void exampleUsage() {
    final SingleThreadedLruCache<String, String> underTest = new SingleThreadedLruCache<>(2);
    underTest.put("a", "1");
    underTest.put("b", "2");
    // both entries fit within the capacity of 2, so both are retrievable
    assertThat(underTest.get("a"), is("1"));
    assertThat(underTest.get("b"), is("2"));
  }
  @Test
  public void leastRecentlyElementShouldBeEvicted() {
    final SingleThreadedLruCache<String, String> underTest = new SingleThreadedLruCache<>(2);
    underTest.put("a", "1");
    underTest.put("b", "2");
    // inserting a third entry exceeds the capacity of 2 and evicts "a",
    // the least recently used key
    underTest.put("c", "3");
    assertThat(underTest.get("a"), nullValue());
    assertThat(underTest.get("b"), is("2"));
    assertThat(underTest.get("c"), is("3"));
  }
}
| 6,066 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/jsonmodule/RemoteModuleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.jsonmodule;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.statefun.extensions.ComponentBinder;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.extensions.ExtensionModule;
import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverse;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.apache.flink.statefun.sdk.EgressType;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.IngressType;
import org.apache.flink.statefun.sdk.StatefulFunction;
import org.apache.flink.statefun.sdk.StatefulFunctionProvider;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.io.EgressSpec;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
import org.apache.flink.statefun.sdk.io.IngressSpec;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
import org.junit.Test;
/**
 * Tests that a remote module YAML definition is correctly loaded and that each of its components
 * is dispatched to the {@link ComponentBinder} extension bound under the matching typename.
 */
public final class RemoteModuleTest {
  // classpath location of the YAML module definition used by all tests in this class
  private final String modulePath = "remote-module/module.yaml";
  @Test
  public void exampleUsage() {
    StatefulFunctionModule module = fromPath(modulePath);
    assertThat(module, notNullValue());
  }
  @Test
  public void testComponents() {
    StatefulFunctionModule module = fromPath(modulePath);
    StatefulFunctionsUniverse universe = emptyUniverse();
    setupUniverse(universe, module, new TestComponentBindersModule());
    // each component in module.yaml should have been routed to its test binder, which in
    // turn registered a function / ingress / egress in the universe
    assertThat(universe.functions(), hasKey(TestComponentBinder1.TEST_FUNCTION_TYPE));
    assertThat(universe.ingress(), hasKey(TestComponentBinder2.TEST_INGRESS.id()));
    assertThat(universe.egress(), hasKey(TestComponentBinder3.TEST_EGRESS.id()));
  }
  /** Loads a {@link StatefulFunctionModule} from a YAML resource on the test classpath. */
  private static StatefulFunctionModule fromPath(String path) {
    URL moduleUrl = RemoteModuleTest.class.getClassLoader().getResource(path);
    assertThat(moduleUrl, not(nullValue()));
    ObjectMapper mapper = JsonServiceLoader.mapper();
    return JsonServiceLoader.fromUrl(mapper, moduleUrl);
  }
  /** A fresh universe using Protobuf payloads; no custom payload serializer is needed here. */
  private static StatefulFunctionsUniverse emptyUniverse() {
    return new StatefulFunctionsUniverse(
        MessageFactoryKey.forType(MessageFactoryType.WITH_PROTOBUF_PAYLOADS, null));
  }
  // Configures the extension module before the function module, so the component binders are
  // already registered when the function module's components are resolved.
  private static void setupUniverse(
      StatefulFunctionsUniverse universe,
      StatefulFunctionModule functionModule,
      ExtensionModule extensionModule) {
    final Map<String, String> globalConfig = new HashMap<>();
    extensionModule.configure(globalConfig, universe);
    functionModule.configure(globalConfig, universe);
  }
  /** Binds one test {@link ComponentBinder} per component typename expected in module.yaml. */
  private static class TestComponentBindersModule implements ExtensionModule {
    @Override
    public void configure(Map<String, String> globalConfigurations, Binder binder) {
      binder.bindExtension(
          TypeName.parseFrom("com.foo.bar/test.component.1"), new TestComponentBinder1());
      binder.bindExtension(
          TypeName.parseFrom("com.foo.bar/test.component.2"), new TestComponentBinder2());
      binder.bindExtension(
          TypeName.parseFrom("com.foo.bar/test.component.3"), new TestComponentBinder3());
    }
  }
  /** On bind, registers a function provider; verifies function component dispatch. */
  private static class TestComponentBinder1 implements ComponentBinder {
    private static final FunctionType TEST_FUNCTION_TYPE =
        new FunctionType("test", "function.type");
    @Override
    public void bind(
        ComponentJsonObject component, StatefulFunctionModule.Binder remoteModuleBinder) {
      remoteModuleBinder.bindFunctionProvider(TEST_FUNCTION_TYPE, new TestFunctionProvider());
    }
  }
  /** On bind, registers an ingress; verifies ingress component dispatch. */
  private static class TestComponentBinder2 implements ComponentBinder {
    private static final TestIngressSpec TEST_INGRESS = new TestIngressSpec();
    @Override
    public void bind(
        ComponentJsonObject component, StatefulFunctionModule.Binder remoteModuleBinder) {
      remoteModuleBinder.bindIngress(TEST_INGRESS);
    }
  }
  /** On bind, registers an egress; verifies egress component dispatch. */
  private static class TestComponentBinder3 implements ComponentBinder {
    private static final TestEgressSpec TEST_EGRESS = new TestEgressSpec();
    @Override
    public void bind(
        ComponentJsonObject component, StatefulFunctionModule.Binder remoteModuleBinder) {
      remoteModuleBinder.bindEgress(TEST_EGRESS);
    }
  }
  /** A provider stub; these tests only check registration, never function instantiation. */
  private static class TestFunctionProvider implements StatefulFunctionProvider {
    @Override
    public StatefulFunction functionOfType(FunctionType type) {
      throw new UnsupportedOperationException();
    }
  }
  /** An ingress spec stub; only {@link #id()} is consulted by the assertions. */
  private static class TestIngressSpec implements IngressSpec<String> {
    @Override
    public IngressIdentifier<String> id() {
      return new IngressIdentifier<>(String.class, "test-namespace", "test-ingress");
    }
    @Override
    public IngressType type() {
      throw new UnsupportedOperationException();
    }
  }
  /** An egress spec stub; only {@link #id()} is consulted by the assertions. */
  private static class TestEgressSpec implements EgressSpec<String> {
    @Override
    public EgressIdentifier<String> id() {
      return new EgressIdentifier<>("test-namespace", "test-egress", String.class);
    }
    @Override
    public EgressType type() {
      throw new UnsupportedOperationException();
    }
  }
}
| 6,067 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/reqreply/PersistedRemoteFunctionValuesTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.reqreply;
import static org.apache.flink.statefun.flink.core.reqreply.PersistedRemoteFunctionValues.RemoteFunctionStateException;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import com.google.protobuf.ByteString;
import java.util.Arrays;
import java.util.Collections;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.PersistedValueMutation;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.PersistedValueSpec;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction.InvocationBatchRequest;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction.PersistedValue;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
import org.junit.Test;
/**
 * Tests for {@link PersistedRemoteFunctionValues}: registering remote state specs, applying state
 * mutations reported by a remote function, and attaching current state values to an outgoing
 * invocation batch.
 */
public class PersistedRemoteFunctionValuesTest {
  // state type used by most tests; individual tests override it to exercise type mismatches
  private static final TypeName TEST_STATE_TYPE = TypeName.parseFrom("com.foo.bar/testType");
  @Test
  public void exampleUsage() {
    final PersistedRemoteFunctionValues values = new PersistedRemoteFunctionValues();
    // --- register persisted states
    values.registerStates(
        Arrays.asList(
            protocolPersistedValueSpec("state-1", TEST_STATE_TYPE),
            protocolPersistedValueSpec("state-2", TEST_STATE_TYPE)));
    // --- update state values
    values.updateStateValues(
        Arrays.asList(
            protocolPersistedValueModifyMutation(
                "state-1", protocolTypedValue(TEST_STATE_TYPE, ByteString.copyFromUtf8("data-1"))),
            protocolPersistedValueModifyMutation(
                "state-2",
                protocolTypedValue(TEST_STATE_TYPE, ByteString.copyFromUtf8("data-2")))));
    final InvocationBatchRequest.Builder builder = InvocationBatchRequest.newBuilder();
    values.attachStateValues(builder);
    // --- registered state names and their values should be attached
    assertThat(builder.getStateList().size(), is(2));
    assertThat(
        builder.getStateList(),
        hasItems(
            protocolPersistedValue(
                "state-1", protocolTypedValue(TEST_STATE_TYPE, ByteString.copyFromUtf8("data-1"))),
            protocolPersistedValue(
                "state-2",
                protocolTypedValue(TEST_STATE_TYPE, ByteString.copyFromUtf8("data-2")))));
  }
  @Test
  public void zeroRegisteredStates() {
    final PersistedRemoteFunctionValues values = new PersistedRemoteFunctionValues();
    final InvocationBatchRequest.Builder builder = InvocationBatchRequest.newBuilder();
    // with no registrations, nothing should be attached to the outgoing batch
    values.attachStateValues(builder);
    assertThat(builder.getStateList().size(), is(0));
  }
  @Test(expected = IllegalStateException.class)
  public void updatingNonRegisteredStateShouldThrow() {
    final PersistedRemoteFunctionValues values = new PersistedRemoteFunctionValues();
    // mutating a state name that was never registered is a protocol violation
    values.updateStateValues(
        Collections.singletonList(
            protocolPersistedValueModifyMutation(
                "non-registered-state",
                protocolTypedValue(TEST_STATE_TYPE, ByteString.copyFromUtf8("data")))));
  }
  @Test
  public void registeredStateWithEmptyValueShouldBeAttached() {
    final PersistedRemoteFunctionValues values = new PersistedRemoteFunctionValues();
    values.registerStates(
        Collections.singletonList(protocolPersistedValueSpec("state", TEST_STATE_TYPE)));
    final InvocationBatchRequest.Builder builder = InvocationBatchRequest.newBuilder();
    values.attachStateValues(builder);
    // a registered but never-written state is attached with no value
    assertThat(builder.getStateList().size(), is(1));
    assertThat(builder.getStateList(), hasItems(protocolPersistedValue("state", null)));
  }
  @Test
  public void registeredStateWithDeletedValueShouldBeAttached() {
    final PersistedRemoteFunctionValues values = new PersistedRemoteFunctionValues();
    values.registerStates(
        Collections.singletonList(protocolPersistedValueSpec("state", TEST_STATE_TYPE)));
    // modify and then delete state value
    values.updateStateValues(
        Collections.singletonList(
            protocolPersistedValueModifyMutation(
                "state", protocolTypedValue(TEST_STATE_TYPE, ByteString.copyFromUtf8("data")))));
    values.updateStateValues(
        Collections.singletonList(protocolPersistedValueDeleteMutation("state")));
    final InvocationBatchRequest.Builder builder = InvocationBatchRequest.newBuilder();
    values.attachStateValues(builder);
    // the deleted state remains registered and is attached with no value
    assertThat(builder.getStateList().size(), is(1));
    assertThat(builder.getStateList(), hasItems(protocolPersistedValue("state", null)));
  }
  @Test
  public void duplicateRegistrationsHasNoEffect() {
    final PersistedRemoteFunctionValues values = new PersistedRemoteFunctionValues();
    values.registerStates(
        Collections.singletonList(protocolPersistedValueSpec("state", TEST_STATE_TYPE)));
    values.updateStateValues(
        Collections.singletonList(
            protocolPersistedValueModifyMutation(
                "state", protocolTypedValue(TEST_STATE_TYPE, ByteString.copyFromUtf8("data")))));
    // duplicate registration under the same state name
    values.registerStates(
        Collections.singletonList(protocolPersistedValueSpec("state", TEST_STATE_TYPE)));
    final InvocationBatchRequest.Builder builder = InvocationBatchRequest.newBuilder();
    values.attachStateValues(builder);
    // the previously written value must survive the re-registration
    assertThat(builder.getStateList().size(), is(1));
    assertThat(
        builder.getStateList(),
        hasItems(
            protocolPersistedValue(
                "state", protocolTypedValue(TEST_STATE_TYPE, ByteString.copyFromUtf8("data")))));
  }
  @Test(expected = RemoteFunctionStateException.class)
  public void mismatchingStateTypeAcrossRegistrations() {
    final PersistedRemoteFunctionValues values = new PersistedRemoteFunctionValues();
    // re-registering the same state name with a different type must be rejected
    values.registerStates(
        Collections.singletonList(
            protocolPersistedValueSpec("state", TypeName.parseFrom("com.foo.bar/type-1"))));
    values.registerStates(
        Collections.singletonList(
            protocolPersistedValueSpec("state", TypeName.parseFrom("com.foo.bar/type-2"))));
  }
  @Test(expected = RemoteFunctionStateException.class)
  public void mutatingStateValueWithMismatchingType() {
    final PersistedRemoteFunctionValues values = new PersistedRemoteFunctionValues();
    // writing a value whose type differs from the registered type must be rejected
    values.registerStates(
        Collections.singletonList(
            protocolPersistedValueSpec("state", TypeName.parseFrom("com.foo.bar/type-1"))));
    values.updateStateValues(
        Collections.singletonList(
            protocolPersistedValueModifyMutation(
                "state",
                protocolTypedValue(
                    TypeName.parseFrom("com.foo.bar/type-2"), ByteString.copyFromUtf8("data")))));
  }
  // Builds a TypedValue for the given typename/value.
  // NOTE(review): a null value would NPE on setValue(); every call site in this class passes a
  // non-null ByteString, so the (value != null) guard on hasValue is effectively always true.
  private static TypedValue protocolTypedValue(TypeName typename, ByteString value) {
    return TypedValue.newBuilder()
        .setTypename(typename.canonicalTypenameString())
        .setHasValue(value != null)
        .setValue(value)
        .build();
  }
  /** Builds a state registration spec for the given state name and type. */
  private static PersistedValueSpec protocolPersistedValueSpec(String stateName, TypeName type) {
    return PersistedValueSpec.newBuilder()
        .setStateName(stateName)
        .setTypeTypename(type.canonicalTypenameString())
        .build();
  }
  /** Builds a MODIFY mutation writing {@code modifyValue} into {@code stateName}. */
  private static PersistedValueMutation protocolPersistedValueModifyMutation(
      String stateName, TypedValue modifyValue) {
    return PersistedValueMutation.newBuilder()
        .setStateName(stateName)
        .setMutationType(PersistedValueMutation.MutationType.MODIFY)
        .setStateValue(modifyValue)
        .build();
  }
  /** Builds a DELETE mutation clearing {@code stateName}. */
  private static PersistedValueMutation protocolPersistedValueDeleteMutation(String stateName) {
    return PersistedValueMutation.newBuilder()
        .setStateName(stateName)
        .setMutationType(PersistedValueMutation.MutationType.DELETE)
        .build();
  }
  /** Builds the expected attached state entry; a null {@code stateValue} means "no value". */
  private static PersistedValue protocolPersistedValue(String stateName, TypedValue stateValue) {
    final PersistedValue.Builder builder = PersistedValue.newBuilder();
    builder.setStateName(stateName);
    if (stateValue != null) {
      builder.setStateValue(stateValue);
    }
    return builder.build();
  }
}
| 6,068 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/reqreply/RequestReplyFunctionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.reqreply;
import static org.apache.flink.statefun.flink.core.TestUtils.FUNCTION_1_ADDR;
import static org.apache.flink.statefun.flink.core.common.PolyglotUtil.polyglotAddressToSdkAddress;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import com.google.protobuf.ByteString;
import java.time.Duration;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import org.apache.flink.statefun.flink.core.backpressure.InternalContext;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetrics;
import org.apache.flink.statefun.flink.core.metrics.RemoteInvocationMetrics;
import org.apache.flink.statefun.sdk.Address;
import org.apache.flink.statefun.sdk.AsyncOperationResult;
import org.apache.flink.statefun.sdk.AsyncOperationResult.Status;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.metrics.Counter;
import org.apache.flink.statefun.sdk.metrics.Metrics;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.DelayedInvocation;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.EgressMessage;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.ExpirationSpec;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.IncompleteInvocationContext;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.InvocationResponse;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.PersistedValueMutation;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.PersistedValueMutation.MutationType;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction.PersistedValueSpec;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction.Invocation;
import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;
import org.junit.Test;
public class RequestReplyFunctionTest {
  // function type under which the remote function is registered in every test
  private static final FunctionType FN_TYPE = new FunctionType("foo", "bar");
  // captures the outgoing ToFunction request instead of performing a real remote call
  private final FakeClient client = new FakeClient();
  // records sends, delayed messages, egresses and backpressure requests made by the function
  private final FakeContext context = new FakeContext();
  // function under test: one pre-registered state ("session"), max batch size 10
  private final RequestReplyFunction functionUnderTest =
      new RequestReplyFunction(
          FN_TYPE, testInitialRegisteredState("session", "com.foo.bar/myType"), 10, client, true);
  @Test
  public void example() {
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // a single pending message goes out immediately as a batch of one
    assertTrue(client.wasSentToFunction.hasInvocation());
    assertThat(client.capturedInvocationBatchSize(), is(1));
  }
  @Test
  public void callerIsSet() {
    context.caller = FUNCTION_1_ADDR;
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // the caller address set on the context must be carried into the protocol invocation
    Invocation anInvocation = client.capturedInvocation(0);
    Address caller = polyglotAddressToSdkAddress(anInvocation.getCaller());
    assertThat(caller, is(FUNCTION_1_ADDR));
  }
  @Test
  public void messageIsSet() {
    // the typed argument must be forwarded unmodified in the protocol invocation
    TypedValue argument =
        TypedValue.newBuilder()
            .setTypename("io.statefun.foo/bar")
            .setHasValue(true)
            .setValue(ByteString.copyFromUtf8("Hello!"))
            .build();
    functionUnderTest.invoke(context, argument);
    assertThat(client.capturedInvocation(0).getArgument(), is(argument));
  }
  @Test
  public void batchIsAccumulatedWhileARequestIsInFlight() {
    // send one message
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // the following invocations should be queued and sent as a batch
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // simulate a successful completion of the first operation
    functionUnderTest.invoke(context, successfulAsyncOperation());
    assertThat(client.capturedInvocationBatchSize(), is(2));
  }
  @Test
  public void reachingABatchLimitTriggersBackpressure() {
    // max batch size of 2 for this test
    RequestReplyFunction functionUnderTest = new RequestReplyFunction(FN_TYPE, 2, client);
    // send one message
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // the following invocations should be queued
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // the following invocations should request backpressure
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    assertThat(context.needsWaiting, is(true));
  }
  @Test
  public void returnedMessageReleaseBackpressure() {
    RequestReplyFunction functionUnderTest = new RequestReplyFunction(FN_TYPE, 2, client);
    // the following invocations should cause backpressure
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // complete one message, should send a batch of size 3
    context.needsWaiting = false;
    functionUnderTest.invoke(context, successfulAsyncOperation());
    // the next message should not cause backpressure.
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    assertThat(context.needsWaiting, is(false));
  }
  @Test
  public void stateIsModified() {
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // A message returned from the function
    // that asks to put "hello" into the session state.
    FromFunction response =
        FromFunction.newBuilder()
            .setInvocationResult(
                InvocationResponse.newBuilder()
                    .addStateMutations(
                        PersistedValueMutation.newBuilder()
                            .setStateValue(
                                TypedValue.newBuilder()
                                    .setTypename("com.foo.bar/myType")
                                    .setHasValue(true)
                                    .setValue(ByteString.copyFromUtf8("hello")))
                            .setMutationType(MutationType.MODIFY)
                            .setStateName("session")))
            .build();
    functionUnderTest.invoke(context, successfulAsyncOperation(response));
    // the next outgoing batch must carry the mutated state value
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    assertThat(client.capturedState(0).getValue(), is(ByteString.copyFromUtf8("hello")));
  }
  @Test
  public void delayedMessages() {
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // response asking to deliver a message back to self after 1 ms
    FromFunction response =
        FromFunction.newBuilder()
            .setInvocationResult(
                InvocationResponse.newBuilder()
                    .addDelayedInvocations(
                        DelayedInvocation.newBuilder()
                            .setArgument(TypedValue.getDefaultInstance())
                            .setDelayInMs(1)
                            .build()))
            .build();
    functionUnderTest.invoke(context, successfulAsyncOperation(response));
    assertFalse(context.delayed.isEmpty());
    assertEquals(Duration.ofMillis(1), context.delayed.get(0).delay());
  }
  @Test
  public void delayedMessagesWithCancellationToken() {
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // delayed invocation carrying a cancellation token "foo"
    FromFunction response =
        FromFunction.newBuilder()
            .setInvocationResult(
                InvocationResponse.newBuilder()
                    .addDelayedInvocations(
                        DelayedInvocation.newBuilder()
                            .setArgument(TypedValue.getDefaultInstance())
                            .setDelayInMs(1)
                            .setCancellationToken("foo")
                            .build()))
            .build();
    functionUnderTest.invoke(context, successfulAsyncOperation(response));
    // both the delay and the token must be forwarded to the context
    assertFalse(context.delayed.isEmpty());
    assertEquals(Duration.ofMillis(1), context.delayed.get(0).delay());
    assertEquals("foo", context.delayed.get(0).cancellationToken);
  }
  @Test
  public void delayedMessagesCancellation() {
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // a cancellation request (isCancellationRequest = true) rather than a new delayed message
    FromFunction response =
        FromFunction.newBuilder()
            .setInvocationResult(
                InvocationResponse.newBuilder()
                    .addDelayedInvocations(
                        DelayedInvocation.newBuilder()
                            .setIsCancellationRequest(true)
                            .setCancellationToken("foo")
                            .build()))
            .build();
    functionUnderTest.invoke(context, successfulAsyncOperation(response));
    assertFalse(context.delayCancellations.isEmpty());
    assertEquals("foo", context.delayCancellations.get(0));
  }
  @Test
  public void egressIsSent() {
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // response asking to emit a message to the egress org.foo/bar
    FromFunction response =
        FromFunction.newBuilder()
            .setInvocationResult(
                InvocationResponse.newBuilder()
                    .addOutgoingEgresses(
                        EgressMessage.newBuilder()
                            .setArgument(TypedValue.getDefaultInstance())
                            .setEgressNamespace("org.foo")
                            .setEgressType("bar")))
            .build();
    functionUnderTest.invoke(context, successfulAsyncOperation(response));
    assertFalse(context.egresses.isEmpty());
    assertEquals(
        new EgressIdentifier<>("org.foo", "bar", TypedValue.class),
        context.egresses.get(0).getKey());
  }
  @Test
  public void retryBatchOnIncompleteInvocationContextResponse() {
    TypedValue argument =
        TypedValue.newBuilder()
            .setTypename("io.statefun.foo/bar")
            .setValue(ByteString.copyFromUtf8("Hello!"))
            .build();
    functionUnderTest.invoke(context, argument);
    // remote function reports a missing state spec ("new-state"), requesting a retry
    FromFunction response =
        FromFunction.newBuilder()
            .setIncompleteInvocationContext(
                IncompleteInvocationContext.newBuilder()
                    .addMissingValues(
                        PersistedValueSpec.newBuilder()
                            .setStateName("new-state")
                            .setExpirationSpec(
                                ExpirationSpec.newBuilder()
                                    .setMode(ExpirationSpec.ExpireMode.AFTER_INVOKE)
                                    .setExpireAfterMillis(5000)
                                    .build())))
            .build();
    functionUnderTest.invoke(context, successfulAsyncOperation(client.wasSentToFunction, response));
    // re-sent batch should have identical invocation input messages
    assertTrue(client.wasSentToFunction.hasInvocation());
    assertThat(client.capturedInvocationBatchSize(), is(1));
    assertThat(client.capturedInvocation(0).getArgument(), is(argument));
    // re-sent batch should have new state as well as originally registered state
    assertThat(client.capturedStateNames().size(), is(2));
    assertThat(client.capturedStateNames(), hasItems("session", "new-state"));
  }
  @Test
  public void backlogMetricsIncreasedOnInvoke() {
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // following should be accounted into backlog metrics
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    assertThat(context.functionTypeMetrics().numBacklog, is(2));
  }
  @Test
  public void backlogMetricsDecreasedOnNextSuccess() {
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // following should be accounted into backlog metrics
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    functionUnderTest.invoke(context, TypedValue.getDefaultInstance());
    // complete one message, should fully consume backlog
    context.needsWaiting = false;
    functionUnderTest.invoke(context, successfulAsyncOperation());
    assertThat(context.functionTypeMetrics().numBacklog, is(0));
  }
  // NOTE(review): "Unkown" is a typo for "Unknown" in the method name; kept as-is since the
  // test method name is discovered by annotation and renaming would only churn history.
  @Test
  public void retryBatchOnUnkownAsyncResponseAfterRestore() {
    TypedValue argument =
        TypedValue.newBuilder()
            .setTypename("io.statefun.foo/bar")
            .setValue(ByteString.copyFromUtf8("Hello!"))
            .build();
    functionUnderTest.invoke(context, argument);
    ToFunction originalRequest = client.wasSentToFunction;
    // a freshly restored function instance with empty state receives an UNKNOWN async result
    RequestReplyFunction restoredFunction =
        new RequestReplyFunction(FN_TYPE, new PersistedRemoteFunctionValues(), 2, client, true);
    restoredFunction.invoke(context, unknownAsyncOperation(originalRequest));
    // retry batch after a restore on an unknown async operation should start with empty state specs
    assertTrue(client.wasSentToFunction.hasInvocation());
    assertThat(client.capturedInvocationBatchSize(), is(1));
    assertThat(client.capturedInvocation(0).getArgument(), is(argument));
    assertThat(client.capturedStateNames().size(), is(0));
  }
  /** Builds initial persisted-state registrations containing a single state spec. */
  private static PersistedRemoteFunctionValues testInitialRegisteredState(
      String existingStateName, String typename) {
    final PersistedRemoteFunctionValues states = new PersistedRemoteFunctionValues();
    states.registerStates(
        Collections.singletonList(
            PersistedValueSpec.newBuilder()
                .setTypeTypename(typename)
                .setStateName(existingStateName)
                .build()));
    return states;
  }
  /** A SUCCESS async result carrying the default (empty) FromFunction response. */
  private static AsyncOperationResult<Object, FromFunction> successfulAsyncOperation() {
    return new AsyncOperationResult<>(
        new Object(), Status.SUCCESS, FromFunction.getDefaultInstance(), null);
  }
  /** A SUCCESS async result carrying the given FromFunction response. */
  private static AsyncOperationResult<Object, FromFunction> successfulAsyncOperation(
      FromFunction fromFunction) {
    return new AsyncOperationResult<>(new Object(), Status.SUCCESS, fromFunction, null);
  }
  /** A SUCCESS async result whose metadata is the original ToFunction request (for retries). */
  private static AsyncOperationResult<ToFunction, FromFunction> successfulAsyncOperation(
      ToFunction toFunction, FromFunction fromFunction) {
    return new AsyncOperationResult<>(toFunction, Status.SUCCESS, fromFunction, null);
  }
  /** An UNKNOWN async result, as observed after a checkpoint restore. */
  private static AsyncOperationResult<ToFunction, FromFunction> unknownAsyncOperation(
      ToFunction toFunction) {
    return new AsyncOperationResult<>(
        toFunction, Status.UNKNOWN, FromFunction.getDefaultInstance(), null);
  }
  /**
   * A {@link RequestReplyClient} that records the last {@link ToFunction} request and replies
   * immediately with the configured {@link FromFunction} (the default instance unless changed).
   */
  private static final class FakeClient implements RequestReplyClient {
    // last request captured by call(); inspected by the tests
    ToFunction wasSentToFunction;
    // supplies the canned response; tests may replace it to simulate failures
    Supplier<FromFunction> fromFunction = FromFunction::getDefaultInstance;
    @Override
    public CompletableFuture<FromFunction> call(
        ToFunctionRequestSummary requestSummary,
        RemoteInvocationMetrics metrics,
        ToFunction toFunction) {
      this.wasSentToFunction = toFunction;
      try {
        return CompletableFuture.completedFuture(this.fromFunction.get());
      } catch (Throwable t) {
        // surface supplier failures as a failed future, mirroring a real client
        CompletableFuture<FromFunction> failed = new CompletableFuture<>();
        failed.completeExceptionally(t);
        return failed;
      }
    }
    /** return the n-th invocation sent as part of the current batch. */
    Invocation capturedInvocation(int n) {
      return wasSentToFunction.getInvocation().getInvocations(n);
    }
    /** return the value of the n-th state entry attached to the current batch. */
    TypedValue capturedState(int n) {
      return wasSentToFunction.getInvocation().getState(n).getStateValue();
    }
    /** return the names of all state entries attached to the current batch. */
    Set<String> capturedStateNames() {
      return wasSentToFunction.getInvocation().getStateList().stream()
          .map(ToFunction.PersistedValue::getStateName)
          .collect(Collectors.toSet());
    }
    /** return the number of invocations in the current batch. */
    public int capturedInvocationBatchSize() {
      return wasSentToFunction.getInvocation().getInvocationsCount();
    }
  }
  /** A value holder recording one sendAfter() call: delay, optional token, target and payload. */
  private static final class DelayedMessage {
    final Duration delay;
    // null when the message was scheduled without a cancellation token
    final @Nullable String cancellationToken;
    final Address target;
    final Object message;
    public DelayedMessage(
        Duration delay, @Nullable String cancellationToken, Address target, Object message) {
      this.delay = delay;
      this.cancellationToken = cancellationToken;
      this.target = target;
      this.message = message;
    }
    public Duration delay() {
      return delay;
    }
    // alias accessor for the cancellation token; currently unused by the tests
    @Nullable
    public String messageId() {
      return cancellationToken;
    }
    public Address target() {
      return target;
    }
    public Object message() {
      return message;
    }
  }
private static final class FakeContext implements InternalContext {
private final BacklogTrackingMetrics fakeMetrics = new BacklogTrackingMetrics();
Address caller;
boolean needsWaiting;
// capture emitted messages
List<Map.Entry<EgressIdentifier<?>, ?>> egresses = new ArrayList<>();
List<DelayedMessage> delayed = new ArrayList<>();
List<String> delayCancellations = new ArrayList<>();
@Override
public void awaitAsyncOperationComplete() {
needsWaiting = true;
}
@Override
public BacklogTrackingMetrics functionTypeMetrics() {
return fakeMetrics;
}
@Override
public Address self() {
return new Address(FN_TYPE, "0");
}
@Override
public Address caller() {
return caller;
}
@Override
public void send(Address to, Object message) {}
@Override
public <T> void send(EgressIdentifier<T> egress, T message) {
egresses.add(new SimpleImmutableEntry<>(egress, message));
}
@Override
public void sendAfter(Duration delay, Address to, Object message) {
delayed.add(new DelayedMessage(delay, null, to, message));
}
@Override
public void sendAfter(Duration delay, Address to, Object message, String cancellationToken) {
delayed.add(new DelayedMessage(delay, cancellationToken, to, message));
}
@Override
public void cancelDelayedMessage(String cancellationToken) {
delayCancellations.add(cancellationToken);
}
@Override
public <M, T> void registerAsyncOperation(M metadata, CompletableFuture<T> future) {}
@Override
public Metrics metrics() {
return name ->
new Counter() {
@Override
public void inc(long amount) {}
@Override
public void dec(long amount) {}
};
}
}
private static final class BacklogTrackingMetrics implements FunctionTypeMetrics {
private int numBacklog = 0;
public int numBacklog() {
return numBacklog;
}
@Override
public void appendBacklogMessages(int count) {
numBacklog += count;
}
@Override
public void consumeBacklogMessages(int count) {
numBacklog -= count;
}
@Override
public Metrics functionTypeScopedMetrics() {
throw new UnsupportedOperationException();
}
@Override
public void remoteInvocationFailures() {}
@Override
public void remoteInvocationLatency(long elapsed) {}
@Override
public void asyncOperationRegistered() {}
@Override
public void asyncOperationCompleted() {}
@Override
public void incomingMessage() {}
@Override
public void outgoingRemoteMessage() {}
@Override
public void outgoingEgressMessage() {}
@Override
public void outgoingLocalMessage() {}
@Override
public void blockedAddress() {}
@Override
public void unblockedAddress() {}
}
}
| 6,069 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/backpressure/BoundedExponentialBackoffTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.backpressure;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import java.time.Duration;
import org.junit.Test;
/**
 * Tests for {@link BoundedExponentialBackoff} using a manually advanced fake clock: backoffs are
 * applied until the overall timeout budget is exhausted.
 */
public class BoundedExponentialBackoffTest {
  private static final Duration TIMEOUT = Duration.ofMinutes(1);

  private final FakeNanoClock fakeTime = new FakeNanoClock();
  private final BoundedExponentialBackoff backoffUnderTest =
      new BoundedExponentialBackoff(fakeTime, Duration.ofSeconds(1), TIMEOUT);

  @Test
  public void simpleUsage() {
    // a first backoff within the budget succeeds and advances the (fake) clock
    assertThat(backoffUnderTest.applyNow(), is(true));
    assertThat(fakeTime.now(), greaterThan(0L));
  }

  @Test
  public void timeoutExpired() {
    // once the clock already sits at the timeout, no further backoff is granted
    fakeTime.now = TIMEOUT.toNanos();
    assertThat(backoffUnderTest.applyNow(), is(false));
  }

  @Test
  @SuppressWarnings("StatementWithEmptyBody")
  public void totalNumberOfBackoffsIsEqualToTimeout() {
    // exhaust the backoff: the accumulated sleep time must equal the timeout exactly
    while (backoffUnderTest.applyNow()) {}
    assertThat(fakeTime.now(), is(TIMEOUT.toNanos()));
  }

  /** A manually advanced {@link Timer}: sleeping simply moves the clock forward. */
  private static final class FakeNanoClock implements Timer {
    long now;

    @Override
    public long now() {
      return now;
    }

    @Override
    public void sleep(long durationNano) {
      now = now + durationNano;
    }
  }
}
| 6,070 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/backpressure/ThresholdBackPressureValveTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.backpressure;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.apache.flink.statefun.flink.core.TestUtils;
import org.junit.Test;
/**
 * Tests for {@link ThresholdBackPressureValve}: back pressure is signaled either when the number
 * of in-flight async operations reaches the configured threshold, or while at least one address is
 * explicitly blocked.
 */
public class ThresholdBackPressureValveTest {

  @Test
  public void simpleUsage() {
    ThresholdBackPressureValve valveUnderTest = new ThresholdBackPressureValve(2);

    // reaching the threshold of registered operations triggers back pressure
    valveUnderTest.notifyAsyncOperationRegistered();
    valveUnderTest.notifyAsyncOperationRegistered();

    assertTrue(valveUnderTest.shouldBackPressure());
  }

  @Test
  public void completedOperationReleaseBackpressure() {
    ThresholdBackPressureValve valveUnderTest = new ThresholdBackPressureValve(1);

    valveUnderTest.notifyAsyncOperationRegistered();
    valveUnderTest.notifyAsyncOperationCompleted(TestUtils.FUNCTION_1_ADDR);

    assertFalse(valveUnderTest.shouldBackPressure());
  }

  @Test
  public void blockAddressTriggerBackpressure() {
    ThresholdBackPressureValve valveUnderTest = new ThresholdBackPressureValve(500);

    // a single blocked address forces back pressure even far below the threshold
    valveUnderTest.blockAddress(TestUtils.FUNCTION_1_ADDR);

    assertTrue(valveUnderTest.shouldBackPressure());
  }

  @Test
  public void blockingAndUnblockingAddress() {
    ThresholdBackPressureValve valveUnderTest = new ThresholdBackPressureValve(500);

    valveUnderTest.blockAddress(TestUtils.FUNCTION_1_ADDR);
    valveUnderTest.notifyAsyncOperationCompleted(TestUtils.FUNCTION_1_ADDR);

    assertFalse(valveUnderTest.shouldBackPressure());
  }

  @Test
  public void unblockingDifferentAddressStillBackpressures() {
    ThresholdBackPressureValve valveUnderTest = new ThresholdBackPressureValve(500);

    valveUnderTest.blockAddress(TestUtils.FUNCTION_1_ADDR);
    valveUnderTest.notifyAsyncOperationCompleted(TestUtils.FUNCTION_2_ADDR);

    assertTrue(valveUnderTest.shouldBackPressure());
  }

  @Test
  public void blockTwoAddress() {
    ThresholdBackPressureValve valveUnderTest = new ThresholdBackPressureValve(500);

    valveUnderTest.blockAddress(TestUtils.FUNCTION_1_ADDR);
    valveUnderTest.blockAddress(TestUtils.FUNCTION_2_ADDR);
    assertTrue(valveUnderTest.shouldBackPressure());

    // unblocking one address is not enough while another remains blocked
    valveUnderTest.notifyAsyncOperationCompleted(TestUtils.FUNCTION_1_ADDR);
    assertTrue(valveUnderTest.shouldBackPressure());

    valveUnderTest.notifyAsyncOperationCompleted(TestUtils.FUNCTION_2_ADDR);
    assertFalse(valveUnderTest.shouldBackPressure());
  }
}
| 6,071 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/message/MessageTypeSerializerSnapshotTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.message;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputView;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
/**
 * Verifies that {@link MessageTypeSerializer.Snapshot} can read back serialized snapshots written
 * in both the historical V1 format (factory type only) and the current V2 format (which also
 * carries the custom payload serializer class name), restoring the original
 * {@link MessageFactoryKey}.
 */
@RunWith(Parameterized.class)
public class MessageTypeSerializerSnapshotTest {
  private static final String SERIALIZER_CLASS_NAME = "com.domain.Serializer";

  /** Immutable holder for a serialized snapshot: its on-disk format version and raw bytes. */
  private static final class SnapshotData {
    final int version;
    final byte[] bytes;

    // plain constructor instead of the previous double-brace initializer, which created an
    // anonymous subclass per call site and left the fields mutable
    SnapshotData(int version, byte[] bytes) {
      this.version = version;
      this.bytes = bytes;
    }
  }

  /** Produces snapshot bytes for a given factory key in some historical or current format. */
  private interface SnapshotDataProvider {
    SnapshotData provide(MessageFactoryKey messageFactoryKey) throws IOException;
  }

  private final MessageFactoryKey messageFactoryKey;
  private final SnapshotDataProvider snapshotDataProvider;

  public MessageTypeSerializerSnapshotTest(
      MessageFactoryKey messageFactoryKey, SnapshotDataProvider snapshotDataProvider) {
    this.messageFactoryKey = messageFactoryKey;
    this.snapshotDataProvider = snapshotDataProvider;
  }

  @Parameterized.Parameters(name = "{0}")
  public static Iterable<? extends Object[]> data() throws IOException {
    MessageFactoryKey kryoFactoryKey =
        MessageFactoryKey.forType(MessageFactoryType.WITH_KRYO_PAYLOADS, null);
    MessageFactoryKey customFactoryKey =
        MessageFactoryKey.forType(MessageFactoryType.WITH_CUSTOM_PAYLOADS, SERIALIZER_CLASS_NAME);

    // generates snapshot data for V1, which did not yet carry customPayloadSerializerClassName
    SnapshotDataProvider snapshotDataProviderV1 =
        messageFactoryKey -> {
          try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
            DataOutputView dataOutputView = new DataOutputViewStreamWrapper(bos);
            dataOutputView.writeUTF(messageFactoryKey.getType().name());
            return new SnapshotData(1, bos.toByteArray());
          }
        };

    // generates snapshot data for V2, the current version
    SnapshotDataProvider snapshotDataProviderV2 =
        messageFactoryKey -> {
          MessageTypeSerializer.Snapshot snapshot =
              new MessageTypeSerializer.Snapshot(messageFactoryKey);
          try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
            DataOutputView dataOutputView = new DataOutputViewStreamWrapper(bos);
            snapshot.writeSnapshot(dataOutputView);
            return new SnapshotData(2, bos.toByteArray());
          }
        };

    // V1 snapshots only ever existed for the kryo factory; V2 covers both factory kinds
    return Arrays.asList(
        new Object[] {kryoFactoryKey, snapshotDataProviderV1},
        new Object[] {kryoFactoryKey, snapshotDataProviderV2},
        new Object[] {customFactoryKey, snapshotDataProviderV2});
  }

  @Test
  public void roundTrip() throws IOException {
    SnapshotData snapshotData = snapshotDataProvider.provide(messageFactoryKey);

    MessageTypeSerializer.Snapshot snapshot =
        new MessageTypeSerializer.Snapshot(messageFactoryKey);
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    try (ByteArrayInputStream bis = new ByteArrayInputStream(snapshotData.bytes)) {
      DataInputView dataInputView = new DataInputViewStreamWrapper(bis);
      snapshot.readSnapshot(snapshotData.version, dataInputView, classLoader);
    }

    // Explicit check instead of the `assert` keyword: Java assertions are disabled unless the
    // JVM runs with -ea, so the previous version silently skipped this verification.
    if (!snapshot.getMessageFactoryKey().equals(messageFactoryKey)) {
      throw new AssertionError(
          "restored factory key "
              + snapshot.getMessageFactoryKey()
              + " does not match original "
              + messageFactoryKey);
    }
  }
}
| 6,072 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/message/MessageTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.message;
import static org.apache.flink.statefun.flink.core.TestUtils.*;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import java.io.IOException;
import java.util.Arrays;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/**
 * Verifies that a {@link Message} survives a serialize/deserialize round trip for every payload
 * serialization mode, preserving the source address, target address, and payload.
 */
@RunWith(Parameterized.class)
public class MessageTest {
  private final MessageFactoryType type;
  private final String customPayloadSerializerClassName;
  private final Object payload;

  public MessageTest(
      MessageFactoryType type, String customPayloadSerializerClassName, Object payload) {
    this.type = type;
    this.customPayloadSerializerClassName = customPayloadSerializerClassName;
    this.payload = payload;
  }

  @Parameters(name = "{0}")
  public static Iterable<? extends Object[]> data() {
    // raw payloads carry bytes directly; all other modes carry the protobuf payload object
    return Arrays.asList(
        new Object[] {MessageFactoryType.WITH_KRYO_PAYLOADS, null, DUMMY_PAYLOAD},
        new Object[] {MessageFactoryType.WITH_PROTOBUF_PAYLOADS, null, DUMMY_PAYLOAD},
        new Object[] {MessageFactoryType.WITH_RAW_PAYLOADS, null, DUMMY_PAYLOAD.toByteArray()},
        new Object[] {
          MessageFactoryType.WITH_CUSTOM_PAYLOADS,
          "org.apache.flink.statefun.flink.core.message.JavaPayloadSerializer",
          DUMMY_PAYLOAD
        });
  }

  @Test
  public void roundTrip() throws IOException {
    MessageFactory factory =
        MessageFactory.forKey(MessageFactoryKey.forType(type, customPayloadSerializerClassName));
    Message fromSdk = factory.from(FUNCTION_1_ADDR, FUNCTION_2_ADDR, payload);

    // write the message into an envelope and read it back
    DataOutputSerializer out = new DataOutputSerializer(32);
    fromSdk.writeTo(factory, out);
    Message fromEnvelope = factory.from(new DataInputDeserializer(out.getCopyOfBuffer()));

    assertThat(fromEnvelope.source(), is(FUNCTION_1_ADDR));
    assertThat(fromEnvelope.target(), is(FUNCTION_2_ADDR));

    // the local used to be named `payload`, shadowing the field being compared against;
    // renamed so the assertion unambiguously compares deserialized vs. original payload
    ClassLoader targetClassLoader = payload.getClass().getClassLoader();
    Object deserializedPayload = fromEnvelope.payload(factory, targetClassLoader);
    assertThat(deserializedPayload, is(this.payload));
  }
}
| 6,073 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/message/JavaPayloadSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.message;
import com.google.protobuf.ByteString;
import java.io.*;
import javax.annotation.Nonnull;
import org.apache.flink.statefun.flink.core.generated.Payload;
/**
 * A {@link MessagePayloadSerializer} backed by plain Java serialization. Only used by tests to
 * exercise the custom-payload-serializer code path.
 */
public class JavaPayloadSerializer implements MessagePayloadSerializer {

  @Override
  public Payload serialize(@Nonnull Object payloadObject) {
    // Java-serialize the object and wrap the bytes, together with the concrete class name,
    // in a Payload message
    try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream out = new ObjectOutputStream(bos)) {
      out.writeObject(payloadObject);
      out.flush();
      return Payload.newBuilder()
          .setClassName(payloadObject.getClass().getName())
          .setPayloadBytes(ByteString.copyFrom(bos.toByteArray()))
          .build();
    } catch (Throwable ex) {
      throw new RuntimeException(ex);
    }
  }

  @Override
  public Object deserialize(@Nonnull ClassLoader targetClassLoader, @Nonnull Payload payload) {
    // Java-deserialize the wrapped bytes back into an object
    try (ByteArrayInputStream bis =
            new ByteArrayInputStream(payload.getPayloadBytes().toByteArray());
        ObjectInput in = new ObjectInputStream(bis)) {
      return in.readObject();
    } catch (Throwable ex) {
      throw new RuntimeException(ex);
    }
  }

  @Override
  public Object copy(@Nonnull ClassLoader targetClassLoader, @Nonnull Object what) {
    // a deep copy via a full serialize/deserialize round trip
    return deserialize(targetClassLoader, serialize(what));
  }
}
| 6,074 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/message/MessageTypeSerializerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.message;
import java.io.IOException;
import java.util.Arrays;
import java.util.stream.LongStream;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeutils.SerializerTestBase;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.statefun.flink.core.TestUtils;
import org.apache.flink.testutils.DeeplyEqualsChecker;
import org.junit.Ignore;
/**
 * Runs Flink's generic {@link SerializerTestBase} suite against the kryo-payload
 * {@link Message} serializer. Message equality is defined as byte-identical serialized envelopes.
 */
public class MessageTypeSerializerTest extends SerializerTestBase<Message> {
  /** Single factory key shared by the equality checker and the serializer under test. */
  private static final MessageFactoryKey FACTORY_KEY =
      MessageFactoryKey.forType(MessageFactoryType.WITH_KRYO_PAYLOADS, null);

  public MessageTypeSerializerTest() {
    super(
        new DeeplyEqualsChecker() {
          @Override
          public boolean deepEquals(Object o1, Object o2) {
            // two messages are equal iff their serialized envelopes are byte-identical
            return Arrays.equals(serialize((Message) o1), serialize((Message) o2));
          }
        });
  }

  /**
   * Serializes {@code message} into its envelope bytes, wrapping the checked IOException.
   * Extracted to avoid the duplicated try/catch that previously serialized both operands inline.
   */
  private static byte[] serialize(Message message) {
    MessageFactory factory = MessageFactory.forKey(FACTORY_KEY);
    DataOutputSerializer out = new DataOutputSerializer(32);
    try {
      message.writeTo(factory, out);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    return out.getCopyOfBuffer();
  }

  @Override
  protected TypeSerializer<Message> createSerializer() {
    return new MessageTypeInformation(FACTORY_KEY).createSerializer(new ExecutionConfig());
  }

  @Override
  protected int getLength() {
    // variable-length records
    return -1;
  }

  @Override
  protected Class<Message> getTypeClass() {
    return Message.class;
  }

  @Override
  protected Message[] getTestData() {
    return LongStream.range(1, 100)
        .mapToObj(TestUtils.ENVELOPE_FACTORY::from)
        .toArray(Message[]::new);
  }

  // intentionally disabled: inherited instantiation check is skipped for this serializer
  @Ignore
  @Override
  public void testInstantiate() {}
}
| 6,075 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/state/PersistedStatesTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.state;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nonnull;
import org.apache.flink.statefun.flink.core.TestUtils;
import org.apache.flink.statefun.sdk.Address;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.annotations.Persisted;
import org.apache.flink.statefun.sdk.state.Accessor;
import org.apache.flink.statefun.sdk.state.AppendingBufferAccessor;
import org.apache.flink.statefun.sdk.state.PersistedAppendingBuffer;
import org.apache.flink.statefun.sdk.state.PersistedStateRegistry;
import org.apache.flink.statefun.sdk.state.PersistedTable;
import org.apache.flink.statefun.sdk.state.PersistedValue;
import org.apache.flink.statefun.sdk.state.RemotePersistedValue;
import org.apache.flink.statefun.sdk.state.TableAccessor;
import org.junit.Test;
/**
 * Tests for {@code PersistedStates.findReflectivelyAndBind}: walks an object reflectively,
 * discovers {@code @Persisted} fields of every supported flavor (value, table, appending buffer,
 * remote value, dynamic registry, nested state object), and binds them through a
 * {@link FlinkStateBinder}. The {@link FakeState} collaborator records the names of all bound
 * states so each test can assert what was discovered.
 */
public class PersistedStatesTest {
  // test collaborators
  private final FakeState state = new FakeState();
  // object under test
  private final FlinkStateBinder binderUnderTest =
      new FlinkStateBinder(state, TestUtils.FUNCTION_TYPE);

  @Test
  public void exampleUsage() {
    PersistedStates.findReflectivelyAndBind(new SanityClass(), binderUnderTest);
    assertThat(state.boundNames, hasItems("name", "last"));
  }

  // a @Persisted field left null cannot be bound and must fail fast
  @Test(expected = IllegalStateException.class)
  public void nullValueField() {
    PersistedStates.findReflectivelyAndBind(new NullValueClass(), binderUnderTest);
  }

  // fields without the @Persisted annotation are ignored entirely
  @Test
  public void nonAnnotatedClass() {
    PersistedStates.findReflectivelyAndBind(new IgnoreNonAnnotated(), binderUnderTest);
    assertTrue(state.boundNames.isEmpty());
  }

  // @Persisted fields declared on a superclass are discovered as well
  @Test
  public void extendedClass() {
    PersistedStates.findReflectivelyAndBind(new ChildClass(), binderUnderTest);
    assertThat(state.boundNames, hasItems("parent", "child"));
  }

  @Test(expected = IllegalArgumentException.class)
  public void staticPersistedFieldsAreNotAllowed() {
    PersistedStates.findReflectivelyAndBind(new StaticPersistedValue(), binderUnderTest);
  }

  @Test
  public void bindPersistedTable() {
    PersistedStates.findReflectivelyAndBind(new PersistedTableValue(), binderUnderTest);
    assertThat(state.boundNames, hasItems("table"));
  }

  @Test
  public void bindPersistedAppendingBuffer() {
    PersistedStates.findReflectivelyAndBind(new PersistedAppendingBufferState(), binderUnderTest);
    assertThat(state.boundNames, hasItems("buffer"));
  }

  @Test
  public void bindRemotePersistedValue() {
    PersistedStates.findReflectivelyAndBind(new RemotePersistedValueState(), binderUnderTest);
    assertThat(state.boundNames, hasItems("remote"));
  }

  // states registered through a PersistedStateRegistry are bound whether registration happened
  // before the bind call (constructor) or after it (process())
  @Test
  public void bindDynamicState() {
    DynamicState dynamicState = new DynamicState();
    PersistedStates.findReflectivelyAndBind(dynamicState, binderUnderTest);
    dynamicState.process();
    assertThat(
        state.boundNames,
        hasItems(
            "in-constructor-value",
            "in-constructor-table",
            "in-constructor-buffer",
            "post-constructor-value",
            "post-constructor-table",
            "post-constructor-buffer"));
  }

  // a @Persisted field whose type itself contains @Persisted fields is traversed recursively
  @Test
  public void bindComposedState() {
    PersistedStates.findReflectivelyAndBind(new OuterClass(), binderUnderTest);
    assertThat(state.boundNames, hasItems("inner"));
  }

  // ---------------------------------------------------------------------------------------------
  // fixture classes exercising the different @Persisted layouts
  // ---------------------------------------------------------------------------------------------

  static final class SanityClass {
    @SuppressWarnings("unused")
    @Persisted
    PersistedValue<String> name = PersistedValue.of("name", String.class);

    @Persisted
    @SuppressWarnings("unused")
    PersistedValue<String> last = PersistedValue.of("last", String.class);
  }

  // the @Persisted field is deliberately left uninitialized (null)
  static final class NullValueClass {
    @SuppressWarnings("unused")
    @Persisted
    PersistedValue<String> last;
  }

  abstract static class ParentClass {
    @SuppressWarnings("unused")
    @Persisted
    PersistedValue<String> parent = PersistedValue.of("parent", String.class);
  }

  static final class ChildClass extends ParentClass {
    @SuppressWarnings("unused")
    @Persisted
    PersistedValue<String> child = PersistedValue.of("child", String.class);
  }

  // has state but no @Persisted annotation, so nothing should be bound
  static final class IgnoreNonAnnotated {
    @SuppressWarnings("unused")
    PersistedValue<String> last = PersistedValue.of("last", String.class);
  }

  // static @Persisted fields are rejected
  static final class StaticPersistedValue {
    @Persisted
    @SuppressWarnings("unused")
    static PersistedValue<String> value = PersistedValue.of("static", String.class);
  }

  static final class PersistedTableValue {
    @Persisted
    @SuppressWarnings("unused")
    PersistedTable<String, byte[]> value = PersistedTable.of("table", String.class, byte[].class);
  }

  static final class PersistedAppendingBufferState {
    @Persisted
    @SuppressWarnings("unused")
    PersistedAppendingBuffer<Boolean> value = PersistedAppendingBuffer.of("buffer", Boolean.class);
  }

  static final class RemotePersistedValueState {
    @Persisted
    @SuppressWarnings("unused")
    RemotePersistedValue remoteValue =
        RemotePersistedValue.of("remote", TypeName.parseFrom("io.statefun.types/raw"));
  }

  // registers state dynamically: some in the constructor, some later via process()
  static final class DynamicState {
    @Persisted PersistedStateRegistry provider = new PersistedStateRegistry();

    DynamicState() {
      provider.registerValue(PersistedValue.of("in-constructor-value", String.class));
      provider.registerTable(
          PersistedTable.of("in-constructor-table", String.class, Integer.class));
      provider.registerAppendingBuffer(
          PersistedAppendingBuffer.of("in-constructor-buffer", String.class));
    }

    void process() {
      provider.registerValue(PersistedValue.of("post-constructor-value", String.class));
      provider.registerTable(
          PersistedTable.of("post-constructor-table", String.class, Integer.class));
      provider.registerAppendingBuffer(
          PersistedAppendingBuffer.of("post-constructor-buffer", String.class));
    }
  }

  static final class InnerClass {
    @Persisted
    @SuppressWarnings("unused")
    PersistedValue<String> value = PersistedValue.of("inner", String.class);
  }

  static final class OuterClass {
    @Persisted
    @SuppressWarnings("unused")
    InnerClass innerClass = new InnerClass();
  }

  /**
   * An in-memory {@link State} that records every bound state name in {@link #boundNames} and
   * backs each accessor with plain Java fields/collections.
   */
  private static final class FakeState implements State {
    Set<String> boundNames = new HashSet<>();

    @Override
    public <T> Accessor<T> createFlinkStateAccessor(
        FunctionType functionType, PersistedValue<T> persistedValue) {
      boundNames.add(persistedValue.name());
      // single-slot accessor backed by a local field
      return new Accessor<T>() {
        T value;

        @Override
        public void set(T value) {
          this.value = value;
        }

        @Override
        public T get() {
          return value;
        }

        @Override
        public void clear() {
          value = null;
        }
      };
    }

    @Override
    public Accessor<byte[]> createFlinkRemoteStateAccessor(
        FunctionType functionType, RemotePersistedValue remotePersistedValue) {
      boundNames.add(remotePersistedValue.name());
      // single-slot byte[] accessor backed by a local field
      return new Accessor<byte[]>() {
        byte[] value;

        @Override
        public void set(byte[] value) {
          this.value = value;
        }

        @Override
        public byte[] get() {
          return value;
        }

        @Override
        public void clear() {
          value = null;
        }
      };
    }

    @Override
    public <K, V> TableAccessor<K, V> createFlinkStateTableAccessor(
        FunctionType functionType, PersistedTable<K, V> persistedTable) {
      boundNames.add(persistedTable.name());
      // table accessor backed by a HashMap
      return new TableAccessor<K, V>() {
        Map<K, V> map = new HashMap<>();

        @Override
        public void set(K key, V value) {
          map.put(key, value);
        }

        @Override
        public V get(K key) {
          return map.get(key);
        }

        @Override
        public void remove(K key) {
          map.remove(key);
        }

        @Override
        public Iterable<Map.Entry<K, V>> entries() {
          return map.entrySet();
        }

        @Override
        public Iterable<K> keys() {
          return map.keySet();
        }

        @Override
        public Iterable<V> values() {
          return map.values();
        }

        @Override
        public void clear() {
          map.clear();
        }
      };
    }

    @Override
    public <E> AppendingBufferAccessor<E> createFlinkStateAppendingBufferAccessor(
        FunctionType functionType, PersistedAppendingBuffer<E> persistedAppendingBuffer) {
      boundNames.add(persistedAppendingBuffer.name());
      // buffer accessor backed by a lazily-created ArrayList;
      // NOTE: view() returns null (not an empty iterable) before anything was appended
      return new AppendingBufferAccessor<E>() {
        private List<E> list;

        @Override
        public void append(@Nonnull E element) {
          if (list == null) {
            list = new ArrayList<>();
          }
          list.add(element);
        }

        @Override
        public void appendAll(@Nonnull List<E> elements) {
          if (list == null) {
            list = new ArrayList<>();
          }
          list.addAll(elements);
        }

        @Override
        public void replaceWith(@Nonnull List<E> elements) {
          list = elements;
        }

        @Nonnull
        @Override
        public Iterable<E> view() {
          return list;
        }

        @Override
        public void clear() {
          list = null;
        }
      };
    }

    // key scoping is irrelevant for these binding tests
    @Override
    public void setCurrentKey(Address key) {
      throw new UnsupportedOperationException();
    }
  }
}
| 6,076 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/feedback/FeedbackChannelTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.feedback;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.contains;
import java.util.ArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.junit.Ignore;
import org.junit.Test;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
/**
 * Tests and micro-benchmarks for {@link FeedbackChannel}.
 *
 * <p>The JMH benchmark is excluded from the regular suite ({@link #launchBenchmark()} is
 * {@code @Ignore}d) and exists to compare single-producer/single-consumer queue implementations.
 */
@SuppressWarnings({
  "SameParameterValue",
  "IOResourceOpenedButNotSafelyClosed",
  "resource",
  "unused"
})
public class FeedbackChannelTest {
  private static final SubtaskFeedbackKey<String> KEY =
      new FeedbackKey<String>("foo", 1).withSubTaskIndex(0, 1);

  @Test
  public void exampleUsage() {
    FeedbackChannel<String> channel =
        new FeedbackChannel<>(KEY, new LockFreeBatchFeedbackQueue<>());
    channel.put("hello");
    channel.put("world");
    channel.close();
    // the consumer executes on the calling thread (Runnable::run), so by the time
    // registerConsumer returns, everything previously put has been delivered.
    ArrayList<String> results = new ArrayList<>();
    channel.registerConsumer(results::add, Runnable::run);
    assertThat(results, contains("hello", "world"));
  }

  @Ignore("benchmarks are not run as part of a regular test suite.")
  @Test
  public void launchBenchmark() throws Exception {
    // The following is the result of comparing 3 different queue implementations for a spsc
    // this is the result of concurrently producing and consuming 1 million items.
    // The other implementations are not checked into the code base to avoid accidental
    // megamorphic call sites.
    //
    // Benchmark Mode  Cnt  Score   Error   Units
    // HandOffFeedbackChannelTest.lockBasedHandOffQueue avgt 4 55.284 ± 43.094 ms/op
    // HandOffFeedbackChannelTest.lockFreeBatchHandOffQueue avgt 4 17.411 ± 3.968 ms/op
    // HandOffFeedbackChannelTest.lockFreeStackBasedQueue avgt 4 42.569 ± 8.616 ms/op
    //
    Options opt =
        new OptionsBuilder()
            .include(this.getClass().getName() + ".*")
            .timeUnit(TimeUnit.MILLISECONDS)
            .warmupIterations(4)
            .measurementIterations(4)
            .threads(2)
            .forks(1)
            .shouldFailOnError(true)
            .shouldDoGC(true)
            .build();
    new Runner(opt).run();
  }

  @Benchmark
  @BenchmarkMode(Mode.AverageTime)
  public void lockFreeBatchHandOffQueue(Blackhole blackhole) {
    int bh = benchmark(new LockFreeBatchFeedbackQueue<>(), 1_000_000);
    blackhole.consume(bh);
  }

  /**
   * Produces {@code items} elements into a fresh channel on the calling thread while a single
   * daemon thread consumes them, and returns how many items the consumer observed.
   *
   * <p>NOTE(review): {@code consumed[0]} is read without synchronization after {@code close()};
   * that is acceptable for a rough benchmark score but is not a precise count.
   */
  private static int benchmark(FeedbackQueue<String> queue, int items) {
    FeedbackChannel<String> channel = new FeedbackChannel<>(KEY, queue);
    //
    // consumer
    //
    int[] consumed = new int[1];
    ExecutorService executor =
        Executors.newSingleThreadExecutor(
            runnable -> {
              Thread t = new Thread(runnable);
              t.setDaemon(true);
              return t;
            });
    channel.registerConsumer(unused -> consumed[0]++, executor);
    //
    // producer
    //
    for (int i = 0; i < items; i++) {
      channel.put("hello");
    }
    channel.close();
    return consumed[0];
  }
}
| 6,077 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/feedback/CheckpointsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.feedback;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;
import org.apache.flink.statefun.flink.core.logger.FeedbackLogger;
import org.apache.flink.util.Preconditions;
import org.junit.Test;
/**
 * Tests for {@link Checkpoints}, backed by an in-memory {@link FeedbackLogger} factory that
 * records every message appended to each created logger along with the logger's lifecycle state.
 */
public class CheckpointsTest {

  @Test
  public void usageExample() {
    RecordingLoggerFactory factory = new RecordingLoggerFactory();
    Checkpoints<String> checkpoints = new Checkpoints<>(factory);
    checkpoints.startLogging(1, new ByteArrayOutputStream());
    checkpoints.append("hello");
    checkpoints.append("world");
    checkpoints.commitCheckpointsUntil(1);
    assertThat(factory.items(0), contains("hello", "world"));
    assertThat(factory.state(0), is(LoggerState.COMMITTED));
  }

  @Test
  public void dataIsAppendedToMultipleLoggers() {
    RecordingLoggerFactory factory = new RecordingLoggerFactory();
    Checkpoints<String> checkpoints = new Checkpoints<>(factory);
    checkpoints.startLogging(1, new ByteArrayOutputStream());
    checkpoints.append("a");
    checkpoints.startLogging(2, new ByteArrayOutputStream());
    checkpoints.append("b");
    checkpoints.commitCheckpointsUntil(1);
    checkpoints.append("c");
    checkpoints.commitCheckpointsUntil(2);
    // "b" arrived while both checkpoints were active, so it lands in both loggers
    assertThat(factory.items(0), contains("a", "b"));
    assertThat(factory.items(1), contains("b", "c"));
  }

  @Test
  public void committingALaterCheckpointCommitsPreviousCheckpoints() {
    RecordingLoggerFactory factory = new RecordingLoggerFactory();
    Checkpoints<String> checkpoints = new Checkpoints<>(factory);
    checkpoints.startLogging(1, new ByteArrayOutputStream());
    checkpoints.startLogging(2, new ByteArrayOutputStream());
    checkpoints.commitCheckpointsUntil(2);
    assertThat(factory.state(0), is(LoggerState.COMMITTED));
    assertThat(factory.state(1), is(LoggerState.COMMITTED));
  }

  /** Lifecycle states a {@link RecordingLogger} moves through. */
  private enum LoggerState {
    IDLE,
    LOGGING,
    COMMITTED,
    CLOSED
  }

  /** Hands out {@link RecordingLogger}s and keeps them around for later inspection. */
  private static final class RecordingLoggerFactory implements Supplier<FeedbackLogger<String>> {
    private final List<RecordingLogger> created = new ArrayList<>();

    @Override
    public FeedbackLogger<String> get() {
      RecordingLogger logger = new RecordingLogger();
      created.add(logger);
      return logger;
    }

    /** Messages appended to the {@code index}-th created logger, in order. */
    List<String> items(int index) {
      Preconditions.checkElementIndex(index, created.size());
      return created.get(index).logged;
    }

    /** Current lifecycle state of the {@code index}-th created logger. */
    LoggerState state(int index) {
      Preconditions.checkElementIndex(index, created.size());
      return created.get(index).state;
    }
  }

  /** A {@link FeedbackLogger} that records appends and enforces legal state transitions. */
  private static final class RecordingLogger implements FeedbackLogger<String> {
    final List<String> logged = new ArrayList<>();
    LoggerState state = LoggerState.IDLE;

    @Override
    public void startLogging(OutputStream keyedStateCheckpointOutputStream) {
      if (state != LoggerState.IDLE) {
        throw new IllegalStateException();
      }
      state = LoggerState.LOGGING;
    }

    @Override
    public void append(String message) {
      if (state == LoggerState.COMMITTED || state == LoggerState.CLOSED) {
        throw new IllegalStateException();
      }
      logged.add(message);
    }

    @Override
    public void commit() {
      if (state != LoggerState.LOGGING) {
        throw new IllegalStateException();
      }
      state = LoggerState.COMMITTED;
    }

    @Override
    public void close() {
      state = LoggerState.CLOSED;
    }
  }
}
| 6,078 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/nettyclient/NettyProtobufTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.IntFunction;
import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf;
import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBufAllocator;
import org.apache.flink.statefun.sdk.reqreply.generated.Address;
import org.junit.After;
import org.junit.Test;
/** Round-trip (serialize/deserialize) tests for {@link NettyProtobuf}. */
public class NettyProtobufTest {

  /** Tracks the direct buffers handed out during a test so {@link #tearDown()} can free them. */
  private final AutoReleasingAllocator allocator = new AutoReleasingAllocator();

  @After
  public void tearDown() {
    allocator.close();
  }

  @Test
  public void roundTrip() {
    // direct (off-heap) buffers
    assertRoundTripsWithRandomSizes(allocator);
  }

  @Test
  public void heapBufferRoundTrip() {
    // heap-backed buffers
    assertRoundTripsWithRandomSizes(ByteBufAllocator.DEFAULT::heapBuffer);
  }

  /**
   * Serializes and deserializes 100 {@link Address} messages with randomly sized id payloads
   * (1 char up to ~1 MiB) using the supplied buffer allocator, asserting each round trip is
   * lossless.
   */
  private void assertRoundTripsWithRandomSizes(IntFunction<ByteBuf> bufAllocator) {
    char[] chars = new char[1024 * 1024];
    Arrays.fill(chars, 'x');
    String pad = new String(chars);
    for (int i = 0; i < 100; i++) {
      int size = ThreadLocalRandom.current().nextInt(1, pad.length());
      Address original =
          Address.newBuilder()
              .setNamespace("namespace")
              .setType("type")
              .setId(pad.substring(0, size))
              .build();
      Address actual = serdeRoundTrip(bufAllocator, original);
      assertThat(actual, is(original));
    }
  }

  /** Serializes {@code original} into a fresh buffer, parses it back, and releases the buffer. */
  private Address serdeRoundTrip(IntFunction<ByteBuf> bufAllocator, Address original) {
    ByteBuf buf = NettyProtobuf.serializeProtobuf(bufAllocator, original);
    Address got = NettyProtobuf.deserializeProtobuf(buf, Address.parser());
    buf.release();
    return got;
  }

  /**
   * An allocator of direct buffers that remembers every buffer it handed out; {@link #close()}
   * releases any still-outstanding reference counts, so a failing test cannot leak direct memory.
   */
  private static final class AutoReleasingAllocator implements IntFunction<ByteBuf>, AutoCloseable {
    private final ArrayDeque<ByteBuf> allocatedDuringATest = new ArrayDeque<>();

    @Override
    public ByteBuf apply(int value) {
      ByteBuf buf = ByteBufAllocator.DEFAULT.directBuffer(value);
      allocatedDuringATest.addLast(buf);
      return buf;
    }

    @Override
    public void close() {
      for (ByteBuf buf : allocatedDuringATest) {
        int refCount = buf.refCnt();
        if (refCount > 0) {
          buf.release(refCount);
        }
      }
    }
  }
}
| 6,079 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/nettyclient/NettyClientTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static org.apache.flink.statefun.flink.core.httpfn.TransportClientTest.FromFunctionNettyTestServer.*;
import static org.apache.flink.statefun.flink.core.nettyclient.NettyProtobuf.serializeProtobuf;
import static org.junit.Assert.*;
import java.net.URI;
import java.net.URL;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.net.ssl.SSLException;
import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelDuplexHandler;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandlerContext;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelPromise;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.*;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientTest;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * End-to-end tests for {@link NettyClient} against the in-process {@link
 * FromFunctionNettyTestServer}, covering plain HTTP, server-only TLS and mutual TLS, with
 * certificates/keys loaded either from the filesystem or from the classpath (the {@code
 * classpath:} prefix). The A_*/B_*/C_* resource location constants and {@code
 * TLS_FAILURE_MESSAGE} come from {@link TransportClientTest}.
 */
public class NettyClientTest extends TransportClientTest {
  // one server instance shared by all tests in this class; started once, closed in afterClass()
  private static FromFunctionNettyTestServer testServer;
  private static FromFunctionNettyTestServer.PortInfo portInfo;
  @BeforeClass
  public static void beforeClass() {
    testServer = new FromFunctionNettyTestServer();
    portInfo = testServer.runAndGetPortInfo();
  }
  @AfterClass
  public static void afterClass() throws Exception {
    testServer.close();
  }
  // plain HTTP: no TLS material configured at all
  @Test
  public void callingTestHttpServiceShouldSucceed() throws Throwable {
    assertTrue(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(createHttpSpec(), "http", portInfo.getHttpPort())));
  }
  // mutual TLS with certificates referenced by filesystem path (resolved from the classpath URLs)
  @Test
  public void callingTestHttpServiceWithTlsFromPathShouldSucceed() throws Throwable {
    URL caCertsUrl = getClass().getClassLoader().getResource(A_CA_CERTS_LOCATION);
    URL clientCertUrl = getClass().getClassLoader().getResource(A_SIGNED_CLIENT_CERT_LOCATION);
    URL clientKeyUrl = getClass().getClassLoader().getResource(A_SIGNED_CLIENT_KEY_LOCATION);
    URL clientKeyPasswordUrl =
        getClass().getClassLoader().getResource(A_SIGNED_CLIENT_KEY_PASSWORD_LOCATION);
    assertNotNull(caCertsUrl);
    assertNotNull(clientCertUrl);
    assertNotNull(clientKeyUrl);
    assertNotNull(clientKeyPasswordUrl);
    assertTrue(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(
                createSpec(
                    caCertsUrl.getPath(),
                    clientCertUrl.getPath(),
                    clientKeyUrl.getPath(),
                    clientKeyPasswordUrl.getPath()),
                "https",
                portInfo.getHttpsMutualTlsRequiredPort())));
  }
  // mutual TLS with certificates referenced via the "classpath:" prefix
  @Test
  public void callingTestHttpServiceWithTlsFromClasspathShouldSucceed() throws Throwable {
    assertTrue(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(
                createSpec(
                    "classpath:" + A_CA_CERTS_LOCATION,
                    "classpath:" + A_SIGNED_CLIENT_CERT_LOCATION,
                    "classpath:" + A_SIGNED_CLIENT_KEY_LOCATION,
                    "classpath:" + A_SIGNED_CLIENT_KEY_PASSWORD_LOCATION),
                "https",
                portInfo.getHttpsMutualTlsRequiredPort())));
  }
  // mutual TLS where the client key has no password (password argument is null)
  @Test
  public void callingTestHttpServiceWithTlsUsingKeyWithoutPasswordShouldSucceed() throws Throwable {
    assertTrue(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(
                createSpec(
                    "classpath:" + A_CA_CERTS_LOCATION,
                    "classpath:" + C_SIGNED_CLIENT_CERT_LOCATION,
                    "classpath:" + C_SIGNED_CLIENT_KEY_LOCATION,
                    null),
                "https",
                portInfo.getHttpsMutualTlsRequiredPort())));
  }
  // server-only TLS: the client presents no certificate, only trusts the server's CA
  @Test
  public void callingTestHttpServiceWithJustServerSideTlsShouldSucceed() throws Throwable {
    assertTrue(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(
                createSpec("classpath:" + A_CA_CERTS_LOCATION, null, null, null),
                "https",
                portInfo.getHttpsServerTlsOnlyPort())));
  }
  // client certificate signed by CA "B", which the server does not trust -> handshake must fail
  @Test(expected = SSLException.class)
  public void callingTestHttpServiceWithUntrustedTlsClientShouldFail() throws Throwable {
    assertFalse(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(
                createSpec(
                    "classpath:" + A_CA_CERTS_LOCATION,
                    "classpath:" + B_SIGNED_CLIENT_CERT_LOCATION,
                    "classpath:" + B_SIGNED_CLIENT_KEY_LOCATION,
                    "classpath:" + B_SIGNED_CLIENT_KEY_PASSWORD_LOCATION),
                "https",
                portInfo.getHttpsMutualTlsRequiredPort())));
  }
  // the client trusts only CA "B", so the server's certificate is untrusted -> handshake must fail
  @Test(expected = SSLException.class)
  public void callingAnUntrustedTestHttpServiceWithTlsClientShouldFail() throws Throwable {
    assertFalse(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(
                createSpec(
                    "classpath:" + B_CA_CERTS_LOCATION,
                    "classpath:" + A_SIGNED_CLIENT_CERT_LOCATION,
                    "classpath:" + A_SIGNED_CLIENT_KEY_LOCATION,
                    "classpath:" + A_SIGNED_CLIENT_KEY_PASSWORD_LOCATION),
                "https",
                portInfo.getHttpsMutualTlsRequiredPort())));
  }
  // the server requires mutual TLS but the client presents no certificate
  @Test(expected = SSLException.class)
  public void callingTestHttpServiceWhereTlsRequiredButNoCertGivenShouldFail() throws Throwable {
    assertFalse(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(
                createSpec("classpath:" + A_CA_CERTS_LOCATION, null, null, null),
                "https",
                portInfo.getHttpsMutualTlsRequiredPort())));
  }
  // a CA bundle location that does not exist should fail fast while constructing the client
  @Test(expected = IllegalStateException.class)
  public void callingTestHttpServerWithNonExistentCertsShouldFail() throws Throwable {
    assertFalse(
        TLS_FAILURE_MESSAGE,
        callUsingStubsAndCheckSuccess(
            createNettyClient(
                createSpec("classpath:" + "DEFINITELY_NON_EXISTENT", null, null, null),
                "https",
                portInfo.getHttpsServerTlsOnlyPort())));
  }
  /**
   * Builds a {@link NettyClient} whose channel pipeline ends in a stub duplex handler: outbound
   * {@link NettyRequest}s are converted into plain full HTTP POSTs, and the first inbound message
   * completes the returned future with the HTTP status code (or exceptionally on any pipeline
   * error).
   */
  private NettyClientWithResultStatusCodeFuture createNettyClient(
      NettyRequestReplySpec spec, String protocol, int port) {
    CompletableFuture<Integer> statusCodeFuture = new CompletableFuture<>();
    NettyClient nettyClient =
        NettyClient.from(
            new NettySharedResources(),
            spec,
            URI.create(String.format("%s://localhost:%s", protocol, port)),
            () ->
                new ChannelDuplexHandler() {
                  @Override
                  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
                      throws Exception {
                    // surface any pipeline failure (e.g. TLS handshake errors) to the test
                    statusCodeFuture.completeExceptionally(cause);
                    super.exceptionCaught(ctx, cause);
                  }
                  @Override
                  public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                    final FullHttpResponse response =
                        (msg instanceof FullHttpResponse) ? (FullHttpResponse) msg : null;
                    if (response != null) {
                      statusCodeFuture.complete(response.status().code());
                    } else {
                      statusCodeFuture.completeExceptionally(
                          new IllegalStateException(
                              "the object received by the test is not a FullHttpResponse"));
                    }
                    super.channelRead(ctx, msg);
                  }
                  @Override
                  public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
                    // serialize the ToFunction payload and send it as a full HTTP POST
                    final NettyRequest request = (NettyRequest) msg;
                    final ByteBuf bodyBuf =
                        serializeProtobuf(ctx.channel().alloc()::buffer, request.toFunction());
                    DefaultFullHttpRequest http =
                        new DefaultFullHttpRequest(
                            HttpVersion.HTTP_1_1,
                            HttpMethod.POST,
                            request.uri(),
                            bodyBuf,
                            new DefaultHttpHeaders(),
                            NettyHeaders.EMPTY);
                    ctx.writeAndFlush(http);
                  }
                });
    return new NettyClientWithResultStatusCodeFuture(nettyClient, statusCodeFuture);
  }
  /** A spec with no TLS material configured, for plain-HTTP calls. */
  private NettyRequestReplySpec createHttpSpec() {
    return createSpec(null, null, null, null);
  }
  /**
   * Creates a request-reply spec with one-minute timeouts; each TLS argument may be null, in
   * which case that piece of TLS configuration is omitted.
   */
  private NettyRequestReplySpec createSpec(
      String trustedCaCerts, String clientCerts, String clientKey, String clientKeyPassword) {
    return new NettyRequestReplySpec(
        Duration.ofMinutes(1L),
        Duration.ofMinutes(1L),
        Duration.ofMinutes(1L),
        1,
        128,
        trustedCaCerts,
        clientCerts,
        clientKey,
        clientKeyPassword,
        new NettyRequestReplySpec.Timeouts());
  }
  /**
   * Fires a single stub request through the given client and returns whether the HTTP response
   * status was 200 within 5 seconds.
   *
   * <p>NOTE(review): the failure cause is unwrapped twice ({@code e.getCause().getCause()}) —
   * presumably the {@link ExecutionException} wraps a client-side wrapper exception; confirm
   * against {@link NettyRequest}'s failure path.
   */
  private Boolean callUsingStubsAndCheckSuccess(
      NettyClientWithResultStatusCodeFuture nettyClientAndStatusCodeFuture) throws Throwable {
    NettyRequest nettyRequest =
        new NettyRequest(
            nettyClientAndStatusCodeFuture.nettyClient,
            getFakeMetrics(),
            getStubRequestSummary(),
            getEmptyToFunction());
    nettyRequest.start();
    try {
      return nettyClientAndStatusCodeFuture.resultStatusCodeFuture.get(5, TimeUnit.SECONDS) == 200;
    } catch (ExecutionException e) {
      throw e.getCause().getCause();
    }
  }
  /** Pairs a client with the future its stub handler completes with the response status code. */
  private static class NettyClientWithResultStatusCodeFuture {
    private final NettyClient nettyClient;
    private final CompletableFuture<Integer> resultStatusCodeFuture;
    public NettyClientWithResultStatusCodeFuture(
        NettyClient nettyClient, CompletableFuture<Integer> resultStatusCodeFuture) {
      this.nettyClient = nettyClient;
      this.resultStatusCodeFuture = resultStatusCodeFuture;
    }
  }
}
| 6,080 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/nettyclient/NettyRequestTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import java.io.Closeable;
import java.net.SocketAddress;
import java.time.Duration;
import java.util.ArrayDeque;
import java.util.IdentityHashMap;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;
import org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannel;
import org.apache.flink.shaded.netty4.io.netty.channel.Channel;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelConfig;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelMetadata;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelOutboundBuffer;
import org.apache.flink.shaded.netty4.io.netty.channel.EventLoop;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.ReadOnlyHttpHeaders;
import org.apache.flink.statefun.flink.core.metrics.RemoteInvocationMetrics;
import org.apache.flink.statefun.flink.core.nettyclient.exceptions.DisconnectedException;
import org.apache.flink.statefun.flink.core.nettyclient.exceptions.ShutdownException;
import org.apache.flink.statefun.flink.core.reqreply.ToFunctionRequestSummary;
import org.apache.flink.statefun.sdk.Address;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction;
import org.junit.Assert;
import org.junit.Test;
/**
 * Unit tests for {@link NettyRequest}, driven by a fully synchronous {@link FakeClient} so that
 * channel acquisition/release, retries and the request time budget can be exercised
 * deterministically without any real network I/O.
 */
public class NettyRequestTest {
  private final FakeMetrics FAKE_METRICS = new FakeMetrics();
  // happy path: start a request and complete it with a response
  @Test
  public void successfulSanity() {
    FakeClient fakeClient = new FakeClient();
    NettyRequest request =
        new NettyRequest(fakeClient, FAKE_METRICS, FAKE_SUMMARY, ToFunction.getDefaultInstance());
    request.start();
    request.complete(FromFunction.getDefaultInstance());
    assertThat(request.result().join(), is(FromFunction.getDefaultInstance()));
  }
  // a shutdown failure completes the result future exceptionally
  @Test
  public void unSuccessfulSanity() {
    FakeClient fakeClient = new FakeClient();
    NettyRequest request =
        new NettyRequest(fakeClient, FAKE_METRICS, FAKE_SUMMARY, ToFunction.getDefaultInstance());
    request.start();
    request.completeAttemptExceptionally(ShutdownException.INSTANCE);
    assertThat(request.result().isCompletedExceptionally(), is(true));
  }
  // failing to obtain a channel fails the request immediately
  @Test
  public void canNotAcquireChannel() {
    // a client that never returns a channel.
    class alwaysFailingToAcquireChannel extends FakeClient {
      @Override
      public void acquireChannel(BiConsumer<Channel, Throwable> consumer) {
        consumer.accept(null, new IllegalStateException("no channel for you"));
      }
    }
    NettyRequest request =
        new NettyRequest(
            new alwaysFailingToAcquireChannel(),
            FAKE_METRICS,
            FAKE_SUMMARY,
            ToFunction.getDefaultInstance());
    CompletableFuture<FromFunction> result = request.start();
    assertThat(result.isCompletedExceptionally(), is(true));
  }
  // every acquired channel must be handed back to the client when the request ends
  @Test
  public void acquiredChannelShouldBeReleased() {
    FakeClient fakeClient = new FakeClient();
    NettyRequest request =
        new NettyRequest(fakeClient, FAKE_METRICS, FAKE_SUMMARY, ToFunction.getDefaultInstance());
    request.start();
    assertEquals(1, fakeClient.LIVE_CHANNELS.size());
    request.completeAttemptExceptionally(ShutdownException.INSTANCE);
    assertEquals(0, fakeClient.LIVE_CHANNELS.size());
  }
  @Test
  public void failingWriteShouldFailTheRequest() {
    // the following is a client that allows acquiring a channel
    class client extends FakeClient {
      @Override
      public <T> void writeAndFlush(T what, Channel ch, BiConsumer<Void, Throwable> andThen) {
        andThen.accept(null, new IllegalStateException("can't write."));
      }
    }
    client fakeClient = new client();
    NettyRequest request =
        new NettyRequest(fakeClient, FAKE_METRICS, FAKE_SUMMARY, ToFunction.getDefaultInstance());
    request.start();
    Assert.assertTrue(request.result().isCompletedExceptionally());
  }
  // a disconnect consumes elapsed time from the request budget but does not fail the request
  @Test
  public void testRemainBudget() {
    FakeClient fakeClient = new FakeClient();
    fakeClient.REQUEST_BUDGET = Duration.ofMillis(20).toNanos();
    NettyRequest request =
        new NettyRequest(fakeClient, FAKE_METRICS, FAKE_SUMMARY, ToFunction.getDefaultInstance());
    request.start();
    // move the clock 5ms forward
    fakeClient.NOW += Duration.ofMillis(5).toNanos();
    // fail the request
    request.completeAttemptExceptionally(DisconnectedException.INSTANCE);
    Assert.assertFalse(request.result().isDone());
    assertEquals(Duration.ofMillis(15).toNanos(), request.remainingRequestBudgetNanos());
  }
  // keep disconnecting while advancing the fake clock by 1ms per attempt; the 20ms budget must
  // be exhausted within at most 20 attempts, at which point the result completes exceptionally
  @Test
  public void testRetries() {
    FakeClient fakeClient = new FakeClient();
    fakeClient.REQUEST_BUDGET = Duration.ofMillis(20).toNanos();
    NettyRequest request =
        new NettyRequest(fakeClient, FAKE_METRICS, FAKE_SUMMARY, ToFunction.getDefaultInstance());
    request.start();
    for (int i = 0; i < 20; i++) {
      request.completeAttemptExceptionally(DisconnectedException.INSTANCE);
      if (request.result().isCompletedExceptionally()) {
        return;
      }
      fakeClient.NOW += 1_000_000; // + 1ms
      // fire the captured retry timeout by hand
      fakeClient.TIMEOUTS.pop().run();
    }
    throw new AssertionError();
  }
  // ---------------------------------------------------------------------------------------------------------
  // Test collaborators
  // ---------------------------------------------------------------------------------------------------------
  /**
   * A synchronous, in-memory {@link NettyClientService}: channels are fake, timeouts are captured
   * in a queue (fired manually by tests), and "now" is a manually advanced nano clock.
   */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection", "FieldCanBeLocal", "FieldMayBeFinal"})
  private static class FakeClient implements NettyClientService {
    // test knobs
    private long NOW = 0; // fake System.nanoTime()
    private long REQUEST_BUDGET = 0; // total per-request time budget, in nanos
    final ArrayDeque<Runnable> TIMEOUTS = new ArrayDeque<>(); // captured timeout callbacks
    final IdentityHashMap<FakeChannel, Boolean> LIVE_CHANNELS = new IdentityHashMap<>();
    @Override
    public void acquireChannel(BiConsumer<Channel, Throwable> consumer) {
      FakeChannel ch = new FakeChannel();
      LIVE_CHANNELS.put(ch, Boolean.TRUE);
      consumer.accept(ch, null);
    }
    @SuppressWarnings("SuspiciousMethodCalls")
    @Override
    public void releaseChannel(Channel channel) {
      Boolean existed = LIVE_CHANNELS.remove(channel);
      if (existed == null) {
        throw new AssertionError("Trying to release a non allocated channel");
      }
    }
    @Override
    public String queryPath() {
      return "/";
    }
    @Override
    public ReadOnlyHttpHeaders headers() {
      return new ReadOnlyHttpHeaders(false);
    }
    @Override
    public long totalRequestBudgetInNanos() {
      return REQUEST_BUDGET;
    }
    @Override
    public Closeable newTimeout(Runnable client, long delayInNanos) {
      // record instead of scheduling; tests pop and run timeouts explicitly
      TIMEOUTS.add(client);
      return () -> {};
    }
    @Override
    public void runOnEventLoop(Runnable task) {
      // synchronous "event loop"
      task.run();
    }
    @Override
    public boolean isShutdown() {
      return false;
    }
    @Override
    public long systemNanoTime() {
      return NOW;
    }
    @Override
    public <T> void writeAndFlush(T what, Channel ch, BiConsumer<Void, Throwable> andThen) {}
  }
  /** Counts reported invocation failures; latency reports are ignored. */
  private static final class FakeMetrics implements RemoteInvocationMetrics {
    int failures;
    @Override
    public void remoteInvocationFailures() {
      failures++;
    }
    @Override
    public void remoteInvocationLatency(long elapsed) {}
  }
  private static final ToFunctionRequestSummary FAKE_SUMMARY =
      new ToFunctionRequestSummary(new Address(new FunctionType("a", "b"), "c"), 50, 3, 1);
  /** A do-nothing {@link Channel}; only object identity matters to these tests. */
  public static class FakeChannel extends AbstractChannel {
    public FakeChannel() {
      super(null);
    }
    @Override
    protected AbstractUnsafe newUnsafe() {
      return null;
    }
    @Override
    protected boolean isCompatible(EventLoop eventLoop) {
      return false;
    }
    @Override
    protected SocketAddress localAddress0() {
      return null;
    }
    @Override
    protected SocketAddress remoteAddress0() {
      return null;
    }
    @Override
    protected void doBind(SocketAddress socketAddress) {}
    @Override
    protected void doDisconnect() {}
    @Override
    protected void doClose() {}
    @Override
    protected void doBeginRead() {}
    @Override
    protected void doWrite(ChannelOutboundBuffer channelOutboundBuffer) {}
    @Override
    public ChannelConfig config() {
      return null;
    }
    @Override
    public boolean isOpen() {
      return false;
    }
    @Override
    public boolean isActive() {
      return false;
    }
    @Override
    public ChannelMetadata metadata() {
      return null;
    }
  }
}
| 6,081 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/nettyclient/EndpointTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.nettyclient;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import java.net.InetSocketAddress;
import java.net.URI;
import org.junit.Test;
/** Verifies {@link Endpoint}'s URI parsing: TLS flag, service address, and query path. */
public class EndpointTest {

  @Test
  public void exampleUsage() {
    Endpoint ep = new Endpoint(URI.create("https://api.gateway.com:1234/statefun?xyz=5678"));
    // an https scheme switches TLS on
    assertThat(ep.useTls(), is(true));
    assertThat(ep.serviceAddress().getHostString(), is("api.gateway.com"));
    assertThat(ep.serviceAddress().getPort(), is(1234));
    // path and query string are preserved verbatim
    assertThat(ep.queryPath(), is("/statefun?xyz=5678"));
  }

  @Test
  public void anotherExample() {
    Endpoint ep = new Endpoint(URI.create("https://greeter-svc/statefun"));
    assertThat(ep.useTls(), is(true));
    assertThat(ep.queryPath(), is("/statefun"));
    // when the URI omits a port, https defaults to 443
    InetSocketAddress addr = ep.serviceAddress();
    assertThat(addr.getHostString(), is("greeter-svc"));
    assertThat(addr.getPort(), is(443));
  }

  @Test
  public void emptyQueryPathIsASingleSlash() {
    // a URI with no path component normalizes to "/"
    Endpoint ep = new Endpoint(URI.create("http://greeter-svc"));
    assertThat(ep.queryPath(), is("/"));
  }

  @Test
  public void dontUseTls() {
    // plain http never enables TLS, regardless of host, port, or query
    Endpoint ep = new Endpoint(URI.create("http://api.gateway.com:1234/statefun?xyz=5678"));
    assertThat(ep.useTls(), is(false));
  }

  @Test
  public void useTls() {
    Endpoint ep = new Endpoint(URI.create("https://foobar.net"));
    assertThat(ep.useTls(), is(true));
  }
}
| 6,082 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/functions/PendingAsyncOperationsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import static org.apache.flink.statefun.flink.core.TestUtils.FUNCTION_1_ADDR;
import static org.apache.flink.statefun.flink.core.TestUtils.FUNCTION_2_ADDR;
import static org.hamcrest.CoreMatchers.allOf;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.hasKey;
import static org.junit.Assert.assertThat;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.function.Consumer;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.statefun.flink.core.TestUtils;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.sdk.Address;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeMatcher;
import org.junit.Test;
/**
 * Tests for {@link PendingAsyncOperations}.
 *
 * <p>Uses an in-memory, per-{@link Address} {@link MapState} fake that serves both as the backing
 * state and as the {@code Consumer<Address>} hook used to switch the "current key" before state
 * access (the same object is passed twice to the constructor).
 */
public class PendingAsyncOperationsTest {
  // Fake keyed state; states are kept in a plain map, partitioned by the current Address.
  private final MemoryMapState<Long, Message> miniStateBackend = new MemoryMapState<>();
  // Arbitrary payload used by all tests; its content is never inspected.
  private final Message dummyMessage = TestUtils.ENVELOPE_FACTORY.from(1);

  @Test
  public void exampleUsage() {
    PendingAsyncOperations pendingOps =
        new PendingAsyncOperations(miniStateBackend, miniStateBackend);
    miniStateBackend.setCurrentAddress(FUNCTION_1_ADDR);
    pendingOps.add(FUNCTION_1_ADDR, 1, dummyMessage);
    pendingOps.flush();
    // After an explicit flush, the added operation must be visible in the backing state.
    assertThat(miniStateBackend, matchesAddressState(FUNCTION_1_ADDR, hasKey(1L)));
  }

  @Test
  public void itemsAreExplicitlyFlushed() {
    PendingAsyncOperations pendingOps =
        new PendingAsyncOperations(miniStateBackend, miniStateBackend);
    miniStateBackend.setCurrentAddress(FUNCTION_1_ADDR);
    pendingOps.add(FUNCTION_1_ADDR, 1, dummyMessage);
    // No flush() call: nothing may reach the backing state yet.
    assertThat(miniStateBackend, not(matchesAddressState(FUNCTION_1_ADDR, hasKey(1L))));
  }

  @Test
  public void inFlightItemsDoNotFlush() {
    PendingAsyncOperations pendingOps =
        new PendingAsyncOperations(miniStateBackend, miniStateBackend);
    miniStateBackend.setCurrentAddress(FUNCTION_1_ADDR);
    pendingOps.add(FUNCTION_1_ADDR, 1, dummyMessage);
    // An operation removed (i.e. completed) before the flush must not be written out.
    pendingOps.remove(FUNCTION_1_ADDR, 1);
    pendingOps.flush();
    assertThat(miniStateBackend, not(matchesAddressState(FUNCTION_1_ADDR, hasKey(1L))));
  }

  @Test
  public void differentAddressesShouldBeFlushedToTheirStates() {
    PendingAsyncOperations pendingOps =
        new PendingAsyncOperations(miniStateBackend, miniStateBackend);
    miniStateBackend.setCurrentAddress(FUNCTION_1_ADDR);
    pendingOps.add(FUNCTION_1_ADDR, 1, dummyMessage);
    miniStateBackend.setCurrentAddress(FUNCTION_2_ADDR);
    pendingOps.add(FUNCTION_2_ADDR, 1, dummyMessage);
    pendingOps.flush();
    // Each pending operation must land in the state partition of its own address.
    assertThat(
        miniStateBackend,
        allOf(
            matchesAddressState(FUNCTION_1_ADDR, hasKey(1L)),
            matchesAddressState(FUNCTION_2_ADDR, hasKey(1L))));
  }

  /**
   * Hamcrest matcher that applies {@code matcher} to the state map kept for {@code address}
   * inside the {@link MemoryMapState} fake.
   */
  private static <K, V, M> Matcher<MemoryMapState<K, V>> matchesAddressState(
      Address address, Matcher<M> matcher) {
    return new TypeSafeMatcher<MemoryMapState<K, V>>() {
      @Override
      protected boolean matchesSafely(MemoryMapState<K, V> memoryMapState) {
        return matcher.matches(memoryMapState.states.get(address));
      }

      @Override
      public void describeTo(Description description) {
        matcher.describeTo(description);
      }
    };
  }

  /**
   * In-memory {@link MapState} fake partitioned by {@link Address}. Implementing
   * {@code Consumer<Address>} lets it double as the "set current key" callback that
   * {@link PendingAsyncOperations} invokes before touching state.
   */
  private static final class MemoryMapState<K, V> implements MapState<K, V>, Consumer<Address> {
    // address -> (key -> value); one independent map per function address.
    Map<Address, Map<K, V>> states = new HashMap<>();
    // The currently "keyed" address; all MapState operations act on its partition.
    Address address;

    @Override
    public void accept(Address address) {
      this.address = address;
    }

    public void setCurrentAddress(Address address) {
      this.address = address;
    }

    // Lazily creates the partition for the current address.
    public Map<K, V> perCurrentAddressState() {
      assert address != null;
      return states.computeIfAbsent(address, unused -> new HashMap<>());
    }

    @Override
    public V get(K key) {
      return perCurrentAddressState().get(key);
    }

    @Override
    public void put(K key, V value) {
      perCurrentAddressState().put(key, value);
    }

    @Override
    public void putAll(Map<K, V> map) {
      perCurrentAddressState().putAll(map);
    }

    @Override
    public void remove(K key) {
      perCurrentAddressState().remove(key);
    }

    @Override
    public boolean contains(K key) {
      return perCurrentAddressState().containsKey(key);
    }

    @Override
    public Iterable<Entry<K, V>> entries() {
      return perCurrentAddressState().entrySet();
    }

    @Override
    public Iterable<K> keys() {
      return perCurrentAddressState().keySet();
    }

    @Override
    public Iterable<V> values() {
      return perCurrentAddressState().values();
    }

    @Override
    public Iterator<Entry<K, V>> iterator() {
      return perCurrentAddressState().entrySet().iterator();
    }

    @Override
    public boolean isEmpty() {
      return perCurrentAddressState().isEmpty();
    }

    @Override
    public void clear() {
      perCurrentAddressState().clear();
    }
  }
}
| 6,083 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/functions/PredefinedFunctionLoaderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.IsNull.notNullValue;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.flink.statefun.sdk.*;
import org.apache.flink.statefun.sdk.StatefulFunction;
import org.junit.Test;
/**
 * Tests for {@link PredefinedFunctionLoader} provider resolution: type-specific providers take
 * precedence over per-namespace providers, and an unknown type fails loudly.
 */
public class PredefinedFunctionLoaderTest {
  private static final FunctionType TEST_TYPE = new FunctionType("namespace", "name");

  @Test
  public void exampleUsage() {
    PredefinedFunctionLoader loader =
        new PredefinedFunctionLoader(specificFunctionProviders(), Collections.emptyMap());
    StatefulFunction function = loader.load(TEST_TYPE);
    assertThat(function, notNullValue());
  }

  @Test
  public void withOnlyPerNamespaceFunctionProviders() {
    PredefinedFunctionLoader loader =
        new PredefinedFunctionLoader(Collections.emptyMap(), perNamespaceFunctionProviders());
    StatefulFunction function = loader.load(TEST_TYPE);
    assertThat(function, notNullValue());
  }

  @Test
  public void specificFunctionProvidersHigherPrecedence() {
    // Both provider maps can serve TEST_TYPE; the type-specific one (StatefulFunctionA) must win.
    PredefinedFunctionLoader loader =
        new PredefinedFunctionLoader(specificFunctionProviders(), perNamespaceFunctionProviders());
    StatefulFunction function = loader.load(TEST_TYPE);
    assertThat(function, instanceOf(StatefulFunctionA.class));
  }

  @Test(expected = IllegalArgumentException.class)
  public void nullLoadedFunctions() {
    // Loading a type no provider knows about must throw rather than return null.
    PredefinedFunctionLoader loader =
        new PredefinedFunctionLoader(specificFunctionProviders(), Collections.emptyMap());
    loader.load(new FunctionType("doesn't", "exist"));
  }

  /** Providers keyed by the exact function type. */
  private static Map<FunctionType, StatefulFunctionProvider> specificFunctionProviders() {
    final Map<FunctionType, StatefulFunctionProvider> providers = new HashMap<>();
    providers.put(
        new FunctionType(TEST_TYPE.namespace(), TEST_TYPE.name()), new SpecificFunctionProvider());
    return providers;
  }

  /** Providers keyed by namespace only. */
  private static Map<String, StatefulFunctionProvider> perNamespaceFunctionProviders() {
    final Map<String, StatefulFunctionProvider> providers = new HashMap<>();
    providers.put(TEST_TYPE.namespace(), new PerNamespaceFunctionProvider());
    return providers;
  }

  /** Supplies {@link StatefulFunctionA} for the test type only; {@code null} otherwise. */
  private static class SpecificFunctionProvider implements StatefulFunctionProvider {
    @Override
    public StatefulFunction functionOfType(FunctionType type) {
      return TEST_TYPE.equals(type) ? new StatefulFunctionA() : null;
    }
  }

  /** Supplies {@link StatefulFunctionB} for the test type only; {@code null} otherwise. */
  private static class PerNamespaceFunctionProvider implements StatefulFunctionProvider {
    @Override
    public StatefulFunction functionOfType(FunctionType type) {
      return TEST_TYPE.equals(type) ? new StatefulFunctionB() : null;
    }
  }

  /** Marker function used to assert precedence; invoke() does nothing. */
  private static class StatefulFunctionA implements StatefulFunction {
    @Override
    public void invoke(Context context, Object input) {}
  }

  /** Marker function used to assert precedence; invoke() does nothing. */
  private static class StatefulFunctionB implements StatefulFunction {
    @Override
    public void invoke(Context context, Object input) {}
  }
}
| 6,084 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/functions/LocalStatefulFunctionGroupTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import static org.apache.flink.statefun.flink.core.TestUtils.ENVELOPE_FACTORY;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.flink.statefun.flink.core.generated.EnvelopeAddress;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.metrics.FunctionTypeMetrics;
import org.apache.flink.statefun.sdk.Address;
import org.apache.flink.statefun.sdk.Context;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.metrics.Metrics;
import org.junit.Test;
/**
 * Tests for {@link LocalFunctionGroup}: enqueued messages are processed one at a time and handed
 * to the function resolved for the target's {@link FunctionType}.
 */
public class LocalStatefulFunctionGroupTest {
  // test constants
  private static final FunctionType FUNCTION_TYPE = new FunctionType("test", "a");
  private static final Address FUNCTION_1_ADDR = new Address(FUNCTION_TYPE, "a-1");
  private static final Address FUNCTION_2_ADDR = new Address(FUNCTION_TYPE, "a-2");
  private static final EnvelopeAddress DUMMY_PAYLOAD = EnvelopeAddress.getDefaultInstance();

  // test collaborators
  private final FakeContext context = new FakeContext();
  private final FakeFunction function = new FakeFunction();
  private final FakeFunctionRepository fakeRepository = new FakeFunctionRepository(function);

  // object under test
  private final LocalFunctionGroup functionGroupUnderTest =
      new LocalFunctionGroup(fakeRepository, context);

  @Test
  public void sanity() {
    // With no enqueued envelopes there is nothing to process.
    boolean processed = functionGroupUnderTest.processNextEnvelope();
    assertThat(processed, is(false));
  }

  @Test
  public void addingMessageWouldBeProcessedLater() {
    Message message = ENVELOPE_FACTORY.from(FUNCTION_1_ADDR, FUNCTION_2_ADDR, DUMMY_PAYLOAD);
    functionGroupUnderTest.enqueue(message);
    // Exactly one pending envelope: first call consumes it, second finds the queue empty.
    assertThat(functionGroupUnderTest.processNextEnvelope(), is(true));
    assertThat(functionGroupUnderTest.processNextEnvelope(), is(false));
  }

  @Test
  public void aFunctionWouldReceiveAMessageAddressedToIt() {
    Message message = ENVELOPE_FACTORY.from(FUNCTION_1_ADDR, FUNCTION_2_ADDR, DUMMY_PAYLOAD);
    functionGroupUnderTest.enqueue(message);
    functionGroupUnderTest.processNextEnvelope();
    // The fake function recorded the delivered message; it must carry the original target.
    Message m = function.receivedMessages.get(0);
    assertThat(m.target(), is(message.target()));
  }

  // ---------------------------------------------------------------------------
  // test helpers
  // ---------------------------------------------------------------------------

  /** Records every message delivered to it, in delivery order. */
  static final class FakeFunction implements LiveFunction {
    List<Message> receivedMessages = new ArrayList<>();

    @Override
    public void receive(Context context, Message message) {
      receivedMessages.add(message);
    }

    @Override
    public FunctionTypeMetrics metrics() {
      throw new UnsupportedOperationException();
    }
  }

  /** Resolves every function type to the single fake function. */
  static final class FakeFunctionRepository implements FunctionRepository {
    private LiveFunction function;

    FakeFunctionRepository(FakeFunction function) {
      this.function = function;
    }

    @Override
    public LiveFunction get(FunctionType type) {
      return function;
    }
  }

  /**
   * Minimal {@code ApplyingContext}: remembers the in-flight message so that {@code self()} /
   * {@code caller()} can answer, and forwards the message straight to the function. All messaging
   * operations are no-ops.
   */
  static final class FakeContext implements ApplyingContext {
    Message in;

    @Override
    public Address self() {
      return in.target();
    }

    @Override
    public Address caller() {
      return in.source();
    }

    @Override
    public void send(Address to, Object message) {}

    @Override
    public <T> void send(EgressIdentifier<T> egress, T what) {}

    @Override
    public void sendAfter(Duration duration, Address to, Object message) {}

    @Override
    public void sendAfter(Duration delay, Address to, Object message, String cancellationToken) {}

    @Override
    public void cancelDelayedMessage(String cancellationToken) {}

    @Override
    public <M, T> void registerAsyncOperation(M metadata, CompletableFuture<T> future) {}

    @Override
    public Metrics metrics() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void apply(LiveFunction function, Message inMessage) {
      in = inMessage;
      function.receive(this, inMessage);
    }
  }
}
| 6,085 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/functions/ReductionsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.functions;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertThat;
import java.io.Serializable;
import java.util.*;
import java.util.Map.Entry;
import java.util.stream.Stream;
import javax.annotation.Nonnull;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.accumulators.*;
import org.apache.flink.api.common.accumulators.Histogram;
import org.apache.flink.api.common.cache.DistributedCache;
import org.apache.flink.api.common.externalresource.ExternalResourceInfo;
import org.apache.flink.api.common.functions.BroadcastVariableInitializer;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.state.*;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.metrics.*;
import org.apache.flink.metrics.groups.OperatorMetricGroup;
import org.apache.flink.runtime.state.*;
import org.apache.flink.runtime.state.heap.HeapPriorityQueueElement;
import org.apache.flink.runtime.state.internal.InternalListState;
import org.apache.flink.shaded.guava30.com.google.common.util.concurrent.MoreExecutors;
import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverse;
import org.apache.flink.statefun.flink.core.TestUtils;
import org.apache.flink.statefun.flink.core.backpressure.ThresholdBackPressureValve;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.apache.flink.streaming.api.operators.InternalTimerService;
import org.apache.flink.streaming.api.operators.Output;
import org.apache.flink.streaming.api.operators.Triggerable;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.runtime.streamrecord.LatencyMarker;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.streaming.runtime.watermarkstatus.WatermarkStatus;
import org.apache.flink.util.OutputTag;
import org.apache.flink.util.function.BiConsumerWithException;
import org.junit.Test;
/**
 * Smoke test for the {@link Reductions} factory: wiring it up with no-op fakes for every runtime
 * collaborator must succeed and produce a non-null instance. The bulk of this class is those
 * fakes, which implement the Flink runtime interfaces with the minimum behavior the factory
 * touches; everything else throws {@link UnsupportedOperationException}.
 */
public class ReductionsTest {
  @Test
  public void testFactory() {
    Reductions reductions =
        Reductions.create(
            // -1 threshold — presumably keeps the backpressure valve open for this test; confirm
            new ThresholdBackPressureValve(-1),
            new StatefulFunctionsUniverse(
                MessageFactoryKey.forType(MessageFactoryType.WITH_KRYO_PAYLOADS, null)),
            new FakeRuntimeContext(),
            new FakeKeyedStateBackend(),
            new FakeTimerServiceFactory(),
            new FakeInternalListState(),
            new FakeMapState<>(),
            new HashMap<>(),
            new FakeOutput(),
            TestUtils.ENVELOPE_FACTORY,
            MoreExecutors.directExecutor(),
            new FakeMetricGroup(),
            new FakeMapState<>());
    assertThat(reductions, notNullValue());
  }

  /**
   * {@link RuntimeContext} fake: state accessors return inert no-op state objects,
   * {@link #getExecutionConfig()} returns a default config, and all counters/subtask indices
   * return 0. Everything else throws.
   */
  @SuppressWarnings("deprecation")
  private static final class FakeRuntimeContext implements RuntimeContext {
    @Override
    public <T> ValueState<T> getState(ValueStateDescriptor<T> stateProperties) {
      // Inert ValueState: always empty, updates are dropped.
      return new ValueState<T>() {
        @Override
        public T value() {
          return null;
        }

        @Override
        public void update(T value) {}

        @Override
        public void clear() {}
      };
    }

    @Override
    public <UK, UV> MapState<UK, UV> getMapState(MapStateDescriptor<UK, UV> stateProperties) {
      // Inert MapState: always empty, writes are dropped.
      return new MapState<UK, UV>() {
        @Override
        public UV get(UK key) {
          return null;
        }

        @Override
        public void put(UK key, UV value) {}

        @Override
        public void putAll(Map<UK, UV> map) {}

        @Override
        public void remove(UK key) {}

        @Override
        public boolean contains(UK key) {
          return false;
        }

        @Override
        public Iterable<Entry<UK, UV>> entries() {
          return null;
        }

        @Override
        public Iterable<UK> keys() {
          return null;
        }

        @Override
        public Iterable<UV> values() {
          return null;
        }

        @Override
        public Iterator<Entry<UK, UV>> iterator() {
          return null;
        }

        @Override
        public boolean isEmpty() throws Exception {
          return true;
        }

        @Override
        public void clear() {}
      };
    }

    @Override
    public ExecutionConfig getExecutionConfig() {
      return new ExecutionConfig();
    }

    // everything below this line would throw UnsupportedOperationException()
    @Override
    public String getTaskName() {
      throw new UnsupportedOperationException();
    }

    @Override
    public OperatorMetricGroup getMetricGroup() {
      throw new UnsupportedOperationException();
    }

    @Override
    public int getNumberOfParallelSubtasks() {
      return 0;
    }

    @Override
    public int getMaxNumberOfParallelSubtasks() {
      return 0;
    }

    @Override
    public int getIndexOfThisSubtask() {
      return 0;
    }

    @Override
    public int getAttemptNumber() {
      return 0;
    }

    @Override
    public String getTaskNameWithSubtasks() {
      throw new UnsupportedOperationException();
    }

    @Override
    public ClassLoader getUserCodeClassLoader() {
      throw new UnsupportedOperationException();
    }

    @Override
    public <V, A extends Serializable> void addAccumulator(
        String name, Accumulator<V, A> accumulator) {}

    @Override
    public <V, A extends Serializable> Accumulator<V, A> getAccumulator(String name) {
      throw new UnsupportedOperationException();
    }

    @Override
    public IntCounter getIntCounter(String name) {
      throw new UnsupportedOperationException();
    }

    @Override
    public LongCounter getLongCounter(String name) {
      throw new UnsupportedOperationException();
    }

    @Override
    public DoubleCounter getDoubleCounter(String name) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Histogram getHistogram(String name) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Set<ExternalResourceInfo> getExternalResourceInfos(String resourceName) {
      throw new UnsupportedOperationException();
    }

    @Override
    public boolean hasBroadcastVariable(String name) {
      return false;
    }

    @Override
    public <RT> List<RT> getBroadcastVariable(String name) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T, C> C getBroadcastVariableWithInitializer(
        String name, BroadcastVariableInitializer<T, C> initializer) {
      throw new UnsupportedOperationException();
    }

    @Override
    public DistributedCache getDistributedCache() {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T> ListState<T> getListState(ListStateDescriptor<T> stateProperties) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T> ReducingState<T> getReducingState(ReducingStateDescriptor<T> stateProperties) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <IN, ACC, OUT> AggregatingState<IN, OUT> getAggregatingState(
        AggregatingStateDescriptor<IN, ACC, OUT> stateProperties) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void registerUserCodeClassLoaderReleaseHookIfAbsent(String s, Runnable runnable) {
      throw new UnsupportedOperationException();
    }

    @Override
    public JobID getJobId() {
      throw new UnsupportedOperationException();
    }
  }

  /**
   * {@link KeyedStateBackend} fake: key-selection bookkeeping and disposal are no-ops; every
   * state-creating or key-reading operation throws.
   */
  private static final class FakeKeyedStateBackend implements KeyedStateBackend<Object> {
    @Override
    public <N, S extends State, T> void applyToAllKeys(
        N namespace,
        TypeSerializer<N> namespaceSerializer,
        StateDescriptor<S, T> stateDescriptor,
        KeyedStateFunction<Object, S> function) {}

    @Override
    public <N> Stream<Object> getKeys(String state, N namespace) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <N, S extends State, T> S getOrCreateKeyedState(
        TypeSerializer<N> namespaceSerializer, StateDescriptor<S, T> stateDescriptor) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <N, S extends State> S getPartitionedState(
        N namespace, TypeSerializer<N> namespaceSerializer, StateDescriptor<S, ?> stateDescriptor) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void dispose() {}

    @Override
    public void registerKeySelectionListener(KeySelectionListener<Object> listener) {}

    @Override
    public boolean deregisterKeySelectionListener(KeySelectionListener<Object> listener) {
      return false;
    }

    @Nonnull
    @Override
    public <N, SV, SEV, S extends State, IS extends S> IS createOrUpdateInternalState(
        @Nonnull TypeSerializer<N> typeSerializer,
        @Nonnull StateDescriptor<S, SV> stateDescriptor,
        @Nonnull
            StateSnapshotTransformer.StateSnapshotTransformFactory<SEV>
                stateSnapshotTransformFactory)
        throws Exception {
      throw new UnsupportedOperationException();
    }

    @Nonnull
    @Override
    public <T extends HeapPriorityQueueElement & PriorityComparable<? super T> & Keyed<?>>
        KeyGroupedInternalPriorityQueue<T> create(
            @Nonnull String s, @Nonnull TypeSerializer<T> typeSerializer) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Object getCurrentKey() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setCurrentKey(Object newKey) {}

    @Override
    public TypeSerializer<Object> getKeySerializer() {
      throw new UnsupportedOperationException();
    }

    @Override
    public <N> Stream<Tuple2<Object, N>> getKeysAndNamespaces(String state) {
      throw new UnsupportedOperationException();
    }
  }

  /** Always hands out a {@link FakeTimerService}, ignoring the triggerable. */
  private static final class FakeTimerServiceFactory implements TimerServiceFactory {
    @Override
    public InternalTimerService<VoidNamespace> createTimerService(
        Triggerable<String, VoidNamespace> triggerable) {
      return new FakeTimerService();
    }
  }

  /** Timer service fake: time stands still at 0; registering/deleting timers throws. */
  private static final class FakeTimerService implements InternalTimerService<VoidNamespace> {
    @Override
    public long currentProcessingTime() {
      return 0;
    }

    @Override
    public long currentWatermark() {
      return 0;
    }

    @Override
    public void registerEventTimeTimer(VoidNamespace namespace, long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void registerProcessingTimeTimer(VoidNamespace namespace, long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void deleteEventTimeTimer(VoidNamespace namespace, long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void deleteProcessingTimeTimer(VoidNamespace namespace, long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void forEachEventTimeTimer(
        BiConsumerWithException<VoidNamespace, Long, Exception> consumer) throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public void forEachProcessingTimeTimer(
        BiConsumerWithException<VoidNamespace, Long, Exception> consumer) throws Exception {
      throw new UnsupportedOperationException();
    }
  }

  /** {@link InternalListState} fake: every operation throws. */
  private static final class FakeInternalListState
      implements InternalListState<String, Long, Message> {
    @Override
    public void add(Message value) throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public void addAll(List<Message> values) throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public void update(List<Message> values) throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public void updateInternal(List<Message> valueToStore) throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setCurrentNamespace(Long namespace) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void clear() {
      throw new UnsupportedOperationException();
    }

    @Override
    public byte[] getSerializedValue(
        byte[] serializedKeyAndNamespace,
        TypeSerializer<String> safeKeySerializer,
        TypeSerializer<Long> safeNamespaceSerializer,
        TypeSerializer<List<Message>> safeValueSerializer)
        throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public List<Message> getInternal() throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public Iterable<Message> get() throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public void mergeNamespaces(Long target, Collection<Long> sources) throws Exception {
      throw new UnsupportedOperationException();
    }

    @Override
    public StateIncrementalVisitor<String, Long, List<Message>> getStateIncrementalVisitor(
        int recommendedMaxNumberOfReturnedRecords) {
      throw new UnsupportedOperationException();
    }

    @Override
    public TypeSerializer<Long> getNamespaceSerializer() {
      throw new UnsupportedOperationException();
    }

    @Override
    public TypeSerializer<String> getKeySerializer() {
      throw new UnsupportedOperationException();
    }

    @Override
    public TypeSerializer<List<Message>> getValueSerializer() {
      throw new UnsupportedOperationException();
    }
  }

  /** {@link MapState} fake: always empty; reads return null/true/false, writes are dropped. */
  private static final class FakeMapState<K, V> implements MapState<K, V> {
    @Override
    public V get(K key) throws Exception {
      return null;
    }

    @Override
    public void put(K key, V value) throws Exception {}

    @Override
    public void putAll(Map<K, V> map) throws Exception {}

    @Override
    public void remove(K key) throws Exception {}

    @Override
    public boolean contains(K key) throws Exception {
      return false;
    }

    @Override
    public Iterable<Entry<K, V>> entries() throws Exception {
      return null;
    }

    @Override
    public Iterable<K> keys() throws Exception {
      return null;
    }

    @Override
    public Iterable<V> values() throws Exception {
      return null;
    }

    @Override
    public Iterator<Entry<K, V>> iterator() throws Exception {
      return null;
    }

    @Override
    public boolean isEmpty() throws Exception {
      return true;
    }

    @Override
    public void clear() {}
  }

  /** {@link Output} fake: silently discards records, watermarks and latency markers. */
  private static final class FakeOutput implements Output<StreamRecord<Message>> {
    @Override
    public void emitWatermark(Watermark mark) {}

    @Override
    public void emitWatermarkStatus(WatermarkStatus watermarkStatus) {}

    @Override
    public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> record) {}

    @Override
    public void emitLatencyMarker(LatencyMarker latencyMarker) {}

    @Override
    public void collect(StreamRecord<Message> record) {}

    @Override
    public void close() {}
  }

  /**
   * {@link MetricGroup} fake: only {@code counter(String)} works (fresh {@link SimpleCounter});
   * {@code getScopeComponents()} is empty; everything else throws.
   */
  private static final class FakeMetricGroup implements MetricGroup {
    @Override
    public Counter counter(int i) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Counter counter(String s) {
      return new SimpleCounter();
    }

    @Override
    public <C extends Counter> C counter(int i, C c) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <C extends Counter> C counter(String s, C c) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T, G extends Gauge<T>> G gauge(int i, G g) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T, G extends Gauge<T>> G gauge(String s, G g) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <H extends org.apache.flink.metrics.Histogram> H histogram(String s, H h) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <H extends org.apache.flink.metrics.Histogram> H histogram(int i, H h) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <M extends Meter> M meter(String s, M m) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <M extends Meter> M meter(int i, M m) {
      throw new UnsupportedOperationException();
    }

    @Override
    public MetricGroup addGroup(int i) {
      throw new UnsupportedOperationException();
    }

    @Override
    public MetricGroup addGroup(String s) {
      throw new UnsupportedOperationException();
    }

    @Override
    public MetricGroup addGroup(String s, String s1) {
      throw new UnsupportedOperationException();
    }

    @Override
    public String[] getScopeComponents() {
      return new String[0];
    }

    @Override
    public Map<String, String> getAllVariables() {
      throw new UnsupportedOperationException();
    }

    @Override
    public String getMetricIdentifier(String s) {
      throw new UnsupportedOperationException();
    }

    @Override
    public String getMetricIdentifier(String s, CharacterFilter characterFilter) {
      throw new UnsupportedOperationException();
    }
  }
}
| 6,086 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/httpfn/DefaultHttpRequestReplyClientSpecTest.java | package org.apache.flink.statefun.flink.core.httpfn;
import static org.junit.Assert.*;
import java.time.Duration;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.statefun.flink.common.json.StateFunObjectMapper;
import org.hamcrest.Description;
import org.hamcrest.TypeSafeDiagnosingMatcher;
import org.junit.Test;
/**
 * Round-trip test for {@link DefaultHttpRequestReplyClientSpec}: serializing the spec to JSON and
 * deserializing it back must preserve all four timeout values.
 */
public class DefaultHttpRequestReplyClientSpecTest {

  @Test
  public void jsonSerDe() throws JsonProcessingException {
    // Distinct magnitudes per field so a swapped assignment would be caught.
    final Duration callTimeout = Duration.ofDays(1L);
    final Duration connectTimeout = Duration.ofNanos(2L);
    final Duration readTimeout = Duration.ofSeconds(3L);
    final Duration writeTimeout = Duration.ofMillis(4L);

    final DefaultHttpRequestReplyClientSpec.Timeouts timeouts =
        new DefaultHttpRequestReplyClientSpec.Timeouts();
    timeouts.setCallTimeout(callTimeout);
    timeouts.setConnectTimeout(connectTimeout);
    timeouts.setReadTimeout(readTimeout);
    timeouts.setWriteTimeout(writeTimeout);

    final DefaultHttpRequestReplyClientSpec defaultHttpRequestReplyClientSpec =
        new DefaultHttpRequestReplyClientSpec();
    defaultHttpRequestReplyClientSpec.setTimeouts(timeouts);

    final ObjectMapper objectMapper = StateFunObjectMapper.create();
    final ObjectNode json = defaultHttpRequestReplyClientSpec.toJson(objectMapper);
    final DefaultHttpRequestReplyClientSpec deserializedHttpRequestReplyClientSpec =
        DefaultHttpRequestReplyClientSpec.fromJson(objectMapper, json);

    assertThat(deserializedHttpRequestReplyClientSpec.getTimeouts(), equalTimeouts(timeouts));
  }

  /** Matcher asserting field-by-field equality of two {@code Timeouts} instances. */
  private static TypeSafeDiagnosingMatcher<DefaultHttpRequestReplyClientSpec.Timeouts>
      equalTimeouts(DefaultHttpRequestReplyClientSpec.Timeouts timeouts) {
    return new TimeoutsEqualityMatcher(timeouts);
  }

  private static class TimeoutsEqualityMatcher
      extends TypeSafeDiagnosingMatcher<DefaultHttpRequestReplyClientSpec.Timeouts> {

    private final DefaultHttpRequestReplyClientSpec.Timeouts expected;

    private TimeoutsEqualityMatcher(DefaultHttpRequestReplyClientSpec.Timeouts timeouts) {
      this.expected = timeouts;
    }

    @Override
    protected boolean matchesSafely(
        DefaultHttpRequestReplyClientSpec.Timeouts timeouts, Description description) {
      // Check every field (no short-circuit) so a failure reports all mismatches at once.
      boolean matching = true;
      matching &= checkEqual(timeouts.getCallTimeout(), expected.getCallTimeout(), description);
      matching &= checkEqual(timeouts.getReadTimeout(), expected.getReadTimeout(), description);
      matching &= checkEqual(timeouts.getWriteTimeout(), expected.getWriteTimeout(), description);
      matching &=
          checkEqual(timeouts.getConnectTimeout(), expected.getConnectTimeout(), description);
      return matching;
    }

    /**
     * Returns true when the two durations are equal; otherwise appends
     * "expected &lt;e&gt; found &lt;a&gt;" to the mismatch description and returns false.
     */
    private static boolean checkEqual(
        Duration actual, Duration expectedValue, Description description) {
      if (actual.equals(expectedValue)) {
        return true;
      }
      description
          .appendText("expected ")
          .appendValue(expectedValue)
          .appendText(" found ")
          .appendValue(actual);
      return false;
    }

    @Override
    public void describeTo(Description description) {
      description.appendText("Matches equality of Timeouts");
    }
  }
}
| 6,087 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/httpfn/TransportClientTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import static org.apache.flink.statefun.flink.core.TestUtils.openStreamOrThrow;
import java.io.IOException;
import java.io.InputStream;
import java.net.ServerSocket;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.flink.shaded.netty4.io.netty.bootstrap.ServerBootstrap;
import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf;
import org.apache.flink.shaded.netty4.io.netty.buffer.Unpooled;
import org.apache.flink.shaded.netty4.io.netty.channel.*;
import org.apache.flink.shaded.netty4.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.flink.shaded.netty4.io.netty.channel.socket.nio.NioServerSocketChannel;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.*;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.ClientAuth;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.SslContext;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.SslContextBuilder;
import org.apache.flink.shaded.netty4.io.netty.handler.ssl.SslProvider;
import org.apache.flink.statefun.flink.common.ResourceLocator;
import org.apache.flink.statefun.flink.core.metrics.RemoteInvocationMetrics;
import org.apache.flink.statefun.flink.core.reqreply.ToFunctionRequestSummary;
import org.apache.flink.statefun.sdk.Address;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.reqreply.generated.FromFunction;
import org.apache.flink.statefun.sdk.reqreply.generated.ToFunction;
/**
 * Base class for transport-client tests. Provides classpath locations for the TLS test
 * certificates/keys and an embedded Netty server ({@link FromFunctionNettyTestServer}) that
 * answers every HTTP request with a canned {@link FromFunction} response, over plain HTTP,
 * mutual-TLS, and server-only-TLS ports.
 */
public abstract class TransportClientTest {
  protected static final String A_CA_CERTS_LOCATION = "certs/a_caCerts.pem";
  protected static final String A_SIGNED_CLIENT_CERT_LOCATION = "certs/a_client.crt";
  protected static final String A_SIGNED_CLIENT_KEY_LOCATION = "certs/a_client.key.p8";
  protected static final String A_SIGNED_SERVER_CERT_LOCATION = "certs/a_server.crt";
  protected static final String A_SIGNED_SERVER_KEY_LOCATION = "certs/a_server.key.p8";
  protected static final String B_CA_CERTS_LOCATION = "certs/b_caCerts.pem";
  protected static final String B_SIGNED_CLIENT_CERT_LOCATION = "certs/b_client.crt";
  protected static final String B_SIGNED_CLIENT_KEY_LOCATION = "certs/b_client.key.p8";
  protected static final String C_SIGNED_CLIENT_CERT_LOCATION = "certs/c_client.crt";
  protected static final String C_SIGNED_CLIENT_KEY_LOCATION = "certs/c_client.key.p8";
  protected static final String A_SIGNED_CLIENT_KEY_PASSWORD_LOCATION = "certs/key_password.txt";
  // All test keys share the same password file.
  protected static final String A_SIGNED_SERVER_KEY_PASSWORD_LOCATION =
      A_SIGNED_CLIENT_KEY_PASSWORD_LOCATION;
  protected static final String B_SIGNED_CLIENT_KEY_PASSWORD_LOCATION =
      A_SIGNED_CLIENT_KEY_PASSWORD_LOCATION;
  protected static final String TLS_FAILURE_MESSAGE = "Unexpected TLS connection test result";

  /** Embedded Netty HTTP server that replies to any request with a stub {@link FromFunction}. */
  public static class FromFunctionNettyTestServer {
    private EventLoopGroup eventLoopGroup;
    private EventLoopGroup workerGroup;

    /** The canned response payload served by every endpoint of this server. */
    public static FromFunction getStubFromFunction() {
      return FromFunction.newBuilder()
          .setInvocationResult(
              FromFunction.InvocationResponse.newBuilder()
                  .addOutgoingEgresses(FromFunction.EgressMessage.newBuilder()))
          .build();
    }

    /**
     * Starts three listeners (plain HTTP, mutual TLS, server-only TLS) on free ports and
     * returns the port numbers. Call {@link #close()} to shut the server down.
     */
    public PortInfo runAndGetPortInfo() {
      eventLoopGroup = new NioEventLoopGroup();
      workerGroup = new NioEventLoopGroup();
      try {
        ServerBootstrap httpBootstrap = getServerBootstrap(getChannelInitializer());
        // Mutual TLS: server presents cert A and requires a client cert signed by CA A.
        ServerBootstrap httpsMutualTlsBootstrap =
            getServerBootstrap(
                getChannelInitializer(
                    openStreamOrThrow(
                        ResourceLocator.findNamedResource("classpath:" + A_CA_CERTS_LOCATION)),
                    openStreamOrThrow(
                        ResourceLocator.findNamedResource(
                            "classpath:" + A_SIGNED_SERVER_CERT_LOCATION)),
                    openStreamOrThrow(
                        ResourceLocator.findNamedResource(
                            "classpath:" + A_SIGNED_SERVER_KEY_LOCATION)),
                    openStreamOrThrow(
                        ResourceLocator.findNamedResource(
                            "classpath:" + A_SIGNED_SERVER_KEY_PASSWORD_LOCATION))));
        // Server-only TLS: server presents cert A, client authentication not requested.
        ServerBootstrap httpsServerTlsBootstrap =
            getServerBootstrap(
                getChannelInitializer(
                    openStreamOrThrow(
                        ResourceLocator.findNamedResource(
                            "classpath:" + A_SIGNED_SERVER_CERT_LOCATION)),
                    openStreamOrThrow(
                        ResourceLocator.findNamedResource(
                            "classpath:" + A_SIGNED_SERVER_KEY_LOCATION)),
                    openStreamOrThrow(
                        ResourceLocator.findNamedResource(
                            "classpath:" + A_SIGNED_SERVER_KEY_PASSWORD_LOCATION))));
        int httpPort = randomFreePort();
        httpBootstrap.bind(httpPort).sync();
        int httpsMutualTlsPort = randomFreePort();
        httpsMutualTlsBootstrap.bind(httpsMutualTlsPort).sync();
        int httpsServerTlsOnlyPort = randomFreePort();
        httpsServerTlsBootstrap.bind(httpsServerTlsOnlyPort).sync();
        return new PortInfo(httpPort, httpsMutualTlsPort, httpsServerTlsOnlyPort);
      } catch (Exception e) {
        throw new IllegalStateException("Could not start a test netty server", e);
      }
    }

    /** Initializer for the mutual-TLS listener (client certificates are required). */
    private ChannelInitializer<Channel> getChannelInitializer(
        InputStream trustInputStream,
        InputStream certInputStream,
        InputStream keyInputStream,
        InputStream keyPasswordInputStream)
        throws IOException {
      String keyPassword = IOUtils.toString(keyPasswordInputStream, StandardCharsets.UTF_8);
      return getTlsEnabledInitializer(
          SslContextBuilder.forServer(certInputStream, keyInputStream, keyPassword)
              .trustManager(trustInputStream),
          ClientAuth.REQUIRE);
    }

    /** Initializer for the server-only-TLS listener (no client authentication). */
    private ChannelInitializer<Channel> getChannelInitializer(
        InputStream certInputStream, InputStream keyInputStream, InputStream keyPasswordInputStream)
        throws IOException {
      String keyPassword = IOUtils.toString(keyPasswordInputStream, StandardCharsets.UTF_8);
      return getTlsEnabledInitializer(
          SslContextBuilder.forServer(certInputStream, keyInputStream, keyPassword),
          ClientAuth.NONE);
    }

    /** Initializer for the plain-HTTP listener. */
    private ChannelInitializer<Channel> getChannelInitializer() {
      return new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel channel) {
          addStubResponseToThePipeline(channel.pipeline());
        }
      };
    }

    private ChannelInitializer<Channel> getTlsEnabledInitializer(
        SslContextBuilder sslContextBuilder, ClientAuth clientAuth) {
      return new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel channel) throws IOException {
          ChannelPipeline pipeline = channel.pipeline();
          SslContext sslContext =
              sslContextBuilder.sslProvider(SslProvider.JDK).clientAuth(clientAuth).build();
          // The TLS handler must come first so the HTTP codec sees decrypted bytes.
          pipeline.addLast(sslContext.newHandler(channel.alloc()));
          addStubResponseToThePipeline(pipeline);
        }
      };
    }

    /** Gracefully shuts down both event loop groups, blocking until done. */
    public void close() throws InterruptedException {
      eventLoopGroup.shutdownGracefully().sync();
      workerGroup.shutdownGracefully().sync();
    }

    private ServerBootstrap getServerBootstrap(ChannelInitializer<Channel> childHandler) {
      return new ServerBootstrap()
          .group(eventLoopGroup, workerGroup)
          .channel(NioServerSocketChannel.class)
          .childHandler(childHandler)
          .option(ChannelOption.SO_BACKLOG, 128)
          .childOption(ChannelOption.SO_KEEPALIVE, true);
    }

    private void addStubResponseToThePipeline(ChannelPipeline pipeline) {
      pipeline.addLast(new HttpServerCodec());
      pipeline.addLast(new HttpObjectAggregator(Integer.MAX_VALUE));
      pipeline.addLast(stubFromFunctionHandler());
    }

    /** Handler that answers every aggregated HTTP request with the stub FromFunction bytes. */
    private SimpleChannelInboundHandler<FullHttpRequest> stubFromFunctionHandler() {
      return new SimpleChannelInboundHandler<FullHttpRequest>() {
        @Override
        protected void channelRead0(
            ChannelHandlerContext channelHandlerContext, FullHttpRequest fullHttpRequest) {
          ByteBuf content = Unpooled.copiedBuffer(getStubFromFunction().toByteArray());
          FullHttpResponse response =
              new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, content);
          response.headers().set(HttpHeaderNames.CONTENT_TYPE, "application/octet-stream");
          response.headers().set(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes());
          channelHandlerContext.write(response);
          channelHandlerContext.flush();
        }
      };
    }

    /**
     * Finds a currently-free TCP port by binding an ephemeral socket and returning its port.
     * NOTE(review): inherently racy — the port could be taken between close and bind; acceptable
     * for tests.
     */
    private int randomFreePort() {
      try (ServerSocket socket = new ServerSocket(0)) {
        return socket.getLocalPort();
      } catch (IOException e) {
        // Preserve the underlying cause so bind failures are diagnosable.
        throw new IllegalStateException(
            "No free ports available for the test netty service to use", e);
      }
    }

    /** Holder for the three ports the test server listens on. */
    public static class PortInfo {
      private final int httpPort;
      private final int httpsMutualTlsRequiredPort;
      private final int httpsServerTlsOnlyPort;

      public PortInfo(int httpPort, int httpsMutualTlsRequiredPort, int httpsServerTlsOnlyPort) {
        this.httpPort = httpPort;
        this.httpsMutualTlsRequiredPort = httpsMutualTlsRequiredPort;
        this.httpsServerTlsOnlyPort = httpsServerTlsOnlyPort;
      }

      public int getHttpPort() {
        return httpPort;
      }

      public int getHttpsMutualTlsRequiredPort() {
        return httpsMutualTlsRequiredPort;
      }

      public int getHttpsServerTlsOnlyPort() {
        return httpsServerTlsOnlyPort;
      }
    }

    public static ToFunctionRequestSummary getStubRequestSummary() {
      return new ToFunctionRequestSummary(
          new Address(new FunctionType("ns", "type"), "id"), 1, 0, 1);
    }

    public static ToFunction getEmptyToFunction() {
      return ToFunction.newBuilder().build();
    }

    /** Metrics implementation that ignores all recorded values. */
    public static RemoteInvocationMetrics getFakeMetrics() {
      return new RemoteInvocationMetrics() {
        @Override
        public void remoteInvocationFailures() {}

        @Override
        public void remoteInvocationLatency(long elapsed) {}
      };
    }
  }
}
| 6,088 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/httpfn/UnixDomainHttpEndpointTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.net.URI;
import org.junit.Test;
/** Tests for parsing and validating {@code http+unix://} endpoint URIs. */
public class UnixDomainHttpEndpointTest {

  @Test
  public void splitOnlyWithFile() {
    // A URI that ends at the .sock file maps to the root path segment.
    final UnixDomainHttpEndpoint endpoint = parse("http+unix:///some/path.sock");
    assertEquals("/some/path.sock", endpoint.unixDomainFile.toString());
    assertEquals("/", endpoint.pathSegment);
  }

  @Test
  public void splitOnlyWithFileAndEndpoint() {
    // Anything after the .sock file becomes the HTTP path segment.
    final UnixDomainHttpEndpoint endpoint = parse("http+unix:///some/path.sock/hello");
    assertEquals("/some/path.sock", endpoint.unixDomainFile.toString());
    assertEquals("/hello", endpoint.pathSegment);
  }

  @Test(expected = IllegalStateException.class)
  public void missingSockFile() {
    // No ".sock" component anywhere in the path.
    parse("http+unix:///some/path/hello");
  }

  @Test
  public void validateUdsEndpoint() {
    // Plain http:// URIs are not unix-domain endpoints.
    assertFalse(UnixDomainHttpEndpoint.validate(URI.create("http:///bar.foo.com/some/path")));
  }

  @Test(expected = IllegalArgumentException.class)
  public void parseNonUdsEndpoint() {
    parse("http:///bar.foo.com/some/path");
  }

  /** Shorthand for parsing an endpoint from a URI string. */
  private static UnixDomainHttpEndpoint parse(String uri) {
    return UnixDomainHttpEndpoint.parseFrom(URI.create(uri));
  }
}
| 6,089 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/httpfn/UnixDomainSocketITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;
import javax.net.ServerSocketFactory;
import okhttp3.OkHttpClient;
import okhttp3.OkHttpClient.Builder;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import org.junit.Test;
import org.newsclub.net.unix.AFUNIXServerSocket;
import org.newsclub.net.unix.AFUNIXSocketAddress;
/**
 * Integration test: an OkHttp client configured to connect through a unix domain socket can talk
 * to a {@link MockWebServer} bound to that socket.
 */
public class UnixDomainSocketITCase {

  @Test(timeout = 10 * 1_000)
  public void unixDomainSocket() throws IOException {
    // Unique socket path per run; deleteOnExit keeps /tmp tidy if the JVM exits normally.
    final File sockFile = new File("/tmp/uds-" + System.nanoTime() + ".sock");
    sockFile.deleteOnExit();
    try (MockWebServer server = new MockWebServer()) {
      server.setServerSocketFactory(udsSocketFactory(sockFile));
      server.enqueue(new MockResponse().setBody("hi"));
      server.start();

      OkHttpClient client = udsSocketClient(sockFile);
      // Response is Closeable; close it to release the connection and its body.
      try (Response response = request(client)) {
        assertTrue(response.isSuccessful());
        assertThat(response.body(), is(notNullValue()));
        assertThat(response.body().string(), is("hi"));
      }
    }
  }

  /** Issues a GET; the URL host is unused because the socket bridge overrides the transport. */
  private static Response request(OkHttpClient client) throws IOException {
    Request request = new Request.Builder().url("http://unused/").build();
    return client.newCall(request).execute();
  }

  /** returns an {@link OkHttpClient} that connects trough the provided socket file. */
  private static OkHttpClient udsSocketClient(File sockFile) {
    Builder sharedClient = OkHttpUtils.newClient().newBuilder();
    OkHttpUnixSocketBridge.configureUnixDomainSocket(sharedClient, sockFile);
    return sharedClient.build();
  }

  /** A ServerSocketFactory that always binds to the given unix domain socket file. */
  private static ServerSocketFactory udsSocketFactory(File sockFile) {
    return new ServerSocketFactory() {
      @Override
      public ServerSocket createServerSocket() throws IOException {
        return AFUNIXServerSocket.forceBindOn(new AFUNIXSocketAddress(sockFile));
      }

      // Port/backlog/address arguments are irrelevant for a unix domain socket.
      @Override
      public ServerSocket createServerSocket(int i) throws IOException {
        return createServerSocket();
      }

      @Override
      public ServerSocket createServerSocket(int i, int i1) throws IOException {
        return createServerSocket();
      }

      @Override
      public ServerSocket createServerSocket(int i, int i1, InetAddress inetAddress)
          throws IOException {
        return createServerSocket();
      }
    };
  }
}
| 6,090 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/httpfn/binders | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/httpfn/binders/v1/HttpEndpointBinderV1Test.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn.binders.v1;
import static org.hamcrest.Matchers.hasKey;
import static org.junit.Assert.assertThat;
import java.net.URL;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverse;
import org.apache.flink.statefun.flink.core.httpfn.DefaultHttpRequestReplyClientFactory;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientConstants;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.junit.Test;
/** Verifies that a v1 HTTP endpoint component spec binds a function namespace. */
public final class HttpEndpointBinderV1Test {
  private static final ObjectMapper OBJ_MAPPER = new ObjectMapper(new YAMLFactory());
  private static final String SPEC_YAML_PATH = "http-endpoint-binders/v1.yaml";

  @Test
  public void exampleUsage() throws Exception {
    final ComponentJsonObject component = loadComponentJsonObject(SPEC_YAML_PATH);
    final StatefulFunctionsUniverse universe = testUniverse();

    HttpEndpointBinderV1.INSTANCE.bind(component, universe);

    // The spec YAML declares an endpoint under the com.foo.bar namespace.
    assertThat(universe.namespaceFunctions(), hasKey("com.foo.bar"));
  }

  /** Loads and parses the component spec from the test classpath. */
  private static ComponentJsonObject loadComponentJsonObject(String yamlPath) throws Exception {
    final URL url = HttpEndpointBinderV1Test.class.getClassLoader().getResource(yamlPath);
    // Fail fast with a clear message instead of an opaque NPE inside Jackson.
    if (url == null) {
      throw new IllegalStateException("Test resource not found on classpath: " + yamlPath);
    }
    final ObjectNode componentObject = OBJ_MAPPER.readValue(url, ObjectNode.class);
    return new ComponentJsonObject(componentObject);
  }

  /** A minimal universe carrying the HTTP client factory extension the binder requires. */
  private static StatefulFunctionsUniverse testUniverse() {
    final StatefulFunctionsUniverse universe =
        new StatefulFunctionsUniverse(
            MessageFactoryKey.forType(MessageFactoryType.WITH_PROTOBUF_PAYLOADS, null));
    universe.bindExtension(
        TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE,
        DefaultHttpRequestReplyClientFactory.INSTANCE);
    return universe;
  }
}
| 6,091 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/httpfn/binders | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/test/java/org/apache/flink/statefun/flink/core/httpfn/binders/v2/HttpEndpointBinderV2Test.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core.httpfn.binders.v2;
import static org.hamcrest.Matchers.hasKey;
import static org.junit.Assert.assertThat;
import java.net.URL;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import org.apache.flink.statefun.extensions.ComponentJsonObject;
import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverse;
import org.apache.flink.statefun.flink.core.httpfn.DefaultHttpRequestReplyClientFactory;
import org.apache.flink.statefun.flink.core.httpfn.TransportClientConstants;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.junit.Test;
/** Verifies that a v2 HTTP endpoint component spec binds a function namespace. */
public final class HttpEndpointBinderV2Test {
  private static final ObjectMapper OBJ_MAPPER = new ObjectMapper(new YAMLFactory());
  private static final String SPEC_YAML_PATH = "http-endpoint-binders/v2.yaml";

  @Test
  public void exampleUsage() throws Exception {
    final ComponentJsonObject component = loadComponentJsonObject(SPEC_YAML_PATH);
    final StatefulFunctionsUniverse universe = testUniverse();

    HttpEndpointBinderV2.INSTANCE.bind(component, universe);

    // The spec YAML declares an endpoint under the com.foo.bar namespace.
    assertThat(universe.namespaceFunctions(), hasKey("com.foo.bar"));
  }

  /** Loads and parses the component spec from the test classpath. */
  private static ComponentJsonObject loadComponentJsonObject(String yamlPath) throws Exception {
    final URL url = HttpEndpointBinderV2Test.class.getClassLoader().getResource(yamlPath);
    // Fail fast with a clear message instead of an opaque NPE inside Jackson.
    if (url == null) {
      throw new IllegalStateException("Test resource not found on classpath: " + yamlPath);
    }
    final ObjectNode componentObject = OBJ_MAPPER.readValue(url, ObjectNode.class);
    return new ComponentJsonObject(componentObject);
  }

  /** A minimal universe carrying the HTTP client factory extension the binder requires. */
  private static StatefulFunctionsUniverse testUniverse() {
    final StatefulFunctionsUniverse universe =
        new StatefulFunctionsUniverse(
            MessageFactoryKey.forType(MessageFactoryType.WITH_PROTOBUF_PAYLOADS, null));
    universe.bindExtension(
        TransportClientConstants.OKHTTP_CLIENT_FACTORY_TYPE,
        DefaultHttpRequestReplyClientFactory.INSTANCE);
    return universe;
  }
}
| 6,092 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/StatefulFunctionsConfigValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import java.util.*;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;
import org.apache.flink.statefun.flink.core.exceptions.StatefulFunctionsInvalidConfigException;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.apache.flink.util.StringUtils;
/**
 * Validates Flink {@link Configuration} settings that Stateful Functions requires to run
 * correctly: parent-first classloading patterns, payload serializer settings, RocksDB-backed
 * timers, and disabled unaligned checkpoints.
 */
public final class StatefulFunctionsConfigValidator {

  private StatefulFunctionsConfigValidator() {}

  /** Packages that must be loaded parent-first so embedded functions share SDK classes. */
  public static final List<String> PARENT_FIRST_CLASSLOADER_PATTERNS =
      Collections.unmodifiableList(
          Arrays.asList("org.apache.flink.statefun", "org.apache.kafka", "com.google.protobuf"));

  public static final int MAX_CONCURRENT_CHECKPOINTS = 1;

  /**
   * Runs all validations, throwing {@link StatefulFunctionsInvalidConfigException} on the first
   * violation.
   *
   * @param isEmbedded whether functions run embedded in the Flink cluster; the classloader check
   *     only applies to the non-embedded deployment.
   * @param configuration the Flink configuration to validate.
   */
  static void validate(boolean isEmbedded, Configuration configuration) {
    if (!isEmbedded) {
      validateParentFirstClassloaderPatterns(configuration);
    }
    validateCustomPayloadSerializerClassName(configuration);
    validateNoHeapBackedTimers(configuration);
    validateUnalignedCheckpointsDisabled(configuration);
  }

  private static void validateParentFirstClassloaderPatterns(Configuration configuration) {
    final Set<String> parentFirstClassloaderPatterns =
        parentFirstClassloaderPatterns(configuration);
    if (!parentFirstClassloaderPatterns.containsAll(PARENT_FIRST_CLASSLOADER_PATTERNS)) {
      throw new StatefulFunctionsInvalidConfigException(
          CoreOptions.ALWAYS_PARENT_FIRST_LOADER_PATTERNS_ADDITIONAL,
          "Must contain all of " + String.join(", ", PARENT_FIRST_CLASSLOADER_PATTERNS));
    }
  }

  /** Normalizes the configured patterns (trimmed, lowercased) into a set for comparison. */
  private static Set<String> parentFirstClassloaderPatterns(Configuration configuration) {
    final List<String> patterns =
        configuration.get(CoreOptions.ALWAYS_PARENT_FIRST_LOADER_PATTERNS_ADDITIONAL);
    final Set<String> parentFirstClassloaderPatterns = new HashSet<>(patterns.size());
    for (String s : patterns) {
      // Locale.ENGLISH keeps the comparison independent of the default locale.
      parentFirstClassloaderPatterns.add(s.trim().toLowerCase(Locale.ENGLISH));
    }
    return parentFirstClassloaderPatterns;
  }

  /**
   * A custom payload serializer class is mandatory with WITH_CUSTOM_PAYLOADS and forbidden with
   * every other serializer type.
   */
  private static void validateCustomPayloadSerializerClassName(Configuration configuration) {
    final MessageFactoryType factoryType =
        configuration.get(StatefulFunctionsConfig.USER_MESSAGE_SERIALIZER);
    final String customPayloadSerializerClassName =
        configuration.get(StatefulFunctionsConfig.USER_MESSAGE_CUSTOM_PAYLOAD_SERIALIZER_CLASS);
    if (factoryType == MessageFactoryType.WITH_CUSTOM_PAYLOADS) {
      if (StringUtils.isNullOrWhitespaceOnly(customPayloadSerializerClassName)) {
        throw new StatefulFunctionsInvalidConfigException(
            StatefulFunctionsConfig.USER_MESSAGE_CUSTOM_PAYLOAD_SERIALIZER_CLASS,
            "custom payload serializer class must be supplied with WITH_CUSTOM_PAYLOADS serializer");
      }
    } else {
      if (customPayloadSerializerClassName != null) {
        throw new StatefulFunctionsInvalidConfigException(
            StatefulFunctionsConfig.USER_MESSAGE_CUSTOM_PAYLOAD_SERIALIZER_CLASS,
            "custom payload serializer class may only be supplied with WITH_CUSTOM_PAYLOADS serializer");
      }
    }
  }

  private static final ConfigOption<String> TIMER_SERVICE_FACTORY =
      ConfigOptions.key("state.backend.rocksdb.timer-service.factory")
          .stringType()
          .defaultValue("rocksdb");

  private static final ConfigOption<Boolean> ENABLE_UNALIGNED_CHECKPOINTS =
      ConfigOptions.key("execution.checkpointing.unaligned").booleanType().defaultValue(false);

  private static void validateNoHeapBackedTimers(Configuration configuration) {
    // Use the typed get(ConfigOption) accessor consistently with the other validators.
    final String timerFactory = configuration.get(TIMER_SERVICE_FACTORY);
    if (!timerFactory.equalsIgnoreCase("rocksdb")) {
      throw new StatefulFunctionsInvalidConfigException(
          TIMER_SERVICE_FACTORY,
          "StateFun only supports non-heap timers with a rocksdb state backend.");
    }
  }

  private static void validateUnalignedCheckpointsDisabled(Configuration configuration) {
    final boolean unalignedCheckpoints = configuration.get(ENABLE_UNALIGNED_CHECKPOINTS);
    if (unalignedCheckpoints) {
      throw new StatefulFunctionsInvalidConfigException(
          ENABLE_UNALIGNED_CHECKPOINTS,
          "StateFun currently does not support unaligned checkpointing.");
    }
  }
}
| 6,093 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/StatefulFunctionsUniverseValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
/** Sanity-checks that a {@link StatefulFunctionsUniverse} has the minimum required bindings. */
final class StatefulFunctionsUniverseValidator {

  void validate(StatefulFunctionsUniverse statefulFunctionsUniverse) {
    // TODO: complete this
    checkState(!statefulFunctionsUniverse.ingress().isEmpty(), "There are no ingress defined.");
    checkState(
        !statefulFunctionsUniverse.sources().isEmpty(), "There are no source providers defined.");
    checkState(!statefulFunctionsUniverse.routers().isEmpty(), "There are no routers defined.");
    // Either embedded functions or remote (namespace) functions must be present.
    checkState(
        !statefulFunctionsUniverse.functions().isEmpty()
            || !statefulFunctionsUniverse.namespaceFunctions().isEmpty(),
        "There are no function providers defined.");
  }

  /** Throws {@link IllegalStateException} with the given message when the condition is false. */
  private static void checkState(boolean condition, String message) {
    if (!condition) {
      throw new IllegalStateException(message);
    }
  }
}
| 6,094 |
0 | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink | Create_ds/flink-statefun/statefun-flink/statefun-flink-core/src/main/java/org/apache/flink/statefun/flink/core/StatefulFunctionsJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.statefun.flink.core.feedback.FeedbackKey;
import org.apache.flink.statefun.flink.core.message.Message;
import org.apache.flink.statefun.flink.core.translation.FlinkUniverse;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.FlinkUserCodeClassLoader;
import org.apache.flink.util.FlinkUserCodeClassLoaders;
/**
 * The main entry point of a Stateful Functions application.
 *
 * <p>Builds a {@link StatefulFunctionsUniverse} from the modules found on the class path,
 * validates it, translates it into a Flink streaming topology and submits the resulting job.
 */
public class StatefulFunctionsJob {

  /** Produces a unique invocation id for the feedback channel of each submitted pipeline. */
  private static final AtomicInteger FEEDBACK_INVOCATION_ID_SEQ = new AtomicInteger();

  /**
   * Command-line entry point.
   *
   * <p>The effective configuration is assembled from the Flink configuration of the current
   * execution environment, with command-line arguments taking precedence.
   *
   * @param args command-line arguments in Flink's {@code --key value} form.
   */
  public static void main(String... args) throws Exception {
    final ParameterTool commandLine = ParameterTool.fromArgs(args);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final Configuration flinkConfiguration = FlinkConfigExtractor.reflectivelyExtractFromEnv(env);

    // Merge order: command-line arguments override values coming from flink-conf.yaml.
    final StatefulFunctionsConfig stateFunConfig =
        StatefulFunctionsConfig.fromFlinkConfiguration(
            ParameterTool.fromMap(flinkConfiguration.toMap())
                .mergeWith(commandLine)
                .getConfiguration());
    stateFunConfig.addAllGlobalConfigurations(commandLine.toMap());
    stateFunConfig.setProvider(new StatefulFunctionsUniverses.ClassPathUniverseProvider());

    StatefulFunctionsConfigValidator.validate(stateFunConfig.isEmbedded(), flinkConfiguration);

    main(env, stateFunConfig);
  }

  /**
   * The main entry point for executing a stateful functions application.
   *
   * @param env The StreamExecutionEnvironment under which the application will be bound.
   * @param stateFunConfig The stateful function specific configurations for the deployment.
   */
  public static void main(StreamExecutionEnvironment env, StatefulFunctionsConfig stateFunConfig)
      throws Exception {
    Objects.requireNonNull(env);
    Objects.requireNonNull(stateFunConfig);

    setDefaultContextClassLoaderIfAbsent();

    env.getConfig().enableObjectReuse();

    final StatefulFunctionsUniverse universe =
        StatefulFunctionsUniverses.get(
            Thread.currentThread().getContextClassLoader(), stateFunConfig);
    new StatefulFunctionsUniverseValidator().validate(universe);

    // Each submission derives a fresh invocation id for its feedback key.
    final FeedbackKey<Message> feedbackKey =
        new FeedbackKey<>("statefun-pipeline", FEEDBACK_INVOCATION_ID_SEQ.incrementAndGet());
    new FlinkUniverse(feedbackKey, stateFunConfig, universe).configure(env);

    env.execute(stateFunConfig.getFlinkJobName());
  }

  /**
   * Installs a parent-first user-code class loader as the thread context class loader, but only
   * when none is currently set (e.g. when invoked outside of a Flink-managed thread).
   */
  private static void setDefaultContextClassLoaderIfAbsent() {
    final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    if (contextClassLoader == null) {
      final URLClassLoader fallbackClassLoader =
          FlinkUserCodeClassLoaders.parentFirst(
              new URL[0],
              StatefulFunctionsJob.class.getClassLoader(),
              FlinkUserCodeClassLoader.NOOP_EXCEPTION_HANDLER,
              false);
      Thread.currentThread().setContextClassLoader(fallbackClassLoader);
    }
  }
}
| 6,095 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import javax.annotation.Nullable;
import org.apache.flink.statefun.extensions.ExtensionModule;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.core.spi.ExtensionResolver;
import org.apache.flink.statefun.flink.core.types.StaticallyRegisteredTypes;
import org.apache.flink.statefun.flink.io.spi.FlinkIoModule;
import org.apache.flink.statefun.flink.io.spi.SinkProvider;
import org.apache.flink.statefun.flink.io.spi.SourceProvider;
import org.apache.flink.statefun.sdk.EgressType;
import org.apache.flink.statefun.sdk.FunctionType;
import org.apache.flink.statefun.sdk.FunctionTypeNamespaceMatcher;
import org.apache.flink.statefun.sdk.IngressType;
import org.apache.flink.statefun.sdk.StatefulFunctionProvider;
import org.apache.flink.statefun.sdk.TypeName;
import org.apache.flink.statefun.sdk.io.EgressIdentifier;
import org.apache.flink.statefun.sdk.io.EgressSpec;
import org.apache.flink.statefun.sdk.io.IngressIdentifier;
import org.apache.flink.statefun.sdk.io.IngressSpec;
import org.apache.flink.statefun.sdk.io.Router;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
public final class StatefulFunctionsUniverse
implements StatefulFunctionModule.Binder,
FlinkIoModule.Binder,
ExtensionModule.Binder,
ExtensionResolver {
private final Map<IngressIdentifier<?>, IngressSpec<?>> ingress = new HashMap<>();
private final Map<EgressIdentifier<?>, EgressSpec<?>> egress = new HashMap<>();
private final Map<IngressIdentifier<?>, List<Router<?>>> routers = new HashMap<>();
private final Map<FunctionType, StatefulFunctionProvider> specificFunctionProviders =
new HashMap<>();
private final Map<String, StatefulFunctionProvider> namespaceFunctionProviders = new HashMap<>();
private final Map<IngressType, SourceProvider> sources = new HashMap<>();
private final Map<EgressType, SinkProvider> sinks = new HashMap<>();
private final Map<TypeName, Object> extensions = new HashMap<>();
private final StaticallyRegisteredTypes types;
private final MessageFactoryKey messageFactoryKey;
public StatefulFunctionsUniverse(MessageFactoryKey messageFactoryKey) {
this.messageFactoryKey = messageFactoryKey;
this.types = new StaticallyRegisteredTypes(messageFactoryKey);
}
@Override
public <T> void bindIngress(IngressSpec<T> spec) {
Objects.requireNonNull(spec);
IngressIdentifier<T> id = spec.id();
putAndThrowIfPresent(ingress, id, spec);
}
@Override
public <T> void bindIngressRouter(IngressIdentifier<T> ingressIdentifier, Router<T> router) {
Objects.requireNonNull(ingressIdentifier);
Objects.requireNonNull(router);
List<Router<?>> ingressRouters =
routers.computeIfAbsent(ingressIdentifier, unused -> new ArrayList<>());
ingressRouters.add(router);
}
@Override
public <T> void bindEgress(EgressSpec<T> spec) {
Objects.requireNonNull(spec);
EgressIdentifier<T> id = spec.id();
putAndThrowIfPresent(egress, id, spec);
}
@Override
public void bindFunctionProvider(FunctionType functionType, StatefulFunctionProvider provider) {
Objects.requireNonNull(functionType);
Objects.requireNonNull(provider);
putAndThrowIfPresent(specificFunctionProviders, functionType, provider);
}
@Override
public void bindFunctionProvider(
FunctionTypeNamespaceMatcher namespaceMatcher, StatefulFunctionProvider provider) {
Objects.requireNonNull(namespaceMatcher);
Objects.requireNonNull(provider);
putAndThrowIfPresent(namespaceFunctionProviders, namespaceMatcher.targetNamespace(), provider);
}
@Override
public void bindSourceProvider(IngressType type, SourceProvider provider) {
Objects.requireNonNull(type);
Objects.requireNonNull(provider);
putAndThrowIfPresent(sources, type, provider);
}
@Override
public void bindSinkProvider(EgressType type, SinkProvider provider) {
putAndThrowIfPresent(sinks, type, provider);
}
@Override
public <T> void bindExtension(TypeName typeName, T extension) {
putAndThrowIfPresent(extensions, typeName, extension);
}
@Override
public <T> T resolveExtension(TypeName typeName, Class<T> extensionClass) {
final Object rawTypedExtension = extensions.get(typeName);
if (rawTypedExtension == null) {
throw new IllegalStateException("An extension with type " + typeName + " does not exist.");
}
if (rawTypedExtension.getClass().isAssignableFrom(extensionClass)) {
throw new IllegalStateException(
"Unexpected class for extension "
+ typeName
+ "; expected "
+ extensionClass
+ ", but was "
+ rawTypedExtension.getClass());
}
return extensionClass.cast(rawTypedExtension);
}
public Map<IngressIdentifier<?>, IngressSpec<?>> ingress() {
return ingress;
}
public Map<EgressIdentifier<?>, EgressSpec<?>> egress() {
return egress;
}
public Map<IngressIdentifier<?>, List<Router<?>>> routers() {
return routers;
}
public Map<FunctionType, StatefulFunctionProvider> functions() {
return specificFunctionProviders;
}
public Map<String, StatefulFunctionProvider> namespaceFunctions() {
return namespaceFunctionProviders;
}
public Map<IngressType, SourceProvider> sources() {
return sources;
}
public Map<EgressType, SinkProvider> sinks() {
return sinks;
}
public Map<TypeName, Object> getExtensions() {
return extensions;
}
public StaticallyRegisteredTypes types() {
return types;
}
private static <K, V> void putAndThrowIfPresent(Map<K, V> map, K key, V value) {
@Nullable V previous = map.put(key, value);
if (previous == null) {
return;
}
throw new IllegalStateException(
String.format("A binding for the key %s was previously defined.", key));
}
public MessageFactoryKey messageFactoryKey() {
return messageFactoryKey;
}
}
| 6,096 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import org.apache.flink.statefun.flink.core.spi.Modules;
import org.apache.flink.util.Preconditions;
/** Entry point for obtaining a {@link StatefulFunctionsUniverse} through a configured provider. */
public final class StatefulFunctionsUniverses {

  /**
   * Loads the {@link StatefulFunctionsUniverse} via the provider carried inside the given
   * configuration.
   *
   * @param classLoader the class loader used to resolve the provider and the modules.
   * @param configuration the Stateful Functions configuration holding the serialized provider.
   * @return the universe produced by the configured provider.
   */
  public static StatefulFunctionsUniverse get(
      ClassLoader classLoader, StatefulFunctionsConfig configuration) {
    Preconditions.checkState(classLoader != null, "The class loader was not set.");
    Preconditions.checkState(configuration != null, "The configuration was not set.");

    return configuration.getProvider(classLoader).get(classLoader, configuration);
  }

  /** A provider that discovers modules on the class path and builds a universe from them. */
  static final class ClassPathUniverseProvider implements StatefulFunctionsUniverseProvider {

    private static final long serialVersionUID = 1;

    @Override
    public StatefulFunctionsUniverse get(
        ClassLoader classLoader, StatefulFunctionsConfig configuration) {
      return Modules.loadFromClassPath(configuration).createStatefulFunctionsUniverse();
    }
  }
}
| 6,097 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import java.io.Serializable;
/**
 * A serializable factory for a {@link StatefulFunctionsUniverse}.
 *
 * <p>Implementations are serialized into the {@link StatefulFunctionsConfig} on the client side
 * and deserialized with the user-code class loader at runtime, where they produce the universe.
 */
public interface StatefulFunctionsUniverseProvider extends Serializable {
  /** Creates the universe using the given class loader and configuration. */
  StatefulFunctionsUniverse get(ClassLoader classLoader, StatefulFunctionsConfig configuration);
}
| 6,098 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.core;
import static org.apache.flink.configuration.description.TextElement.code;
import java.io.IOException;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.description.Description;
import org.apache.flink.statefun.flink.core.message.MessageFactoryKey;
import org.apache.flink.statefun.flink.core.message.MessageFactoryType;
import org.apache.flink.statefun.sdk.spi.StatefulFunctionModule;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.InstantiationUtil;
/** Configuration that captures all stateful function related settings. */
@SuppressWarnings("WeakerAccess")
public class StatefulFunctionsConfig implements Serializable {

  private static final long serialVersionUID = 1L;

  /** Prefix for keys that are forwarded to modules as global configuration entries. */
  public static final String MODULE_CONFIG_PREFIX = "statefun.module.global-config.";

  // This configuration option exists for the documentation generator
  @SuppressWarnings("unused")
  public static final ConfigOption<String> MODULE_GLOBAL_DEFAULT =
      ConfigOptions.key(MODULE_CONFIG_PREFIX + "<KEY>")
          .stringType()
          .noDefaultValue()
          .withDescription(
              Description.builder()
                  .text(
                      "Adds the given key/value pair to the Stateful Functions global configuration.")
                  .text(
                      "These values will be available via the `globalConfigurations` parameter of StatefulFunctionModule#configure.")
                  .linebreak()
                  .text(
                      "Only the key <KEY> and value are added to the configuration. If the key/value pairs")
                  .list(
                      code(MODULE_CONFIG_PREFIX + "key1: value1"),
                      code(MODULE_CONFIG_PREFIX + "key2: value2"))
                  .text("are set, then the map")
                  .list(code("key1: value1"), code("key2: value2"))
                  .text("will be made available to your module at runtime.")
                  .build());

  public static final ConfigOption<MessageFactoryType> USER_MESSAGE_SERIALIZER =
      ConfigOptions.key("statefun.message.serializer")
          .enumType(MessageFactoryType.class)
          .defaultValue(MessageFactoryType.WITH_PROTOBUF_PAYLOADS)
          .withDescription("The serializer to use for on the wire messages.")
;

  public static final ConfigOption<String> USER_MESSAGE_CUSTOM_PAYLOAD_SERIALIZER_CLASS =
      ConfigOptions.key("statefun.message.custom-payload-serializer-class")
          .stringType()
          .noDefaultValue()
          .withDescription(
              "The custom payload serializer class to use with the WITH_CUSTOM_PAYLOADS serializer, which must implement MessagePayloadSerializer.");

  public static final ConfigOption<String> FLINK_JOB_NAME =
      ConfigOptions.key("statefun.flink-job-name")
          .stringType()
          .defaultValue("StatefulFunctions")
          .withDescription("The name to display at the Flink-UI");

  public static final ConfigOption<MemorySize> TOTAL_MEMORY_USED_FOR_FEEDBACK_CHECKPOINTING =
      ConfigOptions.key("statefun.feedback.memory.size")
          .memoryType()
          .defaultValue(MemorySize.ofMebiBytes(32))
          .withDescription(
              "The number of bytes to use for in memory buffering of the feedback channel, before spilling to disk.");

  public static final ConfigOption<Integer> ASYNC_MAX_OPERATIONS_PER_TASK =
      ConfigOptions.key("statefun.async.max-per-task")
          .intType()
          .defaultValue(32 * 1024)
          .withDescription(
              "The max number of async operations per task before backpressure is applied.");

  public static final ConfigOption<String> REMOTE_MODULE_NAME =
      ConfigOptions.key("statefun.remote.module-name")
          .stringType()
          .defaultValue("classpath:module.yaml")
          .withDescription(
              "The name of the remote module entity to look for. Also supported, file:///...");

  public static final ConfigOption<Boolean> EMBEDDED =
      ConfigOptions.key("statefun.embedded")
          .booleanType()
          .defaultValue(false)
          .withDescription(
              "True if Flink is running this job from an uber jar, rather than using statefun-specific docker images");

  /**
   * Creates a new {@link StatefulFunctionsConfig} based on the default configurations in the
   * current environment set via the {@code flink-conf.yaml}.
   */
  public static StatefulFunctionsConfig fromEnvironment(StreamExecutionEnvironment env) {
    Configuration configuration = FlinkConfigExtractor.reflectivelyExtractFromEnv(env);
    return new StatefulFunctionsConfig(configuration);
  }

  /** Creates a new {@link StatefulFunctionsConfig} from the given Flink configuration. */
  public static StatefulFunctionsConfig fromFlinkConfiguration(Configuration flinkConfiguration) {
    return new StatefulFunctionsConfig(flinkConfiguration);
  }

  private MessageFactoryType factoryType;

  // May be null; only used when factoryType is WITH_CUSTOM_PAYLOADS.
  private String customPayloadSerializerClassName;

  private String flinkJobName;

  // Serialized form of the StatefulFunctionsUniverseProvider; kept as bytes so this
  // configuration object stays serializable regardless of the provider's class loader.
  private byte[] universeInitializerClassBytes;

  private MemorySize feedbackBufferSize;

  private int maxAsyncOperationsPerTask;

  private String remoteModuleName;

  private boolean embedded;

  private final Map<String, String> globalConfigurations = new HashMap<>();

  /**
   * Create a new configuration object based on the values set in flink-conf.
   *
   * @param configuration a configuration to read the values from
   */
  private StatefulFunctionsConfig(Configuration configuration) {
    this.factoryType = configuration.get(USER_MESSAGE_SERIALIZER);
    this.customPayloadSerializerClassName =
        configuration.get(USER_MESSAGE_CUSTOM_PAYLOAD_SERIALIZER_CLASS);
    this.flinkJobName = configuration.get(FLINK_JOB_NAME);
    this.feedbackBufferSize = configuration.get(TOTAL_MEMORY_USED_FOR_FEEDBACK_CHECKPOINTING);
    this.maxAsyncOperationsPerTask = configuration.get(ASYNC_MAX_OPERATIONS_PER_TASK);
    this.remoteModuleName = configuration.get(REMOTE_MODULE_NAME);
    // Read uniformly via get(ConfigOption) like all other options above
    // (previously used the equivalent getBoolean(ConfigOption) accessor).
    this.embedded = configuration.get(EMBEDDED);

    // Collect all module-prefixed keys; the stripped keys are exposed to modules at runtime.
    for (String key : configuration.keySet()) {
      if (key.startsWith(MODULE_CONFIG_PREFIX)) {
        String value = configuration.get(ConfigOptions.key(key).stringType().noDefaultValue());
        String userKey = key.substring(MODULE_CONFIG_PREFIX.length());
        globalConfigurations.put(userKey, value);
      }
    }
  }

  /** Returns the factory type used to serialize messages. */
  public MessageFactoryType getFactoryType() {
    return factoryType;
  }

  /**
   * Returns the custom payload serializer class name, when factory type is WITH_CUSTOM_PAYLOADS.
   */
  public String getCustomPayloadSerializerClassName() {
    return customPayloadSerializerClassName;
  }

  /** Returns the factory key. */
  public MessageFactoryKey getFactoryKey() {
    return MessageFactoryKey.forType(this.factoryType, this.customPayloadSerializerClassName);
  }

  /** Sets the factory type used to serialize messages. */
  public void setFactoryType(MessageFactoryType factoryType) {
    this.factoryType = Objects.requireNonNull(factoryType);
  }

  /** Sets the custom payload serializer class name. */
  public void setCustomPayloadSerializerClassName(String customPayloadSerializerClassName) {
    this.customPayloadSerializerClassName = customPayloadSerializerClassName;
  }

  /** Returns the Flink job name that appears in the Web UI. */
  public String getFlinkJobName() {
    return flinkJobName;
  }

  /** Set the Flink job name that appears in the Web UI. */
  public void setFlinkJobName(String flinkJobName) {
    this.flinkJobName = Objects.requireNonNull(flinkJobName);
  }

  /** Returns the number of bytes to use for in memory buffering of the feedback channel. */
  public MemorySize getFeedbackBufferSize() {
    return feedbackBufferSize;
  }

  /** Sets the number of bytes to use for in memory buffering of the feedback channel. */
  public void setFeedbackBufferSize(MemorySize size) {
    this.feedbackBufferSize = Objects.requireNonNull(size);
  }

  /** Returns the max async operations allowed per task. */
  public int getMaxAsyncOperationsPerTask() {
    return maxAsyncOperationsPerTask;
  }

  /** Sets the max async operations allowed per task. */
  public void setMaxAsyncOperationsPerTask(int maxAsyncOperationsPerTask) {
    this.maxAsyncOperationsPerTask = maxAsyncOperationsPerTask;
  }

  /** Returns the remote module name. */
  public String getRemoteModuleName() {
    return remoteModuleName;
  }

  /**
   * Sets a template for the remote module name.
   *
   * <p>By default the system will look for module.yaml in the classpath, to override that use
   * either a configuration parameter (see {@linkplain #REMOTE_MODULE_NAME}) or this getter.
   *
   * <p>The supported formats are either a file path, a file path prefixed with a {@code file:}
   * schema, or a name prefixed by {@code classpath:}
   */
  public void setRemoteModuleName(String remoteModuleName) {
    this.remoteModuleName = Objects.requireNonNull(remoteModuleName);
  }

  /** Returns whether the job was launched in embedded mode (see {@linkplain #EMBEDDED}). */
  public boolean isEmbedded() {
    return embedded;
  }

  /**
   * Sets the embedded mode. If true, disables certain validation steps. See documentation:
   * Configurations.
   */
  public void setEmbedded(boolean embedded) {
    this.embedded = embedded;
  }

  /**
   * Retrieves the universe provider for loading modules.
   *
   * @param cl The classloader on which the provider class is located.
   * @return A {@link StatefulFunctionsUniverseProvider}.
   */
  public StatefulFunctionsUniverseProvider getProvider(ClassLoader cl) {
    try {
      return InstantiationUtil.deserializeObject(universeInitializerClassBytes, cl, false);
    } catch (IOException | ClassNotFoundException e) {
      throw new IllegalStateException("Unable to initialize.", e);
    }
  }

  /** Sets the universe provider used to load modules. */
  public void setProvider(StatefulFunctionsUniverseProvider provider) {
    try {
      universeInitializerClassBytes = InstantiationUtil.serializeObject(provider);
    } catch (IOException e) {
      throw new IllegalStateException(e);
    }
  }

  /**
   * Returns the global configurations passed to {@link
   * org.apache.flink.statefun.sdk.spi.StatefulFunctionModule#configure(Map,
   * StatefulFunctionModule.Binder)}.
   */
  public Map<String, String> getGlobalConfigurations() {
    return Collections.unmodifiableMap(globalConfigurations);
  }

  /** Adds all entries in this to the global configuration. */
  public void addAllGlobalConfigurations(Map<String, String> globalConfigurations) {
    this.globalConfigurations.putAll(globalConfigurations);
  }

  /**
   * Adds the given key/value pair to the global configuration.
   *
   * @param key the key of the key/value pair to be added
   * @param value the value of the key/value pair to be added
   */
  public void setGlobalConfiguration(String key, String value) {
    this.globalConfigurations.put(key, value);
  }
}
| 6,099 |