index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/AsyncWaitOperatorBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.benchmark.functions.LongSource;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.streaming.api.functions.async.RichAsyncFunction;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
@OperationsPerInvocation(value = AsyncWaitOperatorBenchmark.RECORDS_PER_INVOCATION)
public class AsyncWaitOperatorBenchmark extends BenchmarkBase {
public static final int RECORDS_PER_INVOCATION = 1_000_000;
private static final long CHECKPOINT_INTERVAL_MS = 100;
private static ExecutorService executor;
@Param public AsyncDataStream.OutputMode outputMode;
public static void main(String[] args) throws RunnerException {
Options options =
new OptionsBuilder()
.verbosity(VerboseMode.NORMAL)
.include(".*" + AsyncWaitOperatorBenchmark.class.getCanonicalName() + ".*")
.build();
new Runner(options).run();
}
@Setup
public void setUp() {
executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
}
@TearDown
public void tearDown() {
executor.shutdown();
}
@Benchmark
public void asyncWait(FlinkEnvironmentContext context) throws Exception {
StreamExecutionEnvironment env = context.env;
env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
env.setParallelism(1);
DataStreamSource<Long> source = env.addSource(new LongSource(RECORDS_PER_INVOCATION));
DataStream<Long> result = createAsyncOperator(source);
result.addSink(new DiscardingSink<>());
env.execute();
}
private DataStream<Long> createAsyncOperator(DataStreamSource<Long> source) {
switch (outputMode) {
case ORDERED:
return AsyncDataStream.orderedWait(
source, new BenchmarkAsyncFunctionExecutor(), 0, TimeUnit.MILLISECONDS);
case UNORDERED:
return AsyncDataStream.unorderedWait(
source, new BenchmarkAsyncFunctionExecutor(), 0, TimeUnit.MILLISECONDS);
default:
throw new UnsupportedOperationException("Unknown mode");
}
}
private static class BenchmarkAsyncFunctionExecutor extends RichAsyncFunction<Long, Long> {
@Override
public void asyncInvoke(Long input, ResultFuture<Long> resultFuture) {
executor.execute(() -> resultFuture.complete(Collections.singleton(input * 2)));
}
}
}
| 4,600 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/StateBackendBenchmarkBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.benchmark.functions.IntegerLongSource;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.AbstractStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.util.FileUtils;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
public class StateBackendBenchmarkBase extends BenchmarkBase {
public enum StateBackend {
MEMORY,
FS,
FS_ASYNC,
ROCKS,
ROCKS_INC
}
public static class StateBackendContext extends FlinkEnvironmentContext {
public final File checkpointDir;
public final int numberOfElements = 1000;
public DataStreamSource<IntegerLongSource.Record> source;
public StateBackendContext() {
try {
checkpointDir = Files.createTempDirectory("bench-").toFile();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public void setUp(StateBackend stateBackend, long recordsPerInvocation) throws IOException {
try {
super.setUp();
} catch (Exception e) {
e.printStackTrace();
}
final AbstractStateBackend backend;
String checkpointDataUri = "file://" + checkpointDir.getAbsolutePath();
switch (stateBackend) {
case MEMORY:
backend = new MemoryStateBackend();
break;
case FS:
backend = new FsStateBackend(checkpointDataUri, false);
break;
case FS_ASYNC:
backend = new FsStateBackend(checkpointDataUri, true);
break;
case ROCKS:
backend = new RocksDBStateBackend(checkpointDataUri, false);
break;
case ROCKS_INC:
backend = new RocksDBStateBackend(checkpointDataUri, true);
break;
default:
throw new UnsupportedOperationException(
"Unknown state backend: " + stateBackend);
}
env.setStateBackend(backend);
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
source = env.addSource(new IntegerLongSource(numberOfElements, recordsPerInvocation));
}
@Override
public void tearDown() throws Exception {
super.tearDown();
FileUtils.deleteDirectory(checkpointDir);
}
}
}
| 4,601 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/BackpressureUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.time.Deadline;
import org.apache.flink.client.deployment.StandaloneClusterId;
import org.apache.flink.client.program.rest.RestClusterClient;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.rest.messages.EmptyRequestBody;
import org.apache.flink.runtime.rest.messages.JobVertexBackPressureHeaders;
import org.apache.flink.runtime.rest.messages.JobVertexBackPressureInfo;
import org.apache.flink.runtime.rest.messages.JobVertexMessageParameters;
import org.apache.flink.util.FlinkRuntimeException;
import java.net.URI;
import java.time.Duration;
import java.util.List;
/** Utility class for querying a backpressure status. */
public class BackpressureUtils {
static void waitForBackpressure(
JobID jobID,
List<JobVertexID> sourceId,
URI restAddress,
Configuration clientConfiguration)
throws Exception {
RestClusterClient<StandaloneClusterId> restClient =
createClient(restAddress.getPort(), clientConfiguration);
Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
boolean allBackpressured;
// There seems to be a race condition in some setups, between setting up REST server
// and client being able to connect. This is handled by the retrying mechanism in
// the RestClusterClient, but time out takes a lot of time to trigger, so we are
// doing a little bit of sleep here in an attempt to avoid waiting for that timeout.
Thread.sleep(100);
do {
allBackpressured =
sourceId.stream()
.map(id -> queryBackpressure(jobID, id, restClient, restAddress))
.allMatch(
level ->
level
== JobVertexBackPressureInfo
.VertexBackPressureLevel.HIGH);
} while (!allBackpressured && deadline.hasTimeLeft());
if (!allBackpressured) {
throw new FlinkRuntimeException(
"Could not trigger backpressure for the job in given time.");
}
}
private static RestClusterClient<StandaloneClusterId> createClient(
int port, Configuration clientConfiguration) throws Exception {
final Configuration clientConfig = new Configuration();
clientConfig.addAll(clientConfiguration);
clientConfig.setInteger(RestOptions.PORT, port);
return new RestClusterClient<>(clientConfig, StandaloneClusterId.getInstance());
}
private static JobVertexBackPressureInfo.VertexBackPressureLevel queryBackpressure(
JobID jobID, JobVertexID vertexID, RestClusterClient restClient, URI restAddress) {
try {
final JobVertexMessageParameters metricsParameters = new JobVertexMessageParameters();
metricsParameters.jobPathParameter.resolve(jobID);
metricsParameters.jobVertexIdPathParameter.resolve(vertexID);
return ((JobVertexBackPressureInfo)
restClient
.sendRequest(
JobVertexBackPressureHeaders.getInstance(),
metricsParameters,
EmptyRequestBody.getInstance())
.get())
.getBackpressureLevel();
} catch (Exception e) {
throw new FlinkRuntimeException(e);
}
}
}
| 4,602 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/CheckpointEnvironmentContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.JobID;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.testutils.CommonTestUtils;
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.graph.StreamGraph;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.function.Function;
/**
* Common context to be used for benchmarking checkpointing time.
*
* @see CheckpointingTimeBenchmark
* @see MultiInputCheckpointingTimeBenchmark
*/
public abstract class CheckpointEnvironmentContext extends FlinkEnvironmentContext {
public static final int JOB_PARALLELISM = 4;
public static final MemorySize START_MEMORY_SEGMENT_SIZE = MemorySize.parse("8 kb");
public static final MemorySize MIN_MEMORY_SEGMENT_SIZE = MemorySize.parse("256 b");
public static final Duration DEBLOATING_TARGET = Duration.of(300, ChronoUnit.MILLIS);
public static final int DEBLOATING_STABILIZATION_PERIOD = 2_000;
public JobID jobID;
@Override
public void setUp() throws Exception {
super.setUp();
env.setParallelism(CheckpointEnvironmentContext.JOB_PARALLELISM);
env.enableCheckpointing(Long.MAX_VALUE);
final StreamGraphWithSources streamGraphWithSources = getStreamGraph();
final JobClient jobClient = env.executeAsync(streamGraphWithSources.getStreamGraph());
jobID = jobClient.getJobID();
CommonTestUtils.waitForAllTaskRunning(miniCluster, jobID, false);
BackpressureUtils.waitForBackpressure(
jobID,
streamGraphWithSources.getSources(),
miniCluster.getRestAddress().get(),
miniCluster.getConfiguration());
if (getSleepPostSetUp() > 0) {
Thread.sleep(getSleepPostSetUp());
}
}
protected abstract CheckpointMode getMode();
protected abstract StreamGraphWithSources getStreamGraph();
protected int getSleepPostSetUp() {
return getMode() == CheckpointMode.ALIGNED
? CheckpointEnvironmentContext.DEBLOATING_STABILIZATION_PERIOD
: 0;
}
@Override
protected Configuration createConfiguration() {
return getMode().configure(super.createConfiguration());
}
/**
* Checkpointing configuration to be used in {@link CheckpointingTimeBenchmark} & {@link
* MultiInputCheckpointingTimeBenchmark}.
*/
public enum CheckpointMode {
UNALIGNED(
config -> {
config.set(ExecutionCheckpointingOptions.ENABLE_UNALIGNED, true);
config.set(
TaskManagerOptions.MEMORY_SEGMENT_SIZE,
CheckpointEnvironmentContext.START_MEMORY_SEGMENT_SIZE);
config.set(
ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT,
Duration.ofMillis(0));
config.set(TaskManagerOptions.BUFFER_DEBLOAT_ENABLED, false);
return config;
}),
UNALIGNED_1(
config -> {
config.set(ExecutionCheckpointingOptions.ENABLE_UNALIGNED, true);
config.set(
TaskManagerOptions.MEMORY_SEGMENT_SIZE,
CheckpointEnvironmentContext.START_MEMORY_SEGMENT_SIZE);
config.set(
ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT,
Duration.ofMillis(1));
config.set(TaskManagerOptions.BUFFER_DEBLOAT_ENABLED, false);
return config;
}),
ALIGNED(
config -> {
config.set(ExecutionCheckpointingOptions.ENABLE_UNALIGNED, false);
config.set(
TaskManagerOptions.MEMORY_SEGMENT_SIZE,
CheckpointEnvironmentContext.START_MEMORY_SEGMENT_SIZE);
config.set(
TaskManagerOptions.MIN_MEMORY_SEGMENT_SIZE,
CheckpointEnvironmentContext.MIN_MEMORY_SEGMENT_SIZE);
config.set(TaskManagerOptions.BUFFER_DEBLOAT_ENABLED, true);
config.set(
TaskManagerOptions.BUFFER_DEBLOAT_TARGET,
CheckpointEnvironmentContext.DEBLOATING_TARGET);
config.set(
TaskManagerOptions.BUFFER_DEBLOAT_PERIOD,
Duration.of(200, ChronoUnit.MILLIS));
config.set(TaskManagerOptions.BUFFER_DEBLOAT_SAMPLES, 5);
return config;
});
private final Function<Configuration, Configuration> configFunc;
CheckpointMode(Function<Configuration, Configuration> configFunc) {
this.configFunc = configFunc;
}
public Configuration configure(Configuration config) {
return configFunc.apply(config);
}
}
/** A simple wrapper to pass a {@link StreamGraph} along with ids of sources it contains. */
public static class StreamGraphWithSources {
private final StreamGraph streamGraph;
private final List<JobVertexID> sources;
public StreamGraphWithSources(StreamGraph streamGraph, List<JobVertexID> sources) {
this.streamGraph = streamGraph;
this.sources = sources;
}
public StreamGraph getStreamGraph() {
return streamGraph;
}
public List<JobVertexID> getSources() {
return sources;
}
}
/**
* The custom sink for processing records slowly to cause accumulate in-flight buffers even back
* pressure.
*/
public static class SlowDiscardSink<T> implements SinkFunction<T> {
@Override
public void invoke(T value, Context context) {
final long startTime = System.nanoTime();
while (System.nanoTime() - startTime < 200_000) {}
}
}
}
| 4,603 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/RemoteChannelThroughputBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.benchmark.functions.LongSource;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.apache.flink.streaming.api.graph.StreamingJobGraphGenerator;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
@OperationsPerInvocation(value = RemoteChannelThroughputBenchmark.RECORDS_PER_INVOCATION)
public class RemoteChannelThroughputBenchmark extends RemoteBenchmarkBase {
private static final String ALIGNED = "ALIGNED";
private static final String DEBLOAT = "DEBLOAT";
private static final String UNALIGNED = "UNALIGNED";
private static final int NUM_VERTICES = 3;
private static final long CHECKPOINT_INTERVAL_MS = 100;
public static void main(String[] args) throws RunnerException {
Options options =
new OptionsBuilder()
.verbosity(VerboseMode.NORMAL)
.include(RemoteChannelThroughputBenchmark.class.getCanonicalName())
.build();
new Runner(options).run();
}
@Benchmark
public void remoteRebalance(RemoteChannelThroughputBenchmarkContext context) throws Exception {
StreamExecutionEnvironment env = context.env;
env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
env.setParallelism(PARALLELISM);
env.getCheckpointConfig().enableUnalignedCheckpoints(context.mode.equals(UNALIGNED));
DataStreamSource<Long> source = env.addSource(new LongSource(RECORDS_PER_SUBTASK));
source.slotSharingGroup("source")
.rebalance()
.map((MapFunction<Long, Long>) value -> value)
.slotSharingGroup("map")
.rebalance()
.addSink(new DiscardingSink<>())
.slotSharingGroup("sink");
context.miniCluster.executeJobBlocking(
StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph()));
}
@State(Scope.Thread)
public static class RemoteChannelThroughputBenchmarkContext extends RemoteBenchmarkContext {
@Param({ALIGNED, UNALIGNED, DEBLOAT})
public String mode = ALIGNED;
@Override
protected Configuration createConfiguration() {
Configuration configuration = super.createConfiguration();
if (mode.equals(DEBLOAT)) {
configuration.setBoolean(TaskManagerOptions.BUFFER_DEBLOAT_ENABLED, true);
}
return configuration;
}
@Override
protected int getNumberOfVertices() {
return NUM_VERTICES;
}
}
}
| 4,604 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/BlockingPartitionBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;
import org.apache.flink.configuration.NettyShuffleEnvironmentOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.apache.flink.util.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
/** JMH throughput benchmark runner. */
@OperationsPerInvocation(value = BlockingPartitionBenchmark.RECORDS_PER_INVOCATION)
public class BlockingPartitionBenchmark extends BenchmarkBase {
public static final int RECORDS_PER_INVOCATION = 15_000_000;
public static void main(String[] args) throws RunnerException {
Options options =
new OptionsBuilder()
.verbosity(VerboseMode.NORMAL)
.include(".*" + BlockingPartitionBenchmark.class.getCanonicalName() + ".*")
.build();
new Runner(options).run();
}
@Benchmark
public void uncompressedFilePartition(UncompressedFileEnvironmentContext context)
throws Exception {
executeBenchmark(context.env);
}
@Benchmark
public void compressedFilePartition(CompressedFileEnvironmentContext context) throws Exception {
executeBenchmark(context.env);
}
@Benchmark
public void uncompressedMmapPartition(UncompressedMmapEnvironmentContext context)
throws Exception {
executeBenchmark(context.env);
}
@Benchmark
public void compressedSortPartition(CompressedSortEnvironmentContext context) throws Exception {
executeBenchmark(context.env);
}
@Benchmark
public void uncompressedSortPartition(UncompressedSortEnvironmentContext context) throws Exception {
executeBenchmark(context.env);
}
private void executeBenchmark(StreamExecutionEnvironment env) throws Exception {
StreamGraph streamGraph =
StreamGraphUtils.buildGraphForBatchJob(env, RECORDS_PER_INVOCATION);
env.execute(streamGraph);
}
/** Setup for the benchmark(s). */
public static class BlockingPartitionEnvironmentContext extends FlinkEnvironmentContext {
/**
* Parallelism of 1 causes the reads/writes to be always sequential and only covers the case
* of one reader. More parallelism should be more suitable for finding performance
* regressions of the code. Considering that the benchmarking machine has 4 CPU cores, we
* set the parallelism to 4.
*/
private final int parallelism = 4;
@Override
public void setUp() throws Exception {
super.setUp();
env.setParallelism(parallelism);
env.setBufferTimeout(-1);
}
protected Configuration createConfiguration(
boolean compressionEnabled, String subpartitionType, boolean isSortShuffle) {
Configuration configuration = super.createConfiguration();
if (isSortShuffle) {
configuration.setInteger(
NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_PARALLELISM, 1);
} else {
configuration.setInteger(
NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_PARALLELISM,
Integer.MAX_VALUE);
}
configuration.setBoolean(
NettyShuffleEnvironmentOptions.BATCH_SHUFFLE_COMPRESSION_ENABLED,
compressionEnabled);
configuration.setString(
NettyShuffleEnvironmentOptions.NETWORK_BLOCKING_SHUFFLE_TYPE, subpartitionType);
configuration.setString(
CoreOptions.TMP_DIRS,
FileUtils.getCurrentWorkingDirectory().toAbsolutePath().toString());
return configuration;
}
}
public static class UncompressedFileEnvironmentContext
extends BlockingPartitionEnvironmentContext {
@Override
protected Configuration createConfiguration() {
return createConfiguration(false, "file", false);
}
}
public static class CompressedFileEnvironmentContext
extends BlockingPartitionEnvironmentContext {
@Override
protected Configuration createConfiguration() {
return createConfiguration(true, "file", false);
}
}
public static class UncompressedMmapEnvironmentContext
extends BlockingPartitionEnvironmentContext {
@Override
protected Configuration createConfiguration() {
return createConfiguration(false, "mmap", false);
}
}
public static class CompressedSortEnvironmentContext
extends BlockingPartitionEnvironmentContext {
@Override
protected Configuration createConfiguration() {
return createConfiguration(true, "file", true);
}
}
public static class UncompressedSortEnvironmentContext
extends BlockingPartitionEnvironmentContext {
@Override
protected Configuration createConfiguration() {
return createConfiguration(false, "file", true);
}
}
}
| 4,605 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/WatermarkAggregationBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.runtime.source.coordinator.SourceCoordinatorAlignmentBenchmark;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
/** The watermark aggregation benchmark for source coordinator when enabling the watermark alignment. */
public class WatermarkAggregationBenchmark extends BenchmarkBase {
private static final int NUM_SUBTASKS = 5000;
private static final int ROUND_PER_INVOCATION = 10;
private SourceCoordinatorAlignmentBenchmark benchmark;
public static void main(String[] args) throws RunnerException {
Options options =
new OptionsBuilder()
.verbosity(VerboseMode.NORMAL)
.include(".*" + WatermarkAggregationBenchmark.class.getCanonicalName() + ".*")
.build();
new Runner(options).run();
}
@Setup(Level.Trial)
public void setup() throws Exception {
benchmark = new SourceCoordinatorAlignmentBenchmark();
benchmark.setup(NUM_SUBTASKS);
}
@Benchmark
@OperationsPerInvocation(NUM_SUBTASKS * ROUND_PER_INVOCATION)
public void aggregateWatermark() {
for (int round = 0; round < ROUND_PER_INVOCATION; round++) {
benchmark.sendReportedWatermarkToAllSubtasks();
}
}
@TearDown(Level.Trial)
public void teardown() throws Exception {
benchmark.teardown();
}
}
| 4,606 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/InputBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.benchmark.functions.LongSourceType;
import org.apache.flink.benchmark.functions.MultiplyByTwo;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
@OperationsPerInvocation(value = InputBenchmark.RECORDS_PER_INVOCATION)
public class InputBenchmark extends BenchmarkBase {
    public static final int RECORDS_PER_INVOCATION = 15_000_000;
    private static final long CHECKPOINT_INTERVAL_MS = 100;

    /** Source implementation under test; injected by JMH for each parameterized run. */
    @Param({"LEGACY", "F27_UNBOUNDED"})
    public LongSourceType sourceType;

    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + InputBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }

    /**
     * Applies the configuration shared by all benchmarks in this class (checkpoint interval,
     * parallelism 1) and creates the parameterized source. Extracted because both benchmark
     * methods previously duplicated this setup verbatim.
     */
    private DataStreamSource<Long> setUpSource(StreamExecutionEnvironment env) {
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
        env.setParallelism(1);
        return sourceType.source(env, RECORDS_PER_INVOCATION);
    }

    /** Measures throughput of a plain source -> map -> sink chain. */
    @Benchmark
    public void mapSink(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        DataStreamSource<Long> source = setUpSource(env);
        source.map(new MultiplyByTwo()).addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Same as {@link #mapSink} but with a rebalance (network exchange) in the middle. */
    @Benchmark
    public void mapRebalanceMapSink(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        DataStreamSource<Long> source = setUpSource(env);
        source.map(new MultiplyByTwo())
                .rebalance()
                .map((Long in) -> in)
                .addSink(new DiscardingSink<>());
        env.execute();
    }
}
| 4,607 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/MultiInputCheckpointingTimeBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.benchmark.operators.RecordSource;
import org.apache.flink.benchmark.operators.RecordSource.Record;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.functions.co.CoMapFunction;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.flink.api.common.eventtime.WatermarkStrategy.noWatermarks;
/**
 * Verifies that buffer debloating kicks in and properly downsizes buffers in the case of a
 * multi-input gate whose inputs have very different throughput. In the end a checkpoint should
 * take roughly (number of rebalance exchanges, 1 here) times the debloating target
 * (presumably DEBLOATING_TARGET from CheckpointEnvironmentContext — TODO confirm).
 */
@OutputTimeUnit(SECONDS)
public class MultiInputCheckpointingTimeBenchmark extends BenchmarkBase {
    // Payload sizes chosen to create a large throughput skew between the two sources.
    public static final MemorySize SMALL_RECORD_SIZE = MemorySize.parse("1b");
    public static final MemorySize BIG_RECORD_SIZE = MemorySize.parse("1kb");

    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(MultiInputCheckpointingTimeBenchmark.class.getCanonicalName())
                        .build();
        new Runner(options).run();
    }

    /** Measures the duration of a single checkpoint triggered on the already-running job. */
    @Benchmark
    public void checkpointMultiInput(MultiInputCheckpointEnvironmentContext context)
            throws Exception {
        final CompletableFuture<String> checkpoint =
                context.miniCluster.triggerCheckpoint(context.jobID);
        checkpoint.get();
    }

    /**
     * Builds the benchmark job: two sources with skewed record sizes, each rebalanced into a
     * shared two-input co-map followed by a slow sink.
     */
    @State(Scope.Thread)
    public static class MultiInputCheckpointEnvironmentContext
            extends CheckpointEnvironmentContext {
        // Number of distinct job vertices: two sources plus the chained co-map/sink.
        private static final int NUM_OF_VERTICES = 3;

        @Override
        protected CheckpointMode getMode() {
            return CheckpointMode.ALIGNED;
        }

        @Override
        protected StreamGraphWithSources getStreamGraph() {
            // Source emitting tiny (1-byte payload) records.
            DataStream<Record> source1 =
                    env.fromSource(
                                    new RecordSource(
                                            Integer.MAX_VALUE, (int) SMALL_RECORD_SIZE.getBytes()),
                                    noWatermarks(),
                                    RecordSource.class.getName())
                            .slotSharingGroup("source-small-records")
                            .rebalance();
            // Source emitting big (1 KiB payload) records.
            DataStream<Record> source2 =
                    env.fromSource(
                                    new RecordSource(
                                            Integer.MAX_VALUE, (int) BIG_RECORD_SIZE.getBytes()),
                                    noWatermarks(),
                                    RecordSource.class.getName())
                            .slotSharingGroup("source-big-records")
                            .rebalance();
            // Identity co-map; the interesting part is the two rebalanced input gates.
            source1.connect(source2)
                    .map(
                            new CoMapFunction<Record, Record, Record>() {
                                @Override
                                public Record map1(Record record) throws Exception {
                                    return record;
                                }
                                @Override
                                public Record map2(Record record) throws Exception {
                                    return record;
                                }
                            })
                    .name("co-map")
                    .slotSharingGroup("map-and-sink")
                    .addSink(new SlowDiscardSink<>())
                    .slotSharingGroup("map-and-sink");
            final StreamGraph streamGraph = env.getStreamGraph(false);
            final JobGraph jobGraph = streamGraph.getJobGraph();
            final List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
            // The first two topologically sorted vertices are the two source vertices.
            return new StreamGraphWithSources(
                    streamGraph, Arrays.asList(vertices.get(0).getID(), vertices.get(1).getID()));
        }

        @Override
        protected int getNumberOfTaskManagers() {
            // One task manager per parallel subtask of each vertex.
            return NUM_OF_VERTICES * JOB_PARALLELISM;
        }
    }
}
| 4,608 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/RocksStateBackendBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.benchmark.functions.IntLongApplications;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.contrib.streaming.state.RocksDBOptions;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import static org.openjdk.jmh.annotations.Scope.Thread;
@OperationsPerInvocation(value = RocksStateBackendBenchmark.RECORDS_PER_INVOCATION)
public class RocksStateBackendBenchmark extends StateBackendBenchmarkBase {
    public static final int RECORDS_PER_INVOCATION = 2_000_000;

    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + RocksStateBackendBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }

    /** Measures a windowed reduce running on top of the parameterized RocksDB state backend. */
    @Benchmark
    public void stateBackends(RocksStateBackendContext context) throws Exception {
        IntLongApplications.reduceWithWindow(
                context.source, TumblingEventTimeWindows.of(Time.seconds(10_000)));
        context.execute();
    }

    /** Per-trial context that sets up a RocksDB-based state backend. */
    @State(Thread)
    public static class RocksStateBackendContext extends StateBackendContext {
        /**
         * State backend under test; injected by JMH. Defaults to ROCKS rather than MEMORY so
         * that using the context outside of JMH's parameter injection still exercises a RocksDB
         * backend — the previous MEMORY default was not even part of the parameter set.
         */
        @Param({"ROCKS", "ROCKS_INC"})
        public StateBackend stateBackend = StateBackend.ROCKS;

        @Override
        public void setUp() throws Exception {
            super.setUp(stateBackend, RECORDS_PER_INVOCATION);
        }

        @Override
        protected Configuration createConfiguration() {
            Configuration configuration = super.createConfiguration();
            // Explicitly set the managed memory to 322122552 bytes, which is the default managed
            // memory of a 1GB task manager with 1 slot.
            configuration.set(
                    RocksDBOptions.FIX_PER_SLOT_MEMORY_SIZE, MemorySize.parse("322122552b"));
            return configuration;
        }
    }
}
| 4,609 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/MultipleInputBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.connector.source.Boundedness;
import org.apache.flink.api.connector.source.ReaderOutput;
import org.apache.flink.api.connector.source.SourceReader;
import org.apache.flink.api.connector.source.SourceReaderContext;
import org.apache.flink.api.connector.source.lib.NumberSequenceSource;
import org.apache.flink.api.connector.source.mocks.MockSource;
import org.apache.flink.api.connector.source.mocks.MockSourceReader;
import org.apache.flink.api.connector.source.mocks.MockSourceSplit;
import org.apache.flink.benchmark.functions.LongSource;
import org.apache.flink.benchmark.functions.QueuingLongSource;
import org.apache.flink.benchmark.operators.MultiplyByTwoOperatorFactory;
import org.apache.flink.core.io.InputStatus;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.MultipleConnectedStreams;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.operators.ChainingStrategy;
import org.apache.flink.streaming.api.transformations.MultipleInputTransformation;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.concurrent.CompletableFuture;
/**
 * Throughput benchmarks for a {@link MultipleInputTransformation}-based operator: both inputs
 * active, one input idle, and a chained FLIP-27 source with an idling companion source.
 */
public class MultipleInputBenchmark extends BenchmarkBase {
    // Shared with TwoInputBenchmark so two-input and multi-input results are comparable.
    public static final int RECORDS_PER_INVOCATION = TwoInputBenchmark.RECORDS_PER_INVOCATION;
    public static final int ONE_IDLE_RECORDS_PER_INVOCATION =
            TwoInputBenchmark.ONE_IDLE_RECORDS_PER_INVOCATION;
    public static final long CHECKPOINT_INTERVAL_MS = TwoInputBenchmark.CHECKPOINT_INTERVAL_MS;

    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + MultipleInputBenchmark.class.getSimpleName() + ".*")
                        .build();
        new Runner(options).run();
    }

    /**
     * Wires both sources into one multiple-input "multiply by two" operator and discards the
     * operator's output.
     */
    private static void connectAndDiscard(
            StreamExecutionEnvironment env, DataStream<?> source1, DataStream<?> source2) {
        MultipleInputTransformation<Long> transform =
                new MultipleInputTransformation<>(
                        "custom operator",
                        new MultiplyByTwoOperatorFactory(),
                        BasicTypeInfo.LONG_TYPE_INFO,
                        1);
        transform.addInput(source1.getTransformation());
        transform.addInput(source2.getTransformation());
        env.addOperator(transform);
        new MultipleConnectedStreams(env).transform(transform).addSink(new DiscardingSink<>());
    }

    /** Throughput with both inputs evenly producing half of the records each. */
    @Benchmark
    @OperationsPerInvocation(RECORDS_PER_INVOCATION)
    public void multiInputMapSink(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
        long numRecordsPerInput = RECORDS_PER_INVOCATION / 2;
        DataStreamSource<Long> source1 = env.addSource(new LongSource(numRecordsPerInput));
        DataStreamSource<Long> source2 = env.addSource(new LongSource(numRecordsPerInput));
        connectAndDiscard(env, source1, source2);
        env.execute();
    }

    /** Throughput when the second input emits a single record and then stays idle. */
    @Benchmark
    @OperationsPerInvocation(ONE_IDLE_RECORDS_PER_INVOCATION)
    public void multiInputOneIdleMapSink(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
        // QueuingLongSource keeps static state across invocations; reset it before each run.
        QueuingLongSource.reset();
        DataStreamSource<Long> source1 =
                env.addSource(new QueuingLongSource(1, ONE_IDLE_RECORDS_PER_INVOCATION - 1));
        DataStreamSource<Long> source2 = env.addSource(new QueuingLongSource(2, 1));
        connectAndDiscard(env, source1, source2);
        env.execute();
    }

    /** Throughput with the sources chained into the operator's task while one source idles. */
    @Benchmark
    @OperationsPerInvocation(RECORDS_PER_INVOCATION)
    public void multiInputChainedIdleSource(FlinkEnvironmentContext context) throws Exception {
        final StreamExecutionEnvironment env = context.env;
        env.getConfig().enableObjectReuse();
        final DataStream<Long> source1 =
                env.fromSource(
                        new NumberSequenceSource(1L, RECORDS_PER_INVOCATION),
                        WatermarkStrategy.noWatermarks(),
                        "source-1");
        final DataStreamSource<Integer> source2 =
                env.fromSource(new IdlingSource(1), WatermarkStrategy.noWatermarks(), "source-2");
        MultipleInputTransformation<Long> transform =
                new MultipleInputTransformation<>(
                        "custom operator",
                        new MultiplyByTwoOperatorFactory(),
                        BasicTypeInfo.LONG_TYPE_INFO,
                        1);
        transform.addInput(((DataStream<?>) source1).getTransformation());
        transform.addInput(((DataStream<?>) source2).getTransformation());
        // HEAD_WITH_SOURCES chains the sources into the multi-input operator's task.
        transform.setChainingStrategy(ChainingStrategy.HEAD_WITH_SOURCES);
        env.addOperator(transform);
        new MultipleConnectedStreams(env)
                .transform(transform)
                .addSink(new SinkClosingIdlingSource())
                .setParallelism(1);
        context.execute();
    }

    /**
     * A bounded source that emits nothing and stays unavailable until
     * {@link #signalCanFinish()} is called, at which point it reports end of input.
     *
     * <p>NOTE(review): {@code canFinish} is static and therefore shared across instances and
     * benchmark invocations, and {@link #reset()} is never called by
     * {@code multiInputChainedIdleSource} — confirm whether later invocations are expected to
     * start with an already-completed future.
     */
    private static class IdlingSource extends MockSource {
        private static CompletableFuture<Void> canFinish = new CompletableFuture<>();

        public IdlingSource(int numSplits) {
            super(Boundedness.BOUNDED, numSplits, true, true);
        }

        public static void signalCanFinish() {
            canFinish.complete(null);
        }

        public static void reset() {
            canFinish.completeExceptionally(new IllegalStateException("State has been reset"));
            canFinish = new CompletableFuture<>();
        }

        @Override
        public SourceReader<Integer, MockSourceSplit> createReader(
                SourceReaderContext readerContext) {
            return new MockSourceReader(true, true) {
                @Override
                public InputStatus pollNext(ReaderOutput<Integer> sourceOutput) {
                    // Never emits records; only signals end of input once allowed to finish.
                    if (canFinish.isDone() && !canFinish.isCompletedExceptionally()) {
                        return InputStatus.END_OF_INPUT;
                    } else {
                        return InputStatus.NOTHING_AVAILABLE;
                    }
                }

                @Override
                public synchronized CompletableFuture<Void> isAvailable() {
                    return canFinish;
                }
            };
        }
    }

    /** Sink that releases {@link IdlingSource} once the expected record count has been seen. */
    private static class SinkClosingIdlingSource implements SinkFunction<Long> {
        private int recordsSoFar = 0;

        @Override
        public void invoke(Long value) {
            if (++recordsSoFar >= RECORDS_PER_INVOCATION) {
                IdlingSource.signalCanFinish();
            }
        }
    }
}
| 4,610 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/SortingBoundedInputBenchmarks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.configuration.AlgorithmOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ExecutionOptions;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
import org.apache.flink.streaming.api.operators.AbstractStreamOperatorV2;
import org.apache.flink.streaming.api.operators.BoundedMultiInput;
import org.apache.flink.streaming.api.operators.BoundedOneInput;
import org.apache.flink.streaming.api.operators.ChainingStrategy;
import org.apache.flink.streaming.api.operators.Input;
import org.apache.flink.streaming.api.operators.MultipleInputStreamOperator;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
import org.apache.flink.streaming.api.operators.StreamOperator;
import org.apache.flink.streaming.api.operators.StreamOperatorFactory;
import org.apache.flink.streaming.api.operators.StreamOperatorParameters;
import org.apache.flink.streaming.api.operators.TwoInputStreamOperator;
import org.apache.flink.streaming.api.transformations.KeyedMultipleInputTransformation;
import org.apache.flink.streaming.runtime.streamrecord.LatencyMarker;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.streaming.runtime.watermarkstatus.WatermarkStatus;
import org.apache.flink.util.SplittableIterator;
import org.junit.Assert;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Set;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.openjdk.jmh.annotations.Scope.Thread;
/** An end to end test for sorted inputs for a keyed operator with bounded inputs. */
public class SortingBoundedInputBenchmarks extends BenchmarkBase {
private static final int RECORDS_PER_INVOCATION = 1_500_000;
private static final List<Integer> INDICES =
IntStream.range(0, 10).boxed().collect(Collectors.toList());
static {
Collections.shuffle(INDICES);
}
public static void main(String[] args) throws RunnerException {
Options options =
new OptionsBuilder()
.verbosity(VerboseMode.NORMAL)
.include(
".*"
+ SortingBoundedInputBenchmarks.class.getCanonicalName()
+ ".*")
.build();
new Runner(options).run();
}
@Benchmark
@OperationsPerInvocation(value = RECORDS_PER_INVOCATION)
public void sortedOneInput(SortingInputContext context) throws Exception {
StreamExecutionEnvironment env = context.env;
DataStreamSource<Integer> elements =
env.fromParallelCollection(
new InputGenerator(RECORDS_PER_INVOCATION), BasicTypeInfo.INT_TYPE_INFO);
SingleOutputStreamOperator<Long> counts =
elements.keyBy(element -> element)
.transform(
"Asserting operator",
BasicTypeInfo.LONG_TYPE_INFO,
new AssertingOperator());
counts.addSink(new DiscardingSink<>());
context.execute();
}
@Benchmark
@OperationsPerInvocation(value = RECORDS_PER_INVOCATION)
public void sortedTwoInput(SortingInputContext context) throws Exception {
StreamExecutionEnvironment env = context.env;
DataStreamSource<Integer> elements1 =
env.fromParallelCollection(
new InputGenerator(RECORDS_PER_INVOCATION / 2),
BasicTypeInfo.INT_TYPE_INFO);
DataStreamSource<Integer> elements2 =
env.fromParallelCollection(
new InputGenerator(RECORDS_PER_INVOCATION / 2),
BasicTypeInfo.INT_TYPE_INFO);
SingleOutputStreamOperator<Long> counts =
elements1
.connect(elements2)
.keyBy(element -> element, element -> element)
.transform(
"Asserting operator",
BasicTypeInfo.LONG_TYPE_INFO,
new AssertingTwoInputOperator());
counts.addSink(new DiscardingSink<>());
context.execute();
}
@Benchmark
@OperationsPerInvocation(value = RECORDS_PER_INVOCATION)
public void sortedMultiInput(SortingInputContext context) throws Exception {
StreamExecutionEnvironment env = context.env;
KeyedStream<Integer, Object> elements1 =
env.fromParallelCollection(
new InputGenerator(RECORDS_PER_INVOCATION / 3),
BasicTypeInfo.INT_TYPE_INFO)
.keyBy(el -> el);
KeyedStream<Integer, Object> elements2 =
env.fromParallelCollection(
new InputGenerator(RECORDS_PER_INVOCATION / 3),
BasicTypeInfo.INT_TYPE_INFO)
.keyBy(el -> el);
KeyedStream<Integer, Object> elements3 =
env.fromParallelCollection(
new InputGenerator(RECORDS_PER_INVOCATION / 3),
BasicTypeInfo.INT_TYPE_INFO)
.keyBy(el -> el);
KeyedMultipleInputTransformation<Long> assertingTransformation =
new KeyedMultipleInputTransformation<>(
"Asserting operator",
new AssertingThreeInputOperatorFactory(),
BasicTypeInfo.LONG_TYPE_INFO,
-1,
BasicTypeInfo.INT_TYPE_INFO);
assertingTransformation.addInput(elements1.getTransformation(), elements1.getKeySelector());
assertingTransformation.addInput(elements2.getTransformation(), elements2.getKeySelector());
assertingTransformation.addInput(elements3.getTransformation(), elements3.getKeySelector());
env.addOperator(assertingTransformation);
DataStream<Long> counts = new DataStream<>(env, assertingTransformation);
counts.addSink(new DiscardingSink<>());
context.execute();
}
@State(Thread)
public static class SortingInputContext extends FlinkEnvironmentContext {
@Override
protected Configuration createConfiguration() {
Configuration configuration = super.createConfiguration();
configuration.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.BATCH);
configuration.set(AlgorithmOptions.SORT_SPILLING_THRESHOLD, 0f);
return configuration;
}
}
private static final class ProcessedKeysOrderAsserter implements Serializable {
private final Set<Integer> seenKeys = new HashSet<>();
private long seenRecords = 0;
private Integer currentKey = null;
public void processElement(Integer element) {
this.seenRecords++;
if (!Objects.equals(element, currentKey)) {
if (!seenKeys.add(element)) {
Assert.fail("Received an out of order key: " + element);
}
currentKey = element;
}
}
public long getSeenRecords() {
return seenRecords;
}
}
private static class AssertingOperator extends AbstractStreamOperator<Long>
implements OneInputStreamOperator<Integer, Long>, BoundedOneInput {
private final ProcessedKeysOrderAsserter asserter = new ProcessedKeysOrderAsserter();
@Override
public void processElement(StreamRecord<Integer> element) {
asserter.processElement(element.getValue());
}
@Override
public void endInput() {
output.collect(new StreamRecord<>(asserter.getSeenRecords()));
}
}
private static class AssertingTwoInputOperator extends AbstractStreamOperator<Long>
implements TwoInputStreamOperator<Integer, Integer, Long>, BoundedMultiInput {
private final ProcessedKeysOrderAsserter asserter = new ProcessedKeysOrderAsserter();
private boolean input1Finished = false;
private boolean input2Finished = false;
@Override
public void processElement1(StreamRecord<Integer> element) {
asserter.processElement(element.getValue());
}
@Override
public void processElement2(StreamRecord<Integer> element) {
asserter.processElement(element.getValue());
}
@Override
public void endInput(int inputId) {
if (inputId == 1) {
input1Finished = true;
}
if (inputId == 2) {
input2Finished = true;
}
if (input1Finished && input2Finished) {
output.collect(new StreamRecord<>(asserter.getSeenRecords()));
}
}
}
private static class AssertingThreeInputOperator extends AbstractStreamOperatorV2<Long>
implements MultipleInputStreamOperator<Long>, BoundedMultiInput {
private final ProcessedKeysOrderAsserter asserter = new ProcessedKeysOrderAsserter();
private boolean input1Finished = false;
private boolean input2Finished = false;
private boolean input3Finished = false;
public AssertingThreeInputOperator(
StreamOperatorParameters<Long> parameters, int numberOfInputs) {
super(parameters, 3);
assert numberOfInputs == 3;
}
@Override
public void endInput(int inputId) {
if (inputId == 1) {
input1Finished = true;
}
if (inputId == 2) {
input2Finished = true;
}
if (inputId == 3) {
input3Finished = true;
}
if (input1Finished && input2Finished && input3Finished) {
output.collect(new StreamRecord<>(asserter.getSeenRecords()));
}
}
@Override
public List<Input> getInputs() {
return Arrays.asList(
new SingleInput(asserter::processElement),
new SingleInput(asserter::processElement),
new SingleInput(asserter::processElement));
}
}
private static class AssertingThreeInputOperatorFactory implements StreamOperatorFactory<Long> {
@Override
@SuppressWarnings("unchecked")
public <T extends StreamOperator<Long>> T createStreamOperator(
StreamOperatorParameters<Long> parameters) {
return (T) new AssertingThreeInputOperator(parameters, 3);
}
@Override
public ChainingStrategy getChainingStrategy() {
return ChainingStrategy.NEVER;
}
@Override
public void setChainingStrategy(ChainingStrategy strategy) {}
@Override
public Class<? extends StreamOperator> getStreamOperatorClass(ClassLoader classLoader) {
return AssertingThreeInputOperator.class;
}
}
private static class SingleInput implements Input<Integer> {
private final Consumer<Integer> recordConsumer;
private SingleInput(Consumer<Integer> recordConsumer) {
this.recordConsumer = recordConsumer;
}
@Override
public void processElement(StreamRecord<Integer> element) {
recordConsumer.accept(element.getValue());
}
@Override
public void processWatermark(org.apache.flink.streaming.api.watermark.Watermark mark) {}
@Override
public void processLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void setKeyContextElement(StreamRecord<Integer> record) {}
@Override
public void processWatermarkStatus(WatermarkStatus watermarkStatus) throws Exception {}
}
private static class InputGenerator extends SplittableIterator<Integer> {
private final long numberOfRecords;
private long generatedRecords;
private InputGenerator(long numberOfRecords) {
this.numberOfRecords = numberOfRecords;
}
@Override
@SuppressWarnings("unchecked")
public Iterator<Integer>[] split(int numPartitions) {
long numberOfRecordsPerPartition = numberOfRecords / numPartitions;
long remainder = numberOfRecords % numPartitions;
Iterator<Integer>[] iterators = new Iterator[numPartitions];
for (int i = 0; i < numPartitions - 1; i++) {
iterators[i] = new InputGenerator(numberOfRecordsPerPartition);
}
iterators[numPartitions - 1] =
new InputGenerator(numberOfRecordsPerPartition + remainder);
return iterators;
}
@Override
public int getMaximumNumberOfSplits() {
return (int) Math.min(numberOfRecords, Integer.MAX_VALUE);
}
@Override
public boolean hasNext() {
return generatedRecords < numberOfRecords;
}
@Override
public Integer next() {
if (hasNext()) {
generatedRecords++;
return INDICES.get((int) (generatedRecords % INDICES.size()));
}
return null;
}
}
}
| 4,611 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/SerializationFrameworkMiniBenchmarks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple8;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.benchmark.full.StringSerializationBenchmark;
import org.apache.flink.benchmark.functions.BaseSourceWithKeyRange;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.apache.flink.types.Row;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.Arrays;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
/** Benchmark for serializing POJOs and Tuples with different serialization frameworks. */
public class SerializationFrameworkMiniBenchmarks extends BenchmarkBase {
protected static final int RECORDS_PER_INVOCATION = 300_000;
    /** Runs all benchmarks declared in this class through the JMH runner. */
    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(
                                ".*"
                                        + SerializationFrameworkMiniBenchmarks.class
                                                .getCanonicalName()
                                        + ".*")
                        .build();
        new Runner(options).run();
    }
    /** Measures serialization of registered POJO records across a rebalance exchange. */
    @Benchmark
    @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
    public void serializerPojo(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        ExecutionConfig executionConfig = env.getConfig();
        executionConfig.registerPojoType(MyPojo.class);
        executionConfig.registerPojoType(MyOperation.class);
        env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }
    /**
     * Measures serialization of long (1 KiB) Strings across a rebalance exchange.
     *
     * <p>NOTE(review): the POJO type registrations look unnecessary for a String stream —
     * confirm whether they are intentional (e.g. to mirror {@code serializerPojo}'s config).
     */
    @Benchmark
    @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
    public void serializerHeavyString(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(1);
        ExecutionConfig executionConfig = env.getConfig();
        executionConfig.registerPojoType(MyPojo.class);
        executionConfig.registerPojoType(MyOperation.class);
        env.addSource(new LongStringSource(RECORDS_PER_INVOCATION, 12))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }
    /** Measures serialization of tuple records emitted by {@code TupleSource}. */
    @Benchmark
    @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
    public void serializerTuple(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        env.addSource(new TupleSource(RECORDS_PER_INVOCATION, 10))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }
    /** Measures serialization of the same POJO records when Kryo is forced as the serializer. */
    @Benchmark
    @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
    public void serializerKryo(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        ExecutionConfig executionConfig = env.getConfig();
        // Force Kryo even for types Flink could serialize as POJOs.
        executionConfig.enableForceKryo();
        executionConfig.registerKryoType(MyPojo.class);
        executionConfig.registerKryoType(MyOperation.class);
        env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }
@Benchmark
@OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
public void serializerAvro(FlinkEnvironmentContext context) throws Exception {
StreamExecutionEnvironment env = context.env;
env.setParallelism(4);
env.addSource(new AvroPojoSource(RECORDS_PER_INVOCATION, 10))
.rebalance()
.addSink(new DiscardingSink<>());
env.execute();
}
@Benchmark
@OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
public void serializerRow(FlinkEnvironmentContext context) throws Exception {
StreamExecutionEnvironment env = context.env;
env.setParallelism(4);
env.addSource(new RowSource(RECORDS_PER_INVOCATION, 10))
.rebalance()
.addSink(new DiscardingSink<>());
env.execute();
}
/** Source emitting a long String. */
public static class LongStringSource extends BaseSourceWithKeyRange<String> {
private static final long serialVersionUID = 3746240885982877398L;
private String[] templates;
public LongStringSource(int numEvents, int numKeys) {
super(numEvents, numKeys);
}
@Override
protected void init() {
super.init();
templates =
new String[] {
makeString(StringSerializationBenchmark.asciiChars, 1024),
makeString(StringSerializationBenchmark.russianChars, 1024),
makeString(StringSerializationBenchmark.chineseChars, 1024)
};
}
private String makeString(char[] symbols, int length) {
char[] buffer = new char[length];
Random random = ThreadLocalRandom.current();
Arrays.fill(buffer, symbols[random.nextInt(symbols.length)]);
return new String(buffer);
}
@Override
protected String getElement(int keyId) {
return templates[keyId % templates.length];
}
}
/** Source emitting a simple {@link MyPojo POJO}. */
public static class PojoSource extends BaseSourceWithKeyRange<MyPojo> {
private static final long serialVersionUID = 2941333602938145526L;
private transient MyPojo template;
public PojoSource(int numEvents, int numKeys) {
super(numEvents, numKeys);
}
@Override
protected void init() {
super.init();
template =
new MyPojo(
0,
"myName",
new String[] {"op1", "op2", "op3", "op4"},
new MyOperation[] {
new MyOperation(1, "op1"),
new MyOperation(2, "op2"),
new MyOperation(3, "op3")
},
1,
2,
3,
"null");
}
@Override
protected MyPojo getElement(int keyId) {
template.setId(keyId);
return template;
}
}
/**
* Source emitting a {@link org.apache.flink.benchmark.avro.MyPojo POJO} generated by an Avro
* schema.
*/
public static class AvroPojoSource
extends BaseSourceWithKeyRange<org.apache.flink.benchmark.avro.MyPojo> {
private static final long serialVersionUID = 2941333602938145526L;
private transient org.apache.flink.benchmark.avro.MyPojo template;
public AvroPojoSource(int numEvents, int numKeys) {
super(numEvents, numKeys);
}
@Override
protected void init() {
super.init();
template =
new org.apache.flink.benchmark.avro.MyPojo(
0,
"myName",
Arrays.asList("op1", "op2", "op3", "op4"),
Arrays.asList(
new org.apache.flink.benchmark.avro.MyOperation(1, "op1"),
new org.apache.flink.benchmark.avro.MyOperation(2, "op2"),
new org.apache.flink.benchmark.avro.MyOperation(3, "op3")),
1,
2,
3,
"null");
}
@Override
protected org.apache.flink.benchmark.avro.MyPojo getElement(int keyId) {
template.setId(keyId);
return template;
}
}
/** Source emitting a <tt>Tuple</tt> based on {@link MyPojo}. */
public static class TupleSource
extends BaseSourceWithKeyRange<
Tuple8<
Integer,
String,
String[],
Tuple2<Integer, String>[],
Integer,
Integer,
Integer,
Object>> {
private static final long serialVersionUID = 2941333602938145526L;
private transient Tuple8 template;
public TupleSource(int numEvents, int numKeys) {
super(numEvents, numKeys);
}
@SuppressWarnings("unchecked")
@Override
protected void init() {
super.init();
template =
MyPojo.createTuple(
0,
"myName",
new String[] {"op1", "op2", "op3", "op4"},
new Tuple2[] {
MyOperation.createTuple(1, "op1"),
MyOperation.createTuple(2, "op2"),
MyOperation.createTuple(3, "op3")
},
1,
2,
3,
"null");
}
@Override
protected Tuple8<
Integer,
String,
String[],
Tuple2<Integer, String>[],
Integer,
Integer,
Integer,
Object>
getElement(int keyId) {
template.setField(keyId, 0);
return template;
}
}
/** Source emitting a {@link Row} based on {@link MyPojo}. */
public static class RowSource extends BaseSourceWithKeyRange<Row>
implements ResultTypeQueryable<Row> {
private static final long serialVersionUID = 2941333602938145526L;
private transient Row template;
public RowSource(int numEvents, int numKeys) {
super(numEvents, numKeys);
}
@SuppressWarnings("unchecked")
@Override
protected void init() {
super.init();
template =
MyPojo.createRow(
0,
"myName",
new String[] {"op1", "op2", "op3", "op4"},
new Row[] {
MyOperation.createRow(1, "op1"),
MyOperation.createRow(2, "op2"),
MyOperation.createRow(3, "op3")
},
1,
2,
3,
"null");
}
@Override
protected Row getElement(int keyId) {
template.setField(0, keyId);
return template;
}
@Override
public TypeInformation<Row> getProducedType() {
return MyPojo.getProducedRowType();
}
}
/** Not so simple POJO. */
@SuppressWarnings({"WeakerAccess", "unused"})
public static class MyPojo {
public int id;
private String name;
private String[] operationNames;
private MyOperation[] operations;
private int otherId1;
private int otherId2;
private int otherId3;
private Object someObject;
public MyPojo() {}
public MyPojo(
int id,
String name,
String[] operationNames,
MyOperation[] operations,
int otherId1,
int otherId2,
int otherId3,
Object someObject) {
this.id = id;
this.name = name;
this.operationNames = operationNames;
this.operations = operations;
this.otherId1 = otherId1;
this.otherId2 = otherId2;
this.otherId3 = otherId3;
this.someObject = someObject;
}
public static Tuple8<
Integer,
String,
String[],
Tuple2<Integer, String>[],
Integer,
Integer,
Integer,
Object>
createTuple(
int id,
String name,
String[] operationNames,
Tuple2<Integer, String>[] operations,
int otherId1,
int otherId2,
int otherId3,
Object someObject) {
return Tuple8.of(
id, name, operationNames, operations, otherId1, otherId2, otherId3, someObject);
}
public static Row createRow(
int id,
String name,
String[] operationNames,
Row[] operations,
int otherId1,
int otherId2,
int otherId3,
Object someObject) {
return Row.of(
id, name, operationNames, operations, otherId1, otherId2, otherId3, someObject);
}
public static TypeInformation<Row> getProducedRowType() {
return Types.ROW(
Types.INT,
Types.STRING,
Types.OBJECT_ARRAY(Types.STRING),
Types.OBJECT_ARRAY(Types.ROW(Types.INT, Types.STRING)),
Types.INT,
Types.INT,
Types.INT,
Types.GENERIC(Object.class));
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String[] getOperationNames() {
return operationNames;
}
public void setOperationNames(String[] operationNames) {
this.operationNames = operationNames;
}
public MyOperation[] getOperations() {
return operations;
}
public void setOperations(MyOperation[] operations) {
this.operations = operations;
}
public int getOtherId1() {
return otherId1;
}
public void setOtherId1(int otherId1) {
this.otherId1 = otherId1;
}
public int getOtherId2() {
return otherId2;
}
public void setOtherId2(int otherId2) {
this.otherId2 = otherId2;
}
public int getOtherId3() {
return otherId3;
}
public void setOtherId3(int otherId3) {
this.otherId3 = otherId3;
}
public Object getSomeObject() {
return someObject;
}
public void setSomeObject(Object someObject) {
this.someObject = someObject;
}
}
/** Another POJO. */
@SuppressWarnings({"WeakerAccess", "unused"})
public static class MyOperation {
protected String name;
int id;
public MyOperation() {}
public MyOperation(int id, String name) {
this.id = id;
this.name = name;
}
public static Tuple2<Integer, String> createTuple(int id, String name) {
return Tuple2.of(id, name);
}
public static Row createRow(int id, String name) {
return Row.of(id, name);
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
| 4,612 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/StreamGraphUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.benchmark.functions.LongSource;
import org.apache.flink.runtime.jobgraph.JobType;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.apache.flink.streaming.api.graph.GlobalStreamExchangeMode;
import org.apache.flink.streaming.api.graph.StreamGraph;
/** Utilities for building respective graph for performing in benchmark. */
public class StreamGraphUtils {
    /**
     * Assembles a minimal source-to-sink topology and configures it to execute as a batch job:
     * chaining disabled and every exchange made blocking.
     *
     * @param env the execution environment used to build the topology
     * @param numRecords how many records the source emits
     * @return the configured {@link StreamGraph}
     */
    public static StreamGraph buildGraphForBatchJob(
            StreamExecutionEnvironment env, int numRecords) {
        DataStreamSource<Long> longSource = env.addSource(new LongSource(numRecords));
        longSource.addSink(new DiscardingSink<>());
        StreamGraph graph = env.getStreamGraph();
        // Disable chaining and force blocking edges so the benchmark exercises
        // bounded blocking partitions instead of pipelined in-JVM handover.
        graph.setChaining(false);
        graph.setGlobalStreamExchangeMode(GlobalStreamExchangeMode.ALL_EDGES_BLOCKING);
        graph.setJobType(JobType.BATCH);
        return graph;
    }
}
| 4,613 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/TwoInputBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.benchmark.functions.LongSource;
import org.apache.flink.benchmark.functions.QueuingLongSource;
import org.apache.flink.benchmark.operators.MultiplyByTwoCoStreamMap;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
/** Benchmarks for a two-input operator fed by two sources. */
public class TwoInputBenchmark extends BenchmarkBase {
    public static final int RECORDS_PER_INVOCATION = 25_000_000;
    public static final int ONE_IDLE_RECORDS_PER_INVOCATION = 15_000_000;
    public static final long CHECKPOINT_INTERVAL_MS = 100;
    /** Command-line entry point that runs only the two-input benchmarks. */
    public static void main(String[] args) throws RunnerException {
        final Options runnerOptions =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + TwoInputBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(runnerOptions).run();
    }
    /** Both inputs produce records at full speed. */
    @Benchmark
    @OperationsPerInvocation(value = TwoInputBenchmark.RECORDS_PER_INVOCATION)
    public void twoInputMapSink(FlinkEnvironmentContext context) throws Exception {
        final StreamExecutionEnvironment env = context.env;
        env.setParallelism(1);
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
        // A 1 ms buffer timeout stabilizes this benchmark: without it some JVM forks
        // run much slower than others, making the results unreliable.
        env.setBufferTimeout(1);
        final long recordsPerSource = RECORDS_PER_INVOCATION / 2;
        DataStreamSource<Long> firstInput = env.addSource(new LongSource(recordsPerSource));
        DataStreamSource<Long> secondInput = env.addSource(new LongSource(recordsPerSource));
        firstInput
                .connect(secondInput)
                .transform(
                        "custom operator",
                        TypeInformation.of(Long.class),
                        new MultiplyByTwoCoStreamMap())
                .addSink(new DiscardingSink<>());
        env.execute();
    }
    /** One input produces almost everything while the other stays idle after one record. */
    @Benchmark
    @OperationsPerInvocation(value = TwoInputBenchmark.ONE_IDLE_RECORDS_PER_INVOCATION)
    public void twoInputOneIdleMapSink(FlinkEnvironmentContext context) throws Exception {
        final StreamExecutionEnvironment env = context.env;
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
        env.setParallelism(1);
        QueuingLongSource.reset();
        DataStreamSource<Long> busyInput =
                env.addSource(new QueuingLongSource(1, ONE_IDLE_RECORDS_PER_INVOCATION - 1));
        DataStreamSource<Long> idleInput = env.addSource(new QueuingLongSource(2, 1));
        busyInput
                .connect(idleInput)
                .transform(
                        "custom operator",
                        TypeInformation.of(Long.class),
                        new MultiplyByTwoCoStreamMap())
                .addSink(new DiscardingSink<>());
        env.execute();
    }
}
| 4,614 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/BlockingPartitionRemoteChannelBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;
import org.apache.flink.configuration.NettyShuffleEnvironmentOptions;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.apache.flink.streaming.api.graph.StreamingJobGraphGenerator;
import org.apache.flink.util.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
@OperationsPerInvocation(value = BlockingPartitionRemoteChannelBenchmark.RECORDS_PER_INVOCATION)
public class BlockingPartitionRemoteChannelBenchmark extends RemoteBenchmarkBase {
private static final int NUM_VERTICES = 2;
public static void main(String[] args) throws RunnerException {
Options options =
new OptionsBuilder()
.verbosity(VerboseMode.NORMAL)
.include(BlockingPartitionRemoteChannelBenchmark.class.getCanonicalName())
.build();
new Runner(options).run();
}
@Benchmark
public void remoteFilePartition(RemoteFileEnvironmentContext context) throws Exception {
StreamGraph streamGraph =
StreamGraphUtils.buildGraphForBatchJob(context.env, RECORDS_PER_INVOCATION);
context.miniCluster.executeJobBlocking(
StreamingJobGraphGenerator.createJobGraph(streamGraph));
}
@Benchmark
public void remoteSortPartition(RemoteSortEnvironmentContext context) throws Exception {
StreamGraph streamGraph =
StreamGraphUtils.buildGraphForBatchJob(context.env, RECORDS_PER_INVOCATION);
context.miniCluster.executeJobBlocking(
StreamingJobGraphGenerator.createJobGraph(streamGraph));
}
/** Environment context for specific file based bounded blocking partition. */
public static class BlockingPartitionEnvironmentContext extends RemoteBenchmarkContext {
@Override
public void setUp() throws Exception {
super.setUp();
env.setParallelism(PARALLELISM);
env.setBufferTimeout(-1);
}
protected Configuration createConfiguration(boolean isSortShuffle) {
Configuration configuration = super.createConfiguration();
if (isSortShuffle) {
configuration.setInteger(
NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_PARALLELISM, 1);
} else {
configuration.setInteger(
NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_PARALLELISM,
Integer.MAX_VALUE);
}
configuration.setString(
NettyShuffleEnvironmentOptions.NETWORK_BLOCKING_SHUFFLE_TYPE, "file");
configuration.setString(
CoreOptions.TMP_DIRS,
FileUtils.getCurrentWorkingDirectory().toAbsolutePath().toString());
return configuration;
}
@Override
protected int getNumberOfVertices() {
return NUM_VERTICES;
}
}
public static class RemoteFileEnvironmentContext extends BlockingPartitionEnvironmentContext {
@Override
protected Configuration createConfiguration() {
return createConfiguration(false);
}
}
public static class RemoteSortEnvironmentContext extends BlockingPartitionEnvironmentContext {
@Override
protected Configuration createConfiguration() {
return createConfiguration(true);
}
}
}
| 4,615 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/ContinuousFileReaderOperatorBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.io.FileInputFormat;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FileInputSplit;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.testutils.OneShotLatch;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.functions.source.ContinuousFileReaderOperatorFactory;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit;
import joptsimple.internal.Strings;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@OperationsPerInvocation(value = ContinuousFileReaderOperatorBenchmark.RECORDS_PER_INVOCATION)
public class ContinuousFileReaderOperatorBenchmark extends BenchmarkBase {
    // Each invocation emits SPLITS_PER_INVOCATION splits; the reader turns each
    // split into LINES_PER_SPLIT records, for RECORDS_PER_INVOCATION total.
    private static final int SPLITS_PER_INVOCATION = 100;
    private static final int LINES_PER_SPLIT = 175_000;
    public static final int RECORDS_PER_INVOCATION = SPLITS_PER_INVOCATION * LINES_PER_SPLIT;
    // Single reusable split; MockInputFormat never touches the file system, so the
    // path "." is a placeholder only.
    private static final TimestampedFileInputSplit SPLIT =
            new TimestampedFileInputSplit(0, 0, new Path("."), 0, 0, new String[] {});
    private static final String LINE = Strings.repeat('0', 10);
    // Source should wait until all elements reach sink. Otherwise, END_OF_INPUT is sent once all
    // splits are emitted.
    // Thus, all subsequent reads in ContinuousFileReaderOperator would be made in CLOSING state in
    // a simple while-true loop (MailboxExecutor.isIdle is always true).
    // NOTE(review): static and non-final because it is shared between the source and
    // sink threads and re-armed via reset() at the start of every invocation.
    private static OneShotLatch TARGET_COUNT_REACHED_LATCH = new OneShotLatch();
    /** Command-line entry point that runs only this benchmark class. */
    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(
                                ".*"
                                        + ContinuousFileReaderOperatorBenchmark.class
                                                .getCanonicalName()
                                        + ".*")
                        .build();
        new Runner(options).run();
    }
    /** Measures the throughput of {@code ContinuousFileReaderOperator} on synthetic splits. */
    @Benchmark
    public void readFileSplit(FlinkEnvironmentContext context) throws Exception {
        // Re-arm the latch so a trigger from the previous invocation does not leak in.
        TARGET_COUNT_REACHED_LATCH.reset();
        StreamExecutionEnvironment env = context.env;
        env.setRestartStrategy(new RestartStrategies.NoRestartStrategyConfiguration());
        env.enableCheckpointing(100)
                .setParallelism(1)
                .addSource(new MockSourceFunction())
                .transform(
                        "fileReader",
                        TypeInformation.of(String.class),
                        new ContinuousFileReaderOperatorFactory<>(new MockInputFormat()))
                .addSink(new LimitedSink());
        env.execute();
    }
    /** Emits the fixed split SPLITS_PER_INVOCATION times, then blocks until the sink is done. */
    private static class MockSourceFunction implements SourceFunction<TimestampedFileInputSplit> {
        private volatile boolean isRunning = true;
        private int count = 0;
        @Override
        public void run(SourceContext<TimestampedFileInputSplit> ctx) {
            while (isRunning && count < SPLITS_PER_INVOCATION) {
                count++;
                // Emit under the checkpoint lock, as required by the legacy source contract.
                synchronized (ctx.getCheckpointLock()) {
                    ctx.collect(SPLIT);
                }
            }
            // Keep the source alive until the sink has counted every record (see the
            // class-level comment on TARGET_COUNT_REACHED_LATCH), polling so cancel()
            // is still honored.
            while (isRunning) {
                try {
                    TARGET_COUNT_REACHED_LATCH.await(100, TimeUnit.MILLISECONDS);
                    return;
                } catch (InterruptedException e) {
                    if (!isRunning) {
                        // Cancelled: restore the interrupt flag for the caller.
                        Thread.currentThread().interrupt();
                    }
                } catch (TimeoutException e) {
                    // continue waiting
                }
            }
        }
        @Override
        public void cancel() {
            isRunning = false;
        }
    }
    /** Input format producing LINES_PER_SPLIT copies of LINE per split, no real file I/O. */
    private static class MockInputFormat extends FileInputFormat<String> {
        private transient int count = 0;
        @Override
        public boolean reachedEnd() {
            return count >= ContinuousFileReaderOperatorBenchmark.LINES_PER_SPLIT;
        }
        @Override
        public String nextRecord(String s) {
            count++;
            return LINE;
        }
        @Override
        public void open(FileInputSplit fileSplit) {
            count = 0;
            // prevent super from accessing file
        }
        @Override
        public void configure(Configuration parameters) {
            // prevent super from requiring certain settings (input.file.path)
        }
    }
    /** Sink that trips the shared latch once all expected records have arrived. */
    private static class LimitedSink implements SinkFunction<String> {
        private int count;
        @Override
        public void invoke(String value, Context context) {
            if (++count == RECORDS_PER_INVOCATION) {
                TARGET_COUNT_REACHED_LATCH.trigger();
            }
        }
    }
}
| 4,616 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/CheckpointingTimeBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.benchmark.operators.RecordSource;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.Collections;
import java.util.concurrent.CompletableFuture;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.flink.api.common.eventtime.WatermarkStrategy.noWatermarks;
/**
* The test verifies that the debloating kicks in and properly downsizes buffers. In the end the
* checkpoint should take ~2(number of rebalance) * DEBLOATING_TARGET.
*
* <p>Some info about the chosen numbers:
*
* <ul>
* <li>The minimal memory segment size is decreased (256b) so that the scaling possibility is
* higher. Memory segments start with 8kb
* <li>A memory segment of the minimal size fits ~9 records (of size 29b), each record takes
* ~200ns to be processed by the sink
* <li>We have 2 (exclusive buffers) * 4 (parallelism) + 8 floating = 64 buffers per gate, with
* 300 ms debloating target and ~200ns/record processing speed, we can buffer 1500/64 = ~24
* records in a buffer after debloating which means the size of a buffer (24 * 29 = 696) is
* slightly above the minimal memory segment size.
* <li>The buffer debloating target of 300ms means a checkpoint should take ~2(number of
* exchanges)*300ms=~600ms
* </ul>
*/
@OutputTimeUnit(SECONDS)
public class CheckpointingTimeBenchmark extends BenchmarkBase {
    // Tiny records for the ALIGNED (buffer-debloating) case, large records for the
    // UNALIGNED cases; see the class-level javadoc for the sizing rationale.
    public static final MemorySize DEBLOATING_RECORD_SIZE = MemorySize.parse("1b");
    public static final MemorySize UNALIGNED_RECORD_SIZE = MemorySize.parse("1kb");
    /** Command-line entry point that runs only this benchmark class. */
    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(CheckpointingTimeBenchmark.class.getCanonicalName())
                        .build();
        new Runner(options).run();
    }
    /** Triggers one checkpoint on the running job and waits for it to complete. */
    @Benchmark
    public void checkpointSingleInput(SingleInputCheckpointEnvironmentContext context)
            throws Exception {
        final CompletableFuture<String> checkpoint =
                context.miniCluster.triggerCheckpoint(context.jobID);
        checkpoint.get();
    }
    /**
     * Context building the source -> map -> sink job (three vertices, rebalanced edges) whose
     * checkpoint duration is measured. The checkpoint mode and most setup logic come from the
     * parent {@code CheckpointEnvironmentContext} (defined elsewhere in this project).
     */
    @State(Scope.Thread)
    public static class SingleInputCheckpointEnvironmentContext
            extends CheckpointEnvironmentContext {
        public static final int NUM_OF_VERTICES = 3;
        // JMH runs the benchmark once per mode: aligned (with debloating) and the
        // two unaligned variants.
        @Param({"ALIGNED", "UNALIGNED", "UNALIGNED_1"})
        public CheckpointMode mode;
        @Override
        protected CheckpointMode getMode() {
            return mode;
        }
        @Override
        protected StreamGraphWithSources getStreamGraph() {
            DataStreamSource<RecordSource.Record> source =
                    env.fromSource(
                            new RecordSource(Integer.MAX_VALUE, (int) getRecordSize().getBytes()),
                            noWatermarks(),
                            RecordSource.class.getName());
            // Distinct slot-sharing groups force each vertex into its own slot, and
            // rebalance() forces network exchanges between them.
            source.slotSharingGroup("source")
                    .rebalance()
                    .map((MapFunction<RecordSource.Record, RecordSource.Record>) value -> value)
                    .slotSharingGroup("map")
                    .rebalance()
                    .addSink(new SlowDiscardSink<>())
                    .slotSharingGroup("sink");
            final StreamGraph streamGraph = env.getStreamGraph(false);
            // The source vertex id is needed by the caller; topological order puts
            // the source first.
            final JobVertexID sourceId =
                    streamGraph
                            .getJobGraph()
                            .getVerticesSortedTopologicallyFromSources()
                            .get(0)
                            .getID();
            return new StreamGraphWithSources(streamGraph, Collections.singletonList(sourceId));
        }
        // Record size depends on the mode: small records drive buffer debloating in
        // the aligned case, large records stress unaligned checkpoints.
        private MemorySize getRecordSize() {
            return mode == CheckpointMode.ALIGNED
                    ? CheckpointingTimeBenchmark.DEBLOATING_RECORD_SIZE
                    : CheckpointingTimeBenchmark.UNALIGNED_RECORD_SIZE;
        }
        @Override
        protected int getNumberOfTaskManagers() {
            // One TM per subtask: 3 vertices times the job parallelism
            // (JOB_PARALLELISM is presumably declared in the parent context — confirm there).
            return NUM_OF_VERTICES * JOB_PARALLELISM;
        }
    }
}
| 4,617 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/KeyByBenchmarks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.benchmark.functions.BaseSourceWithKeyRange;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
/** Benchmark for keyBy() on tuples and arrays. */
public class KeyByBenchmarks extends BenchmarkBase {
    private static final int TUPLE_RECORDS_PER_INVOCATION = 15_000_000;
    private static final int ARRAY_RECORDS_PER_INVOCATION = 7_000_000;
    /** Command-line entry point running only the keyBy benchmarks. */
    public static void main(String[] args) throws RunnerException {
        final Options runnerOptions =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + KeyByBenchmarks.class.getCanonicalName() + ".*")
                        .build();
        new Runner(runnerOptions).run();
    }
    /** Measures keyBy() on {@code Tuple2} records, keyed by the first field. */
    @Benchmark
    @OperationsPerInvocation(value = KeyByBenchmarks.TUPLE_RECORDS_PER_INVOCATION)
    public void tupleKeyBy(FlinkEnvironmentContext context) throws Exception {
        final StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        final IncreasingTupleSource tupleSource =
                new IncreasingTupleSource(TUPLE_RECORDS_PER_INVOCATION, 10);
        env.addSource(tupleSource).keyBy(0).addSink(new DiscardingSink<>());
        env.execute();
    }
    /** Measures keyBy() on {@code int[]} records, keyed by the first element. */
    @Benchmark
    @OperationsPerInvocation(value = KeyByBenchmarks.ARRAY_RECORDS_PER_INVOCATION)
    public void arrayKeyBy(FlinkEnvironmentContext context) throws Exception {
        final StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        final IncreasingArraySource arraySource =
                new IncreasingArraySource(ARRAY_RECORDS_PER_INVOCATION, 10);
        env.addSource(arraySource).keyBy(0).addSink(new DiscardingSink<>());
        env.execute();
    }
    /** Source cycling through {@code numKeys} distinct tuple keys. */
    private static class IncreasingTupleSource
            extends BaseSourceWithKeyRange<Tuple2<Integer, Integer>> {
        private static final long serialVersionUID = 2941333602938145526L;
        IncreasingTupleSource(int numEvents, int numKeys) {
            super(numEvents, numKeys);
        }
        @Override
        protected Tuple2<Integer, Integer> getElement(int keyId) {
            return Tuple2.of(keyId, 1);
        }
    }
    /** Source cycling through {@code numKeys} distinct array keys. */
    private static class IncreasingArraySource extends BaseSourceWithKeyRange<int[]> {
        private static final long serialVersionUID = -7883758559005221998L;
        IncreasingArraySource(int numEvents, int numKeys) {
            super(numEvents, numKeys);
        }
        @Override
        protected int[] getElement(int keyId) {
            return new int[] {keyId, 1};
        }
    }
}
| 4,618 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/RemoteBenchmarkBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
/** Benchmark base for setting up the cluster to perform remote network shuffle. */
public abstract class RemoteBenchmarkBase extends BenchmarkBase {

    protected static final int PARALLELISM = 4;
    protected static final int RECORDS_PER_SUBTASK = 10_000_000;
    protected static final int RECORDS_PER_INVOCATION = PARALLELISM * RECORDS_PER_SUBTASK;

    /** Environment context that provisions one single-slot task manager per subtask. */
    public abstract static class RemoteBenchmarkContext extends FlinkEnvironmentContext {

        /** @return the number of vertices the respective job graph contains. */
        abstract int getNumberOfVertices();

        @Override
        protected int getNumberOfTaskManagers() {
            return PARALLELISM * getNumberOfVertices();
        }

        @Override
        protected int getNumberOfSlotsPerTaskManager() {
            // A single slot per task manager forces every data exchange over the network.
            return 1;
        }
    }
}
| 4,619 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/full/PojoSerializationBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.full;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.java.typeutils.runtime.kryo.KryoSerializer;
import org.apache.flink.benchmark.BenchmarkBase;
import org.apache.flink.benchmark.SerializationFrameworkMiniBenchmarks;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputView;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
import org.apache.flink.formats.avro.typeutils.AvroSerializer;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@BenchmarkMode({Mode.Throughput})
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class PojoSerializationBenchmark extends BenchmarkBase {
    // Template POJO used by the Flink-POJO and Kryo benchmarks; populated in setup().
    SerializationFrameworkMiniBenchmarks.MyPojo pojo;
    // Avro-generated equivalent of the POJO above; used by the Avro benchmarks.
    org.apache.flink.benchmark.avro.MyPojo avroPojo;
    ExecutionConfig config = new ExecutionConfig();
    // Flink's reflection-based POJO serializer for MyPojo (note: 'config' must be
    // initialized before these field initializers run — declaration order matters).
    TypeSerializer<SerializationFrameworkMiniBenchmarks.MyPojo> pojoSerializer =
            TypeInformation.of(SerializationFrameworkMiniBenchmarks.MyPojo.class)
                    .createSerializer(config);
    // Kryo serializer for the same type, for comparison against the POJO serializer.
    TypeSerializer<SerializationFrameworkMiniBenchmarks.MyPojo> kryoSerializer =
            new KryoSerializer<>(SerializationFrameworkMiniBenchmarks.MyPojo.class, config);
    // Avro serializer for the generated Avro MyPojo.
    TypeSerializer<org.apache.flink.benchmark.avro.MyPojo> avroSerializer =
            new AvroSerializer<>(org.apache.flink.benchmark.avro.MyPojo.class);
    // Pre-serialized payloads for the read benchmarks; each is reset() before deserializing
    // so every invocation reads the same bytes from the start.
    ByteArrayInputStream pojoBuffer;
    ByteArrayInputStream avroBuffer;
    ByteArrayInputStream kryoBuffer;

    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + PojoSerializationBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }

    /**
     * Builds the template objects and pre-serializes them once so the read benchmarks only
     * measure deserialization.
     */
    @Setup
    public void setup() throws IOException {
        pojo =
                new SerializationFrameworkMiniBenchmarks.MyPojo(
                        0,
                        "myName",
                        new String[] {"op1", "op2", "op3", "op4"},
                        new SerializationFrameworkMiniBenchmarks.MyOperation[] {
                            new SerializationFrameworkMiniBenchmarks.MyOperation(1, "op1"),
                            new SerializationFrameworkMiniBenchmarks.MyOperation(2, "op2"),
                            new SerializationFrameworkMiniBenchmarks.MyOperation(3, "op3")
                        },
                        1,
                        2,
                        3,
                        "null");
        avroPojo =
                new org.apache.flink.benchmark.avro.MyPojo(
                        0,
                        "myName",
                        Arrays.asList("op1", "op2", "op3", "op4"),
                        Arrays.asList(
                                new org.apache.flink.benchmark.avro.MyOperation(1, "op1"),
                                new org.apache.flink.benchmark.avro.MyOperation(2, "op2"),
                                new org.apache.flink.benchmark.avro.MyOperation(3, "op3")),
                        1,
                        2,
                        3,
                        "null");
        pojoBuffer = new ByteArrayInputStream(write(pojoSerializer, pojo));
        avroBuffer = new ByteArrayInputStream(write(avroSerializer, avroPojo));
        kryoBuffer = new ByteArrayInputStream(write(kryoSerializer, pojo));
    }

    /** Measures serialization throughput of Flink's POJO serializer. */
    @Benchmark
    public byte[] writePojo() throws IOException {
        return write(pojoSerializer, pojo);
    }

    /** Measures serialization throughput of the Avro serializer. */
    @Benchmark
    public byte[] writeAvro() throws IOException {
        return write(avroSerializer, avroPojo);
    }

    /** Measures serialization throughput of the Kryo serializer. */
    @Benchmark
    public byte[] writeKryo() throws IOException {
        return write(kryoSerializer, pojo);
    }

    /** Measures deserialization throughput of Flink's POJO serializer. */
    @Benchmark
    public SerializationFrameworkMiniBenchmarks.MyPojo readPojo() throws IOException {
        pojoBuffer.reset();
        return pojoSerializer.deserialize(new DataInputViewStreamWrapper(pojoBuffer));
    }

    /** Measures deserialization throughput of the Kryo serializer. */
    @Benchmark
    public SerializationFrameworkMiniBenchmarks.MyPojo readKryo() throws IOException {
        kryoBuffer.reset();
        return kryoSerializer.deserialize(new DataInputViewStreamWrapper(kryoBuffer));
    }

    /** Measures deserialization throughput of the Avro serializer. */
    @Benchmark
    public org.apache.flink.benchmark.avro.MyPojo readAvro() throws IOException {
        avroBuffer.reset();
        return avroSerializer.deserialize(new DataInputViewStreamWrapper(avroBuffer));
    }

    /** Serializes {@code value} with the given serializer and returns the resulting bytes. */
    private <T> byte[] write(TypeSerializer<T> serializer, T value) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputView out = new DataOutputViewStreamWrapper(buffer);
        serializer.serialize(value, out);
        return buffer.toByteArray();
    }
}
| 4,620 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/full/StringSerializationBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.full;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.benchmark.BenchmarkBase;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputView;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Random;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@BenchmarkMode({Mode.Throughput})
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class StringSerializationBenchmark extends BenchmarkBase {
    /** Alphabet for the "ascii" variant (single-byte UTF-8 code points). */
    public static final char[] asciiChars =
            "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890".toCharArray();

    /** Alphabet for the "russian" variant (multi-byte UTF-8 code points). */
    public static final char[] russianChars =
            "йцукенгшщзхъфывапролджэячсмитьбюЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ".toCharArray();

    /** Alphabet for the "chinese" variant (three-byte UTF-8 code points). */
    public static final char[] chineseChars =
            "的是不了人我在有他这为之大来以个中上们到国说和地也子要时道出而于就下得可你年生".toCharArray();

    @Param({"ascii", "russian", "chinese"})
    public String type;

    @Param({"4", "128", "16384"})
    public String lengthStr;

    // Parsed from lengthStr in setup().
    int length;
    // The string serialized/deserialized by the benchmarks; generated once per trial.
    String input;
    ExecutionConfig config = new ExecutionConfig();
    TypeSerializer<String> serializer = TypeInformation.of(String.class).createSerializer(config);
    // Pre-serialized form of 'input' for the read benchmark; reset() before each read.
    ByteArrayInputStream serializedBuffer;
    DataInputView serializedStream;

    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(
                                ".*" + StringSerializationBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }

    /**
     * Generates the input string for the selected charset/length and pre-serializes it for the
     * read benchmark.
     *
     * @throws IllegalArgumentException if {@code type} names an unknown charset variant
     */
    @Setup
    public void setup() throws IOException {
        length = Integer.parseInt(lengthStr);
        switch (type) {
            case "ascii":
                input = generate(asciiChars, length);
                break;
            case "russian":
                input = generate(russianChars, length);
                break;
            case "chinese":
                input = generate(chineseChars, length);
                break;
            default:
                // Fixed: previously the message was missing the separating space
                // ("asciicharset is not supported").
                throw new IllegalArgumentException(type + " charset is not supported");
        }
        byte[] stringBytes = stringWrite();
        serializedBuffer = new ByteArrayInputStream(stringBytes);
        serializedStream = new DataInputViewStreamWrapper(serializedBuffer);
    }

    /** Measures serialization throughput of Flink's String serializer. */
    @Benchmark
    public byte[] stringWrite() throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputView out = new DataOutputViewStreamWrapper(buffer);
        serializer.serialize(input, out);
        return buffer.toByteArray();
    }

    /** Measures deserialization throughput of Flink's String serializer. */
    @Benchmark
    public String stringRead() throws IOException {
        serializedBuffer.reset();
        return serializer.deserialize(serializedStream);
    }

    /** Builds a random string of {@code length} characters drawn from {@code charset}. */
    private String generate(char[] charset, int length) {
        char[] buffer = new char[length];
        // Fixed seed so every trial/fork benchmarks an identical input string, keeping
        // measurements comparable across runs.
        Random random = new Random(42);
        for (int i = 0; i < length; i++) {
            buffer[i] = charset[random.nextInt(charset.length)];
        }
        return new String(buffer);
    }
}
| 4,621 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/full/SerializationFrameworkAllBenchmarks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.full;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.benchmark.FlinkEnvironmentContext;
import org.apache.flink.benchmark.SerializationFrameworkMiniBenchmarks;
import org.apache.flink.benchmark.functions.BaseSourceWithKeyRange;
import org.apache.flink.benchmark.functions.ScalaADTSource;
import org.apache.flink.formats.avro.typeutils.GenericRecordAvroTypeInfo;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import com.twitter.chill.protobuf.ProtobufSerializer;
import com.twitter.chill.thrift.TBaseSerializer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
/** Benchmark for serializing POJOs and Tuples with different serialization frameworks. */
public class SerializationFrameworkAllBenchmarks extends SerializationFrameworkMiniBenchmarks {
    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(
                                ".*"
                                        + SerializationFrameworkAllBenchmarks.class
                                                .getCanonicalName()
                                        + ".*")
                        .build();
        new Runner(options).run();
    }

    /** Flink's reflection-based POJO serializer, without any up-front type registration. */
    @Benchmark
    @OperationsPerInvocation(value = RECORDS_PER_INVOCATION)
    public void serializerPojoWithoutRegistration(FlinkEnvironmentContext context)
            throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Kryo serialization forced for all types, without registering the POJO classes. */
    @Benchmark
    @OperationsPerInvocation(value = RECORDS_PER_INVOCATION)
    public void serializerKryoWithoutRegistration(FlinkEnvironmentContext context)
            throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        env.getConfig().enableForceKryo();
        env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Avro reflect serialization forced for the plain POJO. */
    @Benchmark
    @OperationsPerInvocation(value = RECORDS_PER_INVOCATION)
    public void serializerAvroReflect(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        env.getConfig().enableForceAvro();
        env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Avro GenericRecord serialization using the schema loaded from the classpath. */
    @Benchmark
    @OperationsPerInvocation(value = RECORDS_PER_INVOCATION)
    public void serializerAvroGeneric(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        Schema schema = AvroGenericRecordSource.loadSchema();
        env.addSource(new AvroGenericRecordSource(RECORDS_PER_INVOCATION, 10, schema))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Serialization of a Scala algebraic data type via its explicit type information. */
    @Benchmark
    @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
    public void serializerScalaADT(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        env.addSource(new ScalaADTSource(RECORDS_PER_INVOCATION), ScalaADTSource.adtTypeInfo())
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Kryo with chill-thrift's TBaseSerializer registered for the Thrift-generated classes. */
    @Benchmark
    @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
    public void serializerKryoThrift(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        ExecutionConfig executionConfig = env.getConfig();
        executionConfig.enableForceKryo();
        executionConfig.addDefaultKryoSerializer(
                org.apache.flink.benchmark.thrift.MyPojo.class, TBaseSerializer.class);
        executionConfig.addDefaultKryoSerializer(
                org.apache.flink.benchmark.thrift.MyOperation.class, TBaseSerializer.class);
        env.addSource(new ThriftPojoSource(RECORDS_PER_INVOCATION, 10))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Kryo with chill-protobuf's ProtobufSerializer registered for the generated classes. */
    @Benchmark
    @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
    public void serializerKryoProtobuf(FlinkEnvironmentContext context) throws Exception {
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(4);
        ExecutionConfig executionConfig = env.getConfig();
        executionConfig.enableForceKryo();
        executionConfig.registerTypeWithKryoSerializer(
                org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo.class,
                ProtobufSerializer.class);
        executionConfig.registerTypeWithKryoSerializer(
                org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation.class,
                ProtobufSerializer.class);
        env.addSource(new ProtobufPojoSource(RECORDS_PER_INVOCATION, 10))
                .rebalance()
                .addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Source emitting an Avro GenericRecord. */
    public static class AvroGenericRecordSource extends BaseSourceWithKeyRange<GenericRecord>
            implements ResultTypeQueryable<GenericRecord> {
        private static final long serialVersionUID = 2941333602938145526L;

        private final GenericRecordAvroTypeInfo producedType;
        // Schema kept as a String because Avro's Schema itself is not serializable here;
        // re-parsed in init() after the source is deserialized on the task manager.
        private final String schemaString;
        private transient Schema myPojoSchema;
        // Reusable record instance; only the "id" field changes per emitted element.
        private transient GenericRecord template;

        public AvroGenericRecordSource(int numEvents, int numKeys, Schema schema) {
            super(numEvents, numKeys);
            this.producedType = new GenericRecordAvroTypeInfo(schema);
            this.myPojoSchema = schema;
            this.schemaString = schema.toString();
        }

        /** Loads the benchmark schema from the classpath resource {@code avro/mypojo.avsc}. */
        private static Schema loadSchema() throws IOException {
            ClassLoader classLoader = ClassLoader.getSystemClassLoader();
            try (InputStream is = classLoader.getResourceAsStream("avro/mypojo.avsc")) {
                if (is == null) {
                    throw new FileNotFoundException("File 'mypojo.avsc' not found");
                }
                return new Schema.Parser().parse(is);
            }
        }

        @Override
        protected void init() {
            super.init();
            // After deserialization the transient schema is null — rebuild it from the string.
            if (myPojoSchema == null) {
                this.myPojoSchema = new Schema.Parser().parse(schemaString);
            }
            Schema myOperationSchema =
                    myPojoSchema.getField("operations").schema().getElementType();
            template = new GenericData.Record(myPojoSchema);
            template.put("id", 0);
            template.put("name", "myName");
            template.put("operationName", Arrays.asList("op1", "op2", "op3", "op4"));
            GenericData.Record op1 = new GenericData.Record(myOperationSchema);
            op1.put("id", 1);
            op1.put("name", "op1");
            GenericData.Record op2 = new GenericData.Record(myOperationSchema);
            op2.put("id", 2);
            op2.put("name", "op2");
            GenericData.Record op3 = new GenericData.Record(myOperationSchema);
            op3.put("id", 3);
            op3.put("name", "op3");
            template.put("operations", Arrays.asList(op1, op2, op3));
            template.put("otherId1", 1);
            template.put("otherId2", 2);
            template.put("otherId3", 3);
            template.put("nullable", "null");
        }

        @Override
        protected GenericRecord getElement(int keyId) {
            // Mutates and returns the shared template — avoids per-record allocation.
            template.put("id", keyId);
            return template;
        }

        @Override
        public TypeInformation<GenericRecord> getProducedType() {
            return producedType;
        }
    }

    /**
     * Source emitting a {@link org.apache.flink.benchmark.thrift.MyPojo POJO} generated by an
     * Apache Thrift schema.
     */
    public static class ThriftPojoSource
            extends BaseSourceWithKeyRange<org.apache.flink.benchmark.thrift.MyPojo> {
        private static final long serialVersionUID = 2941333602938145526L;

        // Reusable instance; only the id field changes per emitted element.
        private transient org.apache.flink.benchmark.thrift.MyPojo template;

        public ThriftPojoSource(int numEvents, int numKeys) {
            super(numEvents, numKeys);
        }

        @Override
        protected void init() {
            super.init();
            template =
                    new org.apache.flink.benchmark.thrift.MyPojo(
                            0,
                            "myName",
                            Arrays.asList("op1", "op2", "op3", "op4"),
                            Arrays.asList(
                                    new org.apache.flink.benchmark.thrift.MyOperation(1, "op1"),
                                    new org.apache.flink.benchmark.thrift.MyOperation(2, "op2"),
                                    new org.apache.flink.benchmark.thrift.MyOperation(3, "op3")),
                            1,
                            2,
                            3);
            template.setSomeObject("null");
        }

        @Override
        protected org.apache.flink.benchmark.thrift.MyPojo getElement(int keyId) {
            template.setId(keyId);
            return template;
        }
    }

    /**
     * Source emitting a {@link org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo POJO}
     * generated by a Protobuf schema.
     */
    public static class ProtobufPojoSource
            extends BaseSourceWithKeyRange<
                    org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo> {
        private static final long serialVersionUID = 2941333602938145526L;

        // Immutable protobuf template; per-element messages are derived via newBuilder(template).
        private transient org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo template;

        public ProtobufPojoSource(int numEvents, int numKeys) {
            super(numEvents, numKeys);
        }

        @Override
        protected void init() {
            super.init();
            template =
                    org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo.newBuilder()
                            .setId(0)
                            .setName("myName")
                            .addAllOperationName(Arrays.asList("op1", "op2", "op3", "op4"))
                            .addOperations(
                                    org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation
                                            .newBuilder()
                                            .setId(1)
                                            .setName("op1"))
                            .addOperations(
                                    org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation
                                            .newBuilder()
                                            .setId(2)
                                            .setName("op2"))
                            .addOperations(
                                    org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation
                                            .newBuilder()
                                            .setId(3)
                                            .setName("op3"))
                            .setOtherId1(1)
                            .setOtherId2(2)
                            .setOtherId3(3)
                            .setSomeObject("null")
                            .build();
        }

        @Override
        protected org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo getElement(
                int keyId) {
            return org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo.newBuilder(template)
                    .setId(keyId)
                    .build();
        }
    }
}
| 4,622 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/full/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains an extended benchmark set which is not used for regression tests but rather
* for performance overview of certain parts of the code.
*
* <p>Please consider moving benchmarks here to keep the amount of regression benchmarks small.
*/
package org.apache.flink.benchmark.full;
| 4,623 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/operators/MultiplyByTwoCoStreamMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.operators;
import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
import org.apache.flink.streaming.api.operators.TwoInputStreamOperator;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
/** Two-input operator that doubles every {@code Long} arriving on either input. */
public class MultiplyByTwoCoStreamMap extends AbstractStreamOperator<Long>
        implements TwoInputStreamOperator<Long, Long, Long> {

    @Override
    public void processElement1(StreamRecord<Long> record) {
        emitDoubled(record);
    }

    @Override
    public void processElement2(StreamRecord<Long> record) {
        emitDoubled(record);
    }

    /** Doubles the record's value and forwards it downstream, reusing the record object. */
    private void emitDoubled(StreamRecord<Long> record) {
        output.collect(record.replace(record.getValue() * 2));
    }
}
| 4,624 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/operators/RecordSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.operators;
import org.apache.flink.api.connector.source.Boundedness;
import org.apache.flink.api.connector.source.ReaderOutput;
import org.apache.flink.api.connector.source.Source;
import org.apache.flink.api.connector.source.SourceReader;
import org.apache.flink.api.connector.source.SourceReaderContext;
import org.apache.flink.api.connector.source.SourceSplit;
import org.apache.flink.api.connector.source.SplitEnumerator;
import org.apache.flink.api.connector.source.SplitEnumeratorContext;
import org.apache.flink.benchmark.operators.RecordSource.EmptyEnumeratorState;
import org.apache.flink.benchmark.operators.RecordSource.EmptySplit;
import org.apache.flink.benchmark.operators.RecordSource.Record;
import org.apache.flink.core.io.InputStatus;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
/** A source that generates longs in a fixed number of splits. */
public class RecordSource implements Source<Record, EmptySplit, EmptyEnumeratorState> {
    public static final int DEFAULT_PAYLOAD_SIZE = 1024;

    // Size in bytes of the payload attached to every emitted record.
    private final int recordSize;
    // Number of completed checkpoints each reader waits for before signaling end of input.
    private final int minCheckpoints;

    public RecordSource(int minCheckpoints) {
        this(minCheckpoints, DEFAULT_PAYLOAD_SIZE);
    }

    public RecordSource(int minCheckpoints, int recordSize) {
        this.minCheckpoints = minCheckpoints;
        this.recordSize = recordSize;
    }

    @Override
    public Boundedness getBoundedness() {
        // Declared unbounded even though readers stop after minCheckpoints checkpoints.
        return Boundedness.CONTINUOUS_UNBOUNDED;
    }

    @Override
    public SourceReader<Record, EmptySplit> createReader(SourceReaderContext readerContext) {
        return new RecordSourceReader(minCheckpoints, recordSize);
    }

    @Override
    public SplitEnumerator<EmptySplit, EmptyEnumeratorState> createEnumerator(
            SplitEnumeratorContext<EmptySplit> enumContext) {
        return new EmptySplitSplitEnumerator();
    }

    @Override
    public SplitEnumerator<EmptySplit, EmptyEnumeratorState> restoreEnumerator(
            SplitEnumeratorContext<EmptySplit> enumContext, EmptyEnumeratorState state) {
        return new EmptySplitSplitEnumerator();
    }

    @Override
    public SimpleVersionedSerializer<EmptySplit> getSplitSerializer() {
        return new SplitVersionedSerializer();
    }

    @Override
    public SimpleVersionedSerializer<EmptyEnumeratorState> getEnumeratorCheckpointSerializer() {
        return new EnumeratorVersionedSerializer();
    }

    /** The emitted element: a running counter value plus a fixed-size byte payload. */
    public static class Record {
        public long value;
        public byte[] payload;

        public Record() {
            this(0, DEFAULT_PAYLOAD_SIZE);
        }

        public Record(long value, int recordSize) {
            this.value = value;
            payload = new byte[recordSize];
        }
    }

    /** Reader that emits records continuously until {@code minCheckpoints} have completed. */
    public static class RecordSourceReader implements SourceReader<Record, EmptySplit> {
        private final int minCheckpoints;
        private final int recordSize;
        private int numCompletedCheckpoints;
        private long counter = 0;

        public RecordSourceReader(int minCheckpoints, int recordSize) {
            this.minCheckpoints = minCheckpoints;
            this.recordSize = recordSize;
        }

        @Override
        public void start() {}

        @Override
        public InputStatus pollNext(ReaderOutput<Record> output) throws InterruptedException {
            // Always emits one record, then decides whether input has ended — so the
            // last record is emitted in the same call that returns END_OF_INPUT.
            output.collect(new Record(counter++, recordSize));
            if (numCompletedCheckpoints >= minCheckpoints) {
                return InputStatus.END_OF_INPUT;
            }
            return InputStatus.MORE_AVAILABLE;
        }

        @Override
        public List<EmptySplit> snapshotState(long checkpointId) {
            // No per-split state to persist.
            return Collections.emptyList();
        }

        @Override
        public void notifyCheckpointComplete(long checkpointId) {
            numCompletedCheckpoints++;
        }

        @Override
        public CompletableFuture<Void> isAvailable() {
            // Data is always available — records are generated on demand.
            return CompletableFuture.completedFuture(null);
        }

        @Override
        public void addSplits(List<EmptySplit> splits) {}

        @Override
        public void notifyNoMoreSplits() {}

        @Override
        public void close() throws Exception {}
    }

    /** Placeholder split; all instances share the same id. */
    public static class EmptySplit implements SourceSplit {
        @Override
        public String splitId() {
            return "42";
        }
    }

    /** Enumerator that never assigns splits — readers generate data themselves. */
    private static class EmptySplitSplitEnumerator
            implements SplitEnumerator<EmptySplit, EmptyEnumeratorState> {
        @Override
        public void start() {}

        @Override
        public void handleSplitRequest(int subtaskId, @Nullable String requesterHostname) {}

        @Override
        public void addSplitsBack(List<EmptySplit> splits, int subtaskId) {}

        @Override
        public void addReader(int subtaskId) {}

        @Override
        public void notifyCheckpointComplete(long checkpointId) {}

        @Override
        public EmptyEnumeratorState snapshotState(long checkpointId) throws Exception {
            return new EmptyEnumeratorState();
        }

        @Override
        public void close() throws IOException {}
    }

    /** Marker type — the enumerator has no state to checkpoint. */
    public static class EmptyEnumeratorState {}

    /** Serializer for the (empty) enumerator state; writes and reads zero bytes. */
    private static class EnumeratorVersionedSerializer
            implements SimpleVersionedSerializer<EmptyEnumeratorState> {
        @Override
        public int getVersion() {
            return 0;
        }

        @Override
        public byte[] serialize(EmptyEnumeratorState state) {
            return new byte[0];
        }

        @Override
        public EmptyEnumeratorState deserialize(int version, byte[] serialized) {
            return new EmptyEnumeratorState();
        }
    }

    /** Serializer for the (empty) split; writes and reads zero bytes. */
    private static class SplitVersionedSerializer implements SimpleVersionedSerializer<EmptySplit> {
        @Override
        public int getVersion() {
            return 0;
        }

        @Override
        public byte[] serialize(EmptySplit split) {
            return new byte[0];
        }

        @Override
        public EmptySplit deserialize(int version, byte[] serialized) {
            return new EmptySplit();
        }
    }
}
| 4,625 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/operators/MultiplyByTwoOperatorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.operators;
import org.apache.flink.streaming.api.operators.AbstractInput;
import org.apache.flink.streaming.api.operators.AbstractStreamOperatorFactory;
import org.apache.flink.streaming.api.operators.AbstractStreamOperatorV2;
import org.apache.flink.streaming.api.operators.Input;
import org.apache.flink.streaming.api.operators.MultipleInputStreamOperator;
import org.apache.flink.streaming.api.operators.StreamOperator;
import org.apache.flink.streaming.api.operators.StreamOperatorParameters;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import java.util.Arrays;
import java.util.List;
/**
 * Factory for {@link MultiplyByTwoOperator}, a two-input stream operator that doubles
 * every incoming {@code Long} and forwards the result downstream.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public class MultiplyByTwoOperatorFactory extends AbstractStreamOperatorFactory<Long> {

    @Override
    public <T extends StreamOperator<Long>> T createStreamOperator(
            StreamOperatorParameters<Long> parameters) {
        // The factory contract forces the unchecked cast to the caller's expected type.
        return (T) new MultiplyByTwoOperator(parameters);
    }

    @Override
    public Class<? extends StreamOperator> getStreamOperatorClass(ClassLoader classLoader) {
        return MultiplyByTwoOperator.class;
    }

    /** Two-input operator; both inputs apply the same "multiply by two" transformation. */
    public static class MultiplyByTwoOperator extends AbstractStreamOperatorV2<Long>
            implements MultipleInputStreamOperator<Long> {

        public MultiplyByTwoOperator(StreamOperatorParameters<Long> parameters) {
            super(parameters, 2); // declares exactly two inputs
        }

        @Override
        public List<Input> getInputs() {
            MultiplyByTwoInput first = new MultiplyByTwoInput(this, 1);
            MultiplyByTwoInput second = new MultiplyByTwoInput(this, 2);
            return Arrays.asList(first, second);
        }

        /** One input lane: emits {@code value * 2} for every record it receives. */
        private static class MultiplyByTwoInput extends AbstractInput<Long, Long> {

            MultiplyByTwoInput(AbstractStreamOperatorV2<Long> operator, int inputId) {
                super(operator, inputId);
            }

            @Override
            public void processElement(StreamRecord<Long> record) {
                long doubled = record.getValue() * 2;
                // replace() reuses the StreamRecord wrapper, avoiding an allocation.
                output.collect(record.replace(doubled));
            }
        }
    }
}
| 4,626 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/thrift/MyOperation.java | /**
* Autogenerated by Thrift Compiler (0.13.0)
*
* <p>DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
*
* @generated
*/
package org.apache.flink.benchmark.thrift;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
@javax.annotation.Generated(
value = "Autogenerated by Thrift Compiler (0.13.0)",
date = "2020-03-06")
public class MyOperation
implements org.apache.thrift.TBase<MyOperation, MyOperation._Fields>,
java.io.Serializable,
Cloneable,
Comparable<MyOperation> {
// Thrift metadata describing this struct's fields (populated in the static block below).
public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData>
metaDataMap;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
new org.apache.thrift.protocol.TStruct("MyOperation");
private static final org.apache.thrift.protocol.TField ID_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"id", org.apache.thrift.protocol.TType.I32, (short) 1);
private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"name", org.apache.thrift.protocol.TType.STRING, (short) 2);
private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
new MyOperationStandardSchemeFactory();
private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
new MyOperationTupleSchemeFactory();
// isset id assignments
private static final int __ID_ISSET_ID = 0;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(
_Fields.class);
tmpMap.put(
_Fields.ID,
new org.apache.thrift.meta_data.FieldMetaData(
"id",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I32, "int")));
tmpMap.put(
_Fields.NAME,
new org.apache.thrift.meta_data.FieldMetaData(
"name",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.STRING)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(
MyOperation.class, metaDataMap);
}
// Struct fields; presence of the primitive `id` is tracked via __isset_bitfield.
public int id; // required
public @org.apache.thrift.annotation.Nullable java.lang.String name; // required
private byte __isset_bitfield = 0;
public MyOperation() {}
public MyOperation(int id, java.lang.String name) {
this();
this.id = id;
setIdIsSet(true);
this.name = name;
}
/** Performs a deep copy on <i>other</i>. */
public MyOperation(MyOperation other) {
__isset_bitfield = other.__isset_bitfield;
this.id = other.id;
if (other.isSetName()) {
this.name = other.name;
}
}
// Selects the standard (field-tagged) or tuple (compact) scheme for the given protocol.
private static <S extends org.apache.thrift.scheme.IScheme> S scheme(
org.apache.thrift.protocol.TProtocol proto) {
return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
? STANDARD_SCHEME_FACTORY
: TUPLE_SCHEME_FACTORY)
.getScheme();
}
public MyOperation deepCopy() {
return new MyOperation(this);
}
@Override
public void clear() {
setIdIsSet(false);
this.id = 0;
this.name = null;
}
public int getId() {
return this.id;
}
public MyOperation setId(int id) {
this.id = id;
setIdIsSet(true);
return this;
}
public void unsetId() {
__isset_bitfield =
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ID_ISSET_ID);
}
/** Returns true if field id is set (has been assigned a value) and false otherwise */
public boolean isSetId() {
return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __ID_ISSET_ID);
}
public void setIdIsSet(boolean value) {
__isset_bitfield =
org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
}
@org.apache.thrift.annotation.Nullable
public java.lang.String getName() {
return this.name;
}
public MyOperation setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
this.name = name;
return this;
}
public void unsetName() {
this.name = null;
}
/** Returns true if field name is set (has been assigned a value) and false otherwise */
public boolean isSetName() {
return this.name != null;
}
public void setNameIsSet(boolean value) {
if (!value) {
this.name = null;
}
}
public void setFieldValue(
_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
switch (field) {
case ID:
if (value == null) {
unsetId();
} else {
setId((java.lang.Integer) value);
}
break;
case NAME:
if (value == null) {
unsetName();
} else {
setName((java.lang.String) value);
}
break;
}
}
@org.apache.thrift.annotation.Nullable
public java.lang.Object getFieldValue(_Fields field) {
switch (field) {
case ID:
return getId();
case NAME:
return getName();
}
throw new java.lang.IllegalStateException();
}
/**
 * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
 * otherwise
 */
public boolean isSet(_Fields field) {
if (field == null) {
throw new java.lang.IllegalArgumentException();
}
switch (field) {
case ID:
return isSetId();
case NAME:
return isSetName();
}
throw new java.lang.IllegalStateException();
}
@Override
public boolean equals(java.lang.Object that) {
if (that == null) return false;
if (that instanceof MyOperation) return this.equals((MyOperation) that);
return false;
}
public boolean equals(MyOperation that) {
if (that == null) return false;
if (this == that) return true;
boolean this_present_id = true;
boolean that_present_id = true;
if (this_present_id || that_present_id) {
if (!(this_present_id && that_present_id)) return false;
if (this.id != that.id) return false;
}
boolean this_present_name = true && this.isSetName();
boolean that_present_name = true && that.isSetName();
if (this_present_name || that_present_name) {
if (!(this_present_name && that_present_name)) return false;
if (!this.name.equals(that.name)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 1;
hashCode = hashCode * 8191 + id;
hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
return hashCode;
}
@Override
public int compareTo(MyOperation other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = java.lang.Boolean.valueOf(isSetId()).compareTo(other.isSetId());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetId()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = java.lang.Boolean.valueOf(isSetName()).compareTo(other.isSetName());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetName()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
@org.apache.thrift.annotation.Nullable
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot)
throws org.apache.thrift.TException {
scheme(iprot).read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot)
throws org.apache.thrift.TException {
scheme(oprot).write(oprot, this);
}
@Override
public java.lang.String toString() {
java.lang.StringBuilder sb = new java.lang.StringBuilder("MyOperation(");
boolean first = true;
sb.append("id:");
sb.append(this.id);
first = false;
if (!first) sb.append(", ");
sb.append("name:");
if (this.name == null) {
sb.append("null");
} else {
sb.append(this.name);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
// Java serialization delegates to the Thrift compact protocol.
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(
new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in)
throws java.io.IOException, java.lang.ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and
// doesn't call the default constructor.
__isset_bitfield = 0;
read(
new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
/**
 * The set of fields this struct contains, along with convenience methods for finding and
 * manipulating them.
 */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
ID((short) 1, "id"),
NAME((short) 2, "name");
private static final java.util.Map<java.lang.String, _Fields> byName =
new java.util.HashMap<java.lang.String, _Fields>();
static {
for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
private final short _thriftId;
private final java.lang.String _fieldName;
_Fields(short thriftId, java.lang.String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
/** Find the _Fields constant that matches fieldId, or null if its not found. */
@org.apache.thrift.annotation.Nullable
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1: // ID
return ID;
case 2: // NAME
return NAME;
default:
return null;
}
}
/**
 * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
 */
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new java.lang.IllegalArgumentException(
"Field " + fieldId + " doesn't exist!");
return fields;
}
/** Find the _Fields constant that matches name, or null if its not found. */
@org.apache.thrift.annotation.Nullable
public static _Fields findByName(java.lang.String name) {
return byName.get(name);
}
public short getThriftFieldId() {
return _thriftId;
}
public java.lang.String getFieldName() {
return _fieldName;
}
}
private static class MyOperationStandardSchemeFactory
implements org.apache.thrift.scheme.SchemeFactory {
public MyOperationStandardScheme getScheme() {
return new MyOperationStandardScheme();
}
}
// Field-tagged wire format: unknown field ids/types are skipped, so it tolerates schema drift.
private static class MyOperationStandardScheme
extends org.apache.thrift.scheme.StandardScheme<MyOperation> {
public void read(org.apache.thrift.protocol.TProtocol iprot, MyOperation struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true) {
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // ID
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.id = iprot.readI32();
struct.setIdIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 2: // NAME
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.name = iprot.readString();
struct.setNameIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate
// method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, MyOperation struct)
throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldBegin(ID_FIELD_DESC);
oprot.writeI32(struct.id);
oprot.writeFieldEnd();
if (struct.name != null) {
oprot.writeFieldBegin(NAME_FIELD_DESC);
oprot.writeString(struct.name);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class MyOperationTupleSchemeFactory
implements org.apache.thrift.scheme.SchemeFactory {
public MyOperationTupleScheme getScheme() {
return new MyOperationTupleScheme();
}
}
// Compact wire format: a bitset of set fields followed by the set values in field order.
private static class MyOperationTupleScheme
extends org.apache.thrift.scheme.TupleScheme<MyOperation> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, MyOperation struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol oprot =
(org.apache.thrift.protocol.TTupleProtocol) prot;
java.util.BitSet optionals = new java.util.BitSet();
if (struct.isSetId()) {
optionals.set(0);
}
if (struct.isSetName()) {
optionals.set(1);
}
oprot.writeBitSet(optionals, 2);
if (struct.isSetId()) {
oprot.writeI32(struct.id);
}
if (struct.isSetName()) {
oprot.writeString(struct.name);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, MyOperation struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol iprot =
(org.apache.thrift.protocol.TTupleProtocol) prot;
java.util.BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
struct.id = iprot.readI32();
struct.setIdIsSet(true);
}
if (incoming.get(1)) {
struct.name = iprot.readString();
struct.setNameIsSet(true);
}
}
}
}
| 4,627 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/thrift/MyPojo.java | /**
* Autogenerated by Thrift Compiler (0.13.0)
*
* <p>DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
*
* @generated
*/
package org.apache.flink.benchmark.thrift;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
@javax.annotation.Generated(
value = "Autogenerated by Thrift Compiler (0.13.0)",
date = "2020-03-06")
public class MyPojo
implements org.apache.thrift.TBase<MyPojo, MyPojo._Fields>,
java.io.Serializable,
Cloneable,
Comparable<MyPojo> {
public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData>
metaDataMap;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
new org.apache.thrift.protocol.TStruct("MyPojo");
private static final org.apache.thrift.protocol.TField ID_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"id", org.apache.thrift.protocol.TType.I32, (short) 1);
private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"name", org.apache.thrift.protocol.TType.STRING, (short) 2);
private static final org.apache.thrift.protocol.TField OPERATION_NAME_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"operationName", org.apache.thrift.protocol.TType.LIST, (short) 3);
private static final org.apache.thrift.protocol.TField OPERATIONS_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"operations", org.apache.thrift.protocol.TType.LIST, (short) 4);
private static final org.apache.thrift.protocol.TField OTHER_ID1_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"otherId1", org.apache.thrift.protocol.TType.I32, (short) 5);
private static final org.apache.thrift.protocol.TField OTHER_ID2_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"otherId2", org.apache.thrift.protocol.TType.I32, (short) 6);
private static final org.apache.thrift.protocol.TField OTHER_ID3_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"otherId3", org.apache.thrift.protocol.TType.I32, (short) 7);
private static final org.apache.thrift.protocol.TField SOME_OBJECT_FIELD_DESC =
new org.apache.thrift.protocol.TField(
"someObject", org.apache.thrift.protocol.TType.STRING, (short) 8);
private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
new MyPojoStandardSchemeFactory();
private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
new MyPojoTupleSchemeFactory();
// isset id assignments
private static final int __ID_ISSET_ID = 0;
private static final int __OTHERID1_ISSET_ID = 1;
private static final int __OTHERID2_ISSET_ID = 2;
private static final int __OTHERID3_ISSET_ID = 3;
private static final _Fields optionals[] = {_Fields.SOME_OBJECT};
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(
_Fields.class);
tmpMap.put(
_Fields.ID,
new org.apache.thrift.meta_data.FieldMetaData(
"id",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I32, "int")));
tmpMap.put(
_Fields.NAME,
new org.apache.thrift.meta_data.FieldMetaData(
"name",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(
_Fields.OPERATION_NAME,
new org.apache.thrift.meta_data.FieldMetaData(
"operationName",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(
org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.STRING))));
tmpMap.put(
_Fields.OPERATIONS,
new org.apache.thrift.meta_data.FieldMetaData(
"operations",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(
org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.STRUCT, "MyOperation"))));
tmpMap.put(
_Fields.OTHER_ID1,
new org.apache.thrift.meta_data.FieldMetaData(
"otherId1",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I32, "int")));
tmpMap.put(
_Fields.OTHER_ID2,
new org.apache.thrift.meta_data.FieldMetaData(
"otherId2",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I32, "int")));
tmpMap.put(
_Fields.OTHER_ID3,
new org.apache.thrift.meta_data.FieldMetaData(
"otherId3",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I32, "int")));
tmpMap.put(
_Fields.SOME_OBJECT,
new org.apache.thrift.meta_data.FieldMetaData(
"someObject",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.STRING)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MyPojo.class, metaDataMap);
}
public int id; // required
public @org.apache.thrift.annotation.Nullable java.lang.String name; // required
public @org.apache.thrift.annotation.Nullable java.util.List<java.lang.String>
operationName; // required
public @org.apache.thrift.annotation.Nullable java.util.List<MyOperation>
operations; // required
public int otherId1; // required
public int otherId2; // required
public int otherId3; // required
public @org.apache.thrift.annotation.Nullable java.lang.String someObject; // optional
private byte __isset_bitfield = 0;
public MyPojo() {}
public MyPojo(
int id,
java.lang.String name,
java.util.List<java.lang.String> operationName,
java.util.List<MyOperation> operations,
int otherId1,
int otherId2,
int otherId3) {
this();
this.id = id;
setIdIsSet(true);
this.name = name;
this.operationName = operationName;
this.operations = operations;
this.otherId1 = otherId1;
setOtherId1IsSet(true);
this.otherId2 = otherId2;
setOtherId2IsSet(true);
this.otherId3 = otherId3;
setOtherId3IsSet(true);
}
/** Performs a deep copy on <i>other</i>. */
public MyPojo(MyPojo other) {
__isset_bitfield = other.__isset_bitfield;
this.id = other.id;
if (other.isSetName()) {
this.name = other.name;
}
if (other.isSetOperationName()) {
java.util.List<java.lang.String> __this__operationName =
new java.util.ArrayList<java.lang.String>(other.operationName);
this.operationName = __this__operationName;
}
if (other.isSetOperations()) {
java.util.List<MyOperation> __this__operations =
new java.util.ArrayList<MyOperation>(other.operations.size());
for (MyOperation other_element : other.operations) {
__this__operations.add(new MyOperation(other_element));
}
this.operations = __this__operations;
}
this.otherId1 = other.otherId1;
this.otherId2 = other.otherId2;
this.otherId3 = other.otherId3;
if (other.isSetSomeObject()) {
this.someObject = other.someObject;
}
}
private static <S extends org.apache.thrift.scheme.IScheme> S scheme(
org.apache.thrift.protocol.TProtocol proto) {
return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
? STANDARD_SCHEME_FACTORY
: TUPLE_SCHEME_FACTORY)
.getScheme();
}
public MyPojo deepCopy() {
return new MyPojo(this);
}
@Override
public void clear() {
setIdIsSet(false);
this.id = 0;
this.name = null;
this.operationName = null;
this.operations = null;
setOtherId1IsSet(false);
this.otherId1 = 0;
setOtherId2IsSet(false);
this.otherId2 = 0;
setOtherId3IsSet(false);
this.otherId3 = 0;
this.someObject = null;
}
public int getId() {
return this.id;
}
public MyPojo setId(int id) {
this.id = id;
setIdIsSet(true);
return this;
}
public void unsetId() {
__isset_bitfield =
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ID_ISSET_ID);
}
/** Returns true if field id is set (has been assigned a value) and false otherwise */
public boolean isSetId() {
return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __ID_ISSET_ID);
}
public void setIdIsSet(boolean value) {
__isset_bitfield =
org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
}
@org.apache.thrift.annotation.Nullable
public java.lang.String getName() {
return this.name;
}
public MyPojo setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
this.name = name;
return this;
}
public void unsetName() {
this.name = null;
}
/** Returns true if field name is set (has been assigned a value) and false otherwise */
public boolean isSetName() {
return this.name != null;
}
public void setNameIsSet(boolean value) {
if (!value) {
this.name = null;
}
}
public int getOperationNameSize() {
return (this.operationName == null) ? 0 : this.operationName.size();
}
@org.apache.thrift.annotation.Nullable
public java.util.Iterator<java.lang.String> getOperationNameIterator() {
return (this.operationName == null) ? null : this.operationName.iterator();
}
public void addToOperationName(java.lang.String elem) {
if (this.operationName == null) {
this.operationName = new java.util.ArrayList<java.lang.String>();
}
this.operationName.add(elem);
}
@org.apache.thrift.annotation.Nullable
public java.util.List<java.lang.String> getOperationName() {
return this.operationName;
}
public MyPojo setOperationName(
@org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> operationName) {
this.operationName = operationName;
return this;
}
public void unsetOperationName() {
this.operationName = null;
}
/**
* Returns true if field operationName is set (has been assigned a value) and false otherwise
*/
public boolean isSetOperationName() {
return this.operationName != null;
}
public void setOperationNameIsSet(boolean value) {
if (!value) {
this.operationName = null;
}
}
public int getOperationsSize() {
return (this.operations == null) ? 0 : this.operations.size();
}
@org.apache.thrift.annotation.Nullable
public java.util.Iterator<MyOperation> getOperationsIterator() {
return (this.operations == null) ? null : this.operations.iterator();
}
public void addToOperations(MyOperation elem) {
if (this.operations == null) {
this.operations = new java.util.ArrayList<MyOperation>();
}
this.operations.add(elem);
}
@org.apache.thrift.annotation.Nullable
public java.util.List<MyOperation> getOperations() {
return this.operations;
}
public MyPojo setOperations(
@org.apache.thrift.annotation.Nullable java.util.List<MyOperation> operations) {
this.operations = operations;
return this;
}
public void unsetOperations() {
this.operations = null;
}
/** Returns true if field operations is set (has been assigned a value) and false otherwise */
public boolean isSetOperations() {
return this.operations != null;
}
public void setOperationsIsSet(boolean value) {
if (!value) {
this.operations = null;
}
}
public int getOtherId1() {
return this.otherId1;
}
public MyPojo setOtherId1(int otherId1) {
this.otherId1 = otherId1;
setOtherId1IsSet(true);
return this;
}
public void unsetOtherId1() {
__isset_bitfield =
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __OTHERID1_ISSET_ID);
}
/** Returns true if field otherId1 is set (has been assigned a value) and false otherwise */
public boolean isSetOtherId1() {
return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __OTHERID1_ISSET_ID);
}
public void setOtherId1IsSet(boolean value) {
__isset_bitfield =
org.apache.thrift.EncodingUtils.setBit(
__isset_bitfield, __OTHERID1_ISSET_ID, value);
}
public int getOtherId2() {
return this.otherId2;
}
public MyPojo setOtherId2(int otherId2) {
this.otherId2 = otherId2;
setOtherId2IsSet(true);
return this;
}
public void unsetOtherId2() {
__isset_bitfield =
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __OTHERID2_ISSET_ID);
}
/** Returns true if field otherId2 is set (has been assigned a value) and false otherwise */
public boolean isSetOtherId2() {
return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __OTHERID2_ISSET_ID);
}
public void setOtherId2IsSet(boolean value) {
__isset_bitfield =
org.apache.thrift.EncodingUtils.setBit(
__isset_bitfield, __OTHERID2_ISSET_ID, value);
}
public int getOtherId3() {
return this.otherId3;
}
public MyPojo setOtherId3(int otherId3) {
this.otherId3 = otherId3;
setOtherId3IsSet(true);
return this;
}
public void unsetOtherId3() {
__isset_bitfield =
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __OTHERID3_ISSET_ID);
}
/** Returns true if field otherId3 is set (has been assigned a value) and false otherwise */
public boolean isSetOtherId3() {
return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __OTHERID3_ISSET_ID);
}
public void setOtherId3IsSet(boolean value) {
__isset_bitfield =
org.apache.thrift.EncodingUtils.setBit(
__isset_bitfield, __OTHERID3_ISSET_ID, value);
}
@org.apache.thrift.annotation.Nullable
public java.lang.String getSomeObject() {
return this.someObject;
}
public MyPojo setSomeObject(
@org.apache.thrift.annotation.Nullable java.lang.String someObject) {
this.someObject = someObject;
return this;
}
public void unsetSomeObject() {
this.someObject = null;
}
/** Returns true if field someObject is set (has been assigned a value) and false otherwise */
public boolean isSetSomeObject() {
return this.someObject != null;
}
public void setSomeObjectIsSet(boolean value) {
if (!value) {
this.someObject = null;
}
}
public void setFieldValue(
_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
switch (field) {
case ID:
if (value == null) {
unsetId();
} else {
setId((java.lang.Integer) value);
}
break;
case NAME:
if (value == null) {
unsetName();
} else {
setName((java.lang.String) value);
}
break;
case OPERATION_NAME:
if (value == null) {
unsetOperationName();
} else {
setOperationName((java.util.List<java.lang.String>) value);
}
break;
case OPERATIONS:
if (value == null) {
unsetOperations();
} else {
setOperations((java.util.List<MyOperation>) value);
}
break;
case OTHER_ID1:
if (value == null) {
unsetOtherId1();
} else {
setOtherId1((java.lang.Integer) value);
}
break;
case OTHER_ID2:
if (value == null) {
unsetOtherId2();
} else {
setOtherId2((java.lang.Integer) value);
}
break;
case OTHER_ID3:
if (value == null) {
unsetOtherId3();
} else {
setOtherId3((java.lang.Integer) value);
}
break;
case SOME_OBJECT:
if (value == null) {
unsetSomeObject();
} else {
setSomeObject((java.lang.String) value);
}
break;
}
}
@org.apache.thrift.annotation.Nullable
public java.lang.Object getFieldValue(_Fields field) {
switch (field) {
case ID:
return getId();
case NAME:
return getName();
case OPERATION_NAME:
return getOperationName();
case OPERATIONS:
return getOperations();
case OTHER_ID1:
return getOtherId1();
case OTHER_ID2:
return getOtherId2();
case OTHER_ID3:
return getOtherId3();
case SOME_OBJECT:
return getSomeObject();
}
throw new java.lang.IllegalStateException();
}
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new java.lang.IllegalArgumentException();
}
switch (field) {
case ID:
return isSetId();
case NAME:
return isSetName();
case OPERATION_NAME:
return isSetOperationName();
case OPERATIONS:
return isSetOperations();
case OTHER_ID1:
return isSetOtherId1();
case OTHER_ID2:
return isSetOtherId2();
case OTHER_ID3:
return isSetOtherId3();
case SOME_OBJECT:
return isSetSomeObject();
}
throw new java.lang.IllegalStateException();
}
@Override
public boolean equals(java.lang.Object that) {
if (that == null) return false;
if (that instanceof MyPojo) return this.equals((MyPojo) that);
return false;
}
public boolean equals(MyPojo that) {
if (that == null) return false;
if (this == that) return true;
boolean this_present_id = true;
boolean that_present_id = true;
if (this_present_id || that_present_id) {
if (!(this_present_id && that_present_id)) return false;
if (this.id != that.id) return false;
}
boolean this_present_name = true && this.isSetName();
boolean that_present_name = true && that.isSetName();
if (this_present_name || that_present_name) {
if (!(this_present_name && that_present_name)) return false;
if (!this.name.equals(that.name)) return false;
}
boolean this_present_operationName = true && this.isSetOperationName();
boolean that_present_operationName = true && that.isSetOperationName();
if (this_present_operationName || that_present_operationName) {
if (!(this_present_operationName && that_present_operationName)) return false;
if (!this.operationName.equals(that.operationName)) return false;
}
boolean this_present_operations = true && this.isSetOperations();
boolean that_present_operations = true && that.isSetOperations();
if (this_present_operations || that_present_operations) {
if (!(this_present_operations && that_present_operations)) return false;
if (!this.operations.equals(that.operations)) return false;
}
boolean this_present_otherId1 = true;
boolean that_present_otherId1 = true;
if (this_present_otherId1 || that_present_otherId1) {
if (!(this_present_otherId1 && that_present_otherId1)) return false;
if (this.otherId1 != that.otherId1) return false;
}
boolean this_present_otherId2 = true;
boolean that_present_otherId2 = true;
if (this_present_otherId2 || that_present_otherId2) {
if (!(this_present_otherId2 && that_present_otherId2)) return false;
if (this.otherId2 != that.otherId2) return false;
}
boolean this_present_otherId3 = true;
boolean that_present_otherId3 = true;
if (this_present_otherId3 || that_present_otherId3) {
if (!(this_present_otherId3 && that_present_otherId3)) return false;
if (this.otherId3 != that.otherId3) return false;
}
boolean this_present_someObject = true && this.isSetSomeObject();
boolean that_present_someObject = true && that.isSetSomeObject();
if (this_present_someObject || that_present_someObject) {
if (!(this_present_someObject && that_present_someObject)) return false;
if (!this.someObject.equals(that.someObject)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 1;
hashCode = hashCode * 8191 + id;
hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
hashCode = hashCode * 8191 + ((isSetOperationName()) ? 131071 : 524287);
if (isSetOperationName()) hashCode = hashCode * 8191 + operationName.hashCode();
hashCode = hashCode * 8191 + ((isSetOperations()) ? 131071 : 524287);
if (isSetOperations()) hashCode = hashCode * 8191 + operations.hashCode();
hashCode = hashCode * 8191 + otherId1;
hashCode = hashCode * 8191 + otherId2;
hashCode = hashCode * 8191 + otherId3;
hashCode = hashCode * 8191 + ((isSetSomeObject()) ? 131071 : 524287);
if (isSetSomeObject()) hashCode = hashCode * 8191 + someObject.hashCode();
return hashCode;
}
@Override
public int compareTo(MyPojo other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = java.lang.Boolean.valueOf(isSetId()).compareTo(other.isSetId());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetId()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = java.lang.Boolean.valueOf(isSetName()).compareTo(other.isSetName());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetName()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison =
java.lang.Boolean.valueOf(isSetOperationName())
.compareTo(other.isSetOperationName());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetOperationName()) {
lastComparison =
org.apache.thrift.TBaseHelper.compareTo(
this.operationName, other.operationName);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison =
java.lang.Boolean.valueOf(isSetOperations()).compareTo(other.isSetOperations());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetOperations()) {
lastComparison =
org.apache.thrift.TBaseHelper.compareTo(this.operations, other.operations);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison =
java.lang.Boolean.valueOf(isSetOtherId1()).compareTo(other.isSetOtherId1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetOtherId1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.otherId1, other.otherId1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison =
java.lang.Boolean.valueOf(isSetOtherId2()).compareTo(other.isSetOtherId2());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetOtherId2()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.otherId2, other.otherId2);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison =
java.lang.Boolean.valueOf(isSetOtherId3()).compareTo(other.isSetOtherId3());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetOtherId3()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.otherId3, other.otherId3);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison =
java.lang.Boolean.valueOf(isSetSomeObject()).compareTo(other.isSetSomeObject());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSomeObject()) {
lastComparison =
org.apache.thrift.TBaseHelper.compareTo(this.someObject, other.someObject);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
@org.apache.thrift.annotation.Nullable
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot)
throws org.apache.thrift.TException {
scheme(iprot).read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot)
throws org.apache.thrift.TException {
scheme(oprot).write(oprot, this);
}
@Override
public java.lang.String toString() {
java.lang.StringBuilder sb = new java.lang.StringBuilder("MyPojo(");
boolean first = true;
sb.append("id:");
sb.append(this.id);
first = false;
if (!first) sb.append(", ");
sb.append("name:");
if (this.name == null) {
sb.append("null");
} else {
sb.append(this.name);
}
first = false;
if (!first) sb.append(", ");
sb.append("operationName:");
if (this.operationName == null) {
sb.append("null");
} else {
sb.append(this.operationName);
}
first = false;
if (!first) sb.append(", ");
sb.append("operations:");
if (this.operations == null) {
sb.append("null");
} else {
sb.append(this.operations);
}
first = false;
if (!first) sb.append(", ");
sb.append("otherId1:");
sb.append(this.otherId1);
first = false;
if (!first) sb.append(", ");
sb.append("otherId2:");
sb.append(this.otherId2);
first = false;
if (!first) sb.append(", ");
sb.append("otherId3:");
sb.append(this.otherId3);
first = false;
if (isSetSomeObject()) {
if (!first) sb.append(", ");
sb.append("someObject:");
if (this.someObject == null) {
sb.append("null");
} else {
sb.append(this.someObject);
}
first = false;
}
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(
new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in)
throws java.io.IOException, java.lang.ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and
// doesn't call the default constructor.
__isset_bitfield = 0;
read(
new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
/**
* The set of fields this struct contains, along with convenience methods for finding and
* manipulating them.
*/
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
ID((short) 1, "id"),
NAME((short) 2, "name"),
OPERATION_NAME((short) 3, "operationName"),
OPERATIONS((short) 4, "operations"),
OTHER_ID1((short) 5, "otherId1"),
OTHER_ID2((short) 6, "otherId2"),
OTHER_ID3((short) 7, "otherId3"),
SOME_OBJECT((short) 8, "someObject");
private static final java.util.Map<java.lang.String, _Fields> byName =
new java.util.HashMap<java.lang.String, _Fields>();
static {
for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
private final short _thriftId;
private final java.lang.String _fieldName;
_Fields(short thriftId, java.lang.String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
/** Find the _Fields constant that matches fieldId, or null if its not found. */
@org.apache.thrift.annotation.Nullable
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1: // ID
return ID;
case 2: // NAME
return NAME;
case 3: // OPERATION_NAME
return OPERATION_NAME;
case 4: // OPERATIONS
return OPERATIONS;
case 5: // OTHER_ID1
return OTHER_ID1;
case 6: // OTHER_ID2
return OTHER_ID2;
case 7: // OTHER_ID3
return OTHER_ID3;
case 8: // SOME_OBJECT
return SOME_OBJECT;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new java.lang.IllegalArgumentException(
"Field " + fieldId + " doesn't exist!");
return fields;
}
/** Find the _Fields constant that matches name, or null if its not found. */
@org.apache.thrift.annotation.Nullable
public static _Fields findByName(java.lang.String name) {
return byName.get(name);
}
public short getThriftFieldId() {
return _thriftId;
}
public java.lang.String getFieldName() {
return _fieldName;
}
}
private static class MyPojoStandardSchemeFactory
implements org.apache.thrift.scheme.SchemeFactory {
public MyPojoStandardScheme getScheme() {
return new MyPojoStandardScheme();
}
}
private static class MyPojoStandardScheme
extends org.apache.thrift.scheme.StandardScheme<MyPojo> {
public void read(org.apache.thrift.protocol.TProtocol iprot, MyPojo struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true) {
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // ID
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.id = iprot.readI32();
struct.setIdIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 2: // NAME
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.name = iprot.readString();
struct.setNameIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 3: // OPERATION_NAME
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
struct.operationName =
new java.util.ArrayList<java.lang.String>(_list0.size);
@org.apache.thrift.annotation.Nullable java.lang.String _elem1;
for (int _i2 = 0; _i2 < _list0.size; ++_i2) {
_elem1 = iprot.readString();
struct.operationName.add(_elem1);
}
iprot.readListEnd();
}
struct.setOperationNameIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 4: // OPERATIONS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list3 = iprot.readListBegin();
struct.operations =
new java.util.ArrayList<MyOperation>(_list3.size);
@org.apache.thrift.annotation.Nullable MyOperation _elem4;
for (int _i5 = 0; _i5 < _list3.size; ++_i5) {
_elem4 = new MyOperation();
_elem4.read(iprot);
struct.operations.add(_elem4);
}
iprot.readListEnd();
}
struct.setOperationsIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 5: // OTHER_ID1
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.otherId1 = iprot.readI32();
struct.setOtherId1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 6: // OTHER_ID2
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.otherId2 = iprot.readI32();
struct.setOtherId2IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 7: // OTHER_ID3
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.otherId3 = iprot.readI32();
struct.setOtherId3IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 8: // SOME_OBJECT
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.someObject = iprot.readString();
struct.setSomeObjectIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate
// method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, MyPojo struct)
throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldBegin(ID_FIELD_DESC);
oprot.writeI32(struct.id);
oprot.writeFieldEnd();
if (struct.name != null) {
oprot.writeFieldBegin(NAME_FIELD_DESC);
oprot.writeString(struct.name);
oprot.writeFieldEnd();
}
if (struct.operationName != null) {
oprot.writeFieldBegin(OPERATION_NAME_FIELD_DESC);
{
oprot.writeListBegin(
new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.STRING,
struct.operationName.size()));
for (java.lang.String _iter6 : struct.operationName) {
oprot.writeString(_iter6);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
if (struct.operations != null) {
oprot.writeFieldBegin(OPERATIONS_FIELD_DESC);
{
oprot.writeListBegin(
new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.STRUCT,
struct.operations.size()));
for (MyOperation _iter7 : struct.operations) {
_iter7.write(oprot);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
oprot.writeFieldBegin(OTHER_ID1_FIELD_DESC);
oprot.writeI32(struct.otherId1);
oprot.writeFieldEnd();
oprot.writeFieldBegin(OTHER_ID2_FIELD_DESC);
oprot.writeI32(struct.otherId2);
oprot.writeFieldEnd();
oprot.writeFieldBegin(OTHER_ID3_FIELD_DESC);
oprot.writeI32(struct.otherId3);
oprot.writeFieldEnd();
if (struct.someObject != null) {
if (struct.isSetSomeObject()) {
oprot.writeFieldBegin(SOME_OBJECT_FIELD_DESC);
oprot.writeString(struct.someObject);
oprot.writeFieldEnd();
}
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class MyPojoTupleSchemeFactory
implements org.apache.thrift.scheme.SchemeFactory {
public MyPojoTupleScheme getScheme() {
return new MyPojoTupleScheme();
}
}
private static class MyPojoTupleScheme extends org.apache.thrift.scheme.TupleScheme<MyPojo> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, MyPojo struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol oprot =
(org.apache.thrift.protocol.TTupleProtocol) prot;
java.util.BitSet optionals = new java.util.BitSet();
if (struct.isSetId()) {
optionals.set(0);
}
if (struct.isSetName()) {
optionals.set(1);
}
if (struct.isSetOperationName()) {
optionals.set(2);
}
if (struct.isSetOperations()) {
optionals.set(3);
}
if (struct.isSetOtherId1()) {
optionals.set(4);
}
if (struct.isSetOtherId2()) {
optionals.set(5);
}
if (struct.isSetOtherId3()) {
optionals.set(6);
}
if (struct.isSetSomeObject()) {
optionals.set(7);
}
oprot.writeBitSet(optionals, 8);
if (struct.isSetId()) {
oprot.writeI32(struct.id);
}
if (struct.isSetName()) {
oprot.writeString(struct.name);
}
if (struct.isSetOperationName()) {
{
oprot.writeI32(struct.operationName.size());
for (java.lang.String _iter8 : struct.operationName) {
oprot.writeString(_iter8);
}
}
}
if (struct.isSetOperations()) {
{
oprot.writeI32(struct.operations.size());
for (MyOperation _iter9 : struct.operations) {
_iter9.write(oprot);
}
}
}
if (struct.isSetOtherId1()) {
oprot.writeI32(struct.otherId1);
}
if (struct.isSetOtherId2()) {
oprot.writeI32(struct.otherId2);
}
if (struct.isSetOtherId3()) {
oprot.writeI32(struct.otherId3);
}
if (struct.isSetSomeObject()) {
oprot.writeString(struct.someObject);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, MyPojo struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol iprot =
(org.apache.thrift.protocol.TTupleProtocol) prot;
java.util.BitSet incoming = iprot.readBitSet(8);
if (incoming.get(0)) {
struct.id = iprot.readI32();
struct.setIdIsSet(true);
}
if (incoming.get(1)) {
struct.name = iprot.readString();
struct.setNameIsSet(true);
}
if (incoming.get(2)) {
{
org.apache.thrift.protocol.TList _list10 =
new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.STRING, iprot.readI32());
struct.operationName = new java.util.ArrayList<java.lang.String>(_list10.size);
@org.apache.thrift.annotation.Nullable java.lang.String _elem11;
for (int _i12 = 0; _i12 < _list10.size; ++_i12) {
_elem11 = iprot.readString();
struct.operationName.add(_elem11);
}
}
struct.setOperationNameIsSet(true);
}
if (incoming.get(3)) {
{
org.apache.thrift.protocol.TList _list13 =
new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
struct.operations = new java.util.ArrayList<MyOperation>(_list13.size);
@org.apache.thrift.annotation.Nullable MyOperation _elem14;
for (int _i15 = 0; _i15 < _list13.size; ++_i15) {
_elem14 = new MyOperation();
_elem14.read(iprot);
struct.operations.add(_elem14);
}
}
struct.setOperationsIsSet(true);
}
if (incoming.get(4)) {
struct.otherId1 = iprot.readI32();
struct.setOtherId1IsSet(true);
}
if (incoming.get(5)) {
struct.otherId2 = iprot.readI32();
struct.setOtherId2IsSet(true);
}
if (incoming.get(6)) {
struct.otherId3 = iprot.readI32();
struct.setOtherId3IsSet(true);
}
if (incoming.get(7)) {
struct.someObject = iprot.readString();
struct.setSomeObjectIsSet(true);
}
}
}
}
| 4,628 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/LongSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
public class LongSource extends RichParallelSourceFunction<Long> {
private volatile boolean running = true;
private long maxValue;
public LongSource(long maxValue) {
this.maxValue = maxValue;
}
@Override
public void run(SourceContext<Long> ctx) throws Exception {
long counter = 0;
while (running) {
synchronized (ctx.getCheckpointLock()) {
ctx.collect(counter);
counter++;
if (counter >= maxValue) {
cancel();
}
}
}
}
@Override
public void cancel() {
running = false;
}
}
| 4,629 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/TestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.client.program.ProgramInvocationException;
import org.apache.flink.runtime.client.JobExecutionException;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import static org.junit.Assert.fail;
/** Test utilities. */
public class TestUtils {
public static JobExecutionResult tryExecute(StreamExecutionEnvironment see, String name)
throws Exception {
try {
return see.execute(name);
} catch (ProgramInvocationException | JobExecutionException root) {
Throwable cause = root.getCause();
// search for nested SuccessExceptions
int depth = 0;
while (!(cause instanceof SuccessException)) {
if (cause == null || depth++ == 20) {
root.printStackTrace();
fail("Test failed: " + root.getMessage());
} else {
cause = cause.getCause();
}
}
}
return null;
}
}
| 4,630 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/QueuingLongSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
public class QueuingLongSource extends LongSource {
private static Object lock = new Object();
private static int currentRank = 1;
private final int rank;
public QueuingLongSource(int rank, long maxValue) {
super(maxValue);
this.rank = rank;
}
public static void reset() {
currentRank = 1;
}
@Override
public void run(SourceContext<Long> ctx) throws Exception {
synchronized (lock) {
while (currentRank != rank) {
lock.wait();
}
}
super.run(ctx);
synchronized (lock) {
currentRank++;
lock.notifyAll();
}
}
}
| 4,631 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/ValidatingCounter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.api.common.functions.ReduceFunction;
public class ValidatingCounter<T> implements ReduceFunction<T> {
private long expectedCount;
private long count = 0;
public ValidatingCounter(long expectedCount) {
this.expectedCount = expectedCount;
}
@Override
public T reduce(T value1, T value2) throws Exception {
count++;
if (count >= expectedCount) {
throw new SuccessException();
}
return value1;
}
}
| 4,632 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/SumReduce.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.api.common.functions.ReduceFunction;
public class SumReduce implements ReduceFunction<Long> {
@Override
public Long reduce(Long value1, Long value2) throws Exception {
return value1 + value2;
}
}
| 4,633 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/IntLongApplications.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.benchmark.CollectSink;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.windowing.assigners.WindowAssigner;
import org.apache.flink.streaming.api.windowing.windows.Window;
public class IntLongApplications {
public static <W extends Window> void reduceWithWindow(
DataStreamSource<IntegerLongSource.Record> source,
WindowAssigner<Object, W> windowAssigner) {
source.map(new MultiplyIntLongByTwo())
.keyBy(record -> record.key)
.window(windowAssigner)
.reduce(new SumReduceIntLong())
.addSink(new CollectSink());
}
}
| 4,634 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/MultiplyIntLongByTwo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.api.common.functions.MapFunction;
public class MultiplyIntLongByTwo
implements MapFunction<IntegerLongSource.Record, IntegerLongSource.Record> {
@Override
public IntegerLongSource.Record map(IntegerLongSource.Record record) throws Exception {
return IntegerLongSource.Record.of(record.key, record.value * 2);
}
}
| 4,635 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/LongSourceType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.connector.source.Boundedness;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.function.BiFunction;
/** Enum based factory for different Long sources. */
public enum LongSourceType {
LEGACY(
(env, maxValue) -> {
return env.addSource(new LongSource(maxValue));
}),
F27_BOUNDED(
(env, maxValue) -> {
return env.fromSource(
new LongNewSource(Boundedness.BOUNDED, maxValue),
WatermarkStrategy.noWatermarks(),
"NewLongSource");
}),
F27_UNBOUNDED(
(env, maxValue) -> {
return env.fromSource(
new LongNewSource(Boundedness.CONTINUOUS_UNBOUNDED, maxValue),
WatermarkStrategy.noWatermarks(),
"NewLongSource");
});
private final BiFunction<StreamExecutionEnvironment, Long, DataStreamSource<Long>> factory;
LongSourceType(BiFunction<StreamExecutionEnvironment, Long, DataStreamSource<Long>> factory) {
this.factory = factory;
}
public DataStreamSource<Long> source(StreamExecutionEnvironment environment, long maxValue) {
return factory.apply(environment, maxValue);
}
};
| 4,636 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/IntegerLongSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
/** Parallel source emitting (key, value) records whose keys cycle over a fixed range. */
public class IntegerLongSource extends RichParallelSourceFunction<IntegerLongSource.Record> {

    // Cleared by cancel() (possibly from another thread) to stop the emission loop.
    private volatile boolean running = true;

    private int keyCount;
    private long elementCount;

    public IntegerLongSource(int numberOfKeys, long numberOfElements) {
        this.keyCount = numberOfKeys;
        this.elementCount = numberOfElements;
    }

    @Override
    public void run(SourceContext<Record> ctx) throws Exception {
        for (long emitted = 0; running && emitted < elementCount; ) {
            // Emit and advance under the checkpoint lock so records and checkpoints
            // do not interleave, per the SourceFunction contract.
            synchronized (ctx.getCheckpointLock()) {
                ctx.collectWithTimestamp(
                        Record.of((int) (emitted % keyCount), emitted), emitted);
                emitted++;
            }
        }
        running = false;
    }

    @Override
    public void cancel() {
        running = false;
    }

    /** Immutable (key, value) pair produced by this source. */
    public static final class Record {
        public final int key;
        public final long value;

        public Record() {
            this(0, 0);
        }

        public Record(int key, long value) {
            this.key = key;
            this.value = value;
        }

        public static Record of(int key, long value) {
            return new Record(key, value);
        }

        public int getKey() {
            return key;
        }

        @Override
        public String toString() {
            return String.format("(%s, %s)", key, value);
        }
    }
}
| 4,637 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/MultiplyByTwo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.api.common.functions.MapFunction;
/** Map function that doubles every incoming value. */
public class MultiplyByTwo implements MapFunction<Long, Long> {

    @Override
    public Long map(Long input) throws Exception {
        final long doubled = input * 2L;
        return doubled;
    }
}
| 4,638 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/SuccessException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
/**
 * Marker exception used to signal successful completion of a benchmark job so that
 * the surrounding execution can be terminated deliberately.
 */
public class SuccessException extends Exception {
}
| 4,639 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/LongNewSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.api.connector.source.Boundedness;
import org.apache.flink.api.connector.source.SplitEnumerator;
import org.apache.flink.api.connector.source.SplitEnumeratorContext;
import org.apache.flink.api.connector.source.lib.NumberSequenceSource;
import org.apache.flink.api.connector.source.lib.util.IteratorSourceEnumerator;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
 * The source should produce same records as {@link LongSource}.
 *
 * <p>{@link LongSource} generates records from 0 to {@code maxValue} for every parallel instance.
 * The original {@link NumberSequenceSource} would split the range 0 to {@code maxValue} between all
 * subtasks, so this subclass hands every subtask its own full-range split instead.
 */
public class LongNewSource extends NumberSequenceSource {

    private final Boundedness boundedness;
    private final long maxValue;

    public LongNewSource(Boundedness boundedness, long maxValue) {
        super(-1, -1); // inherited from/to range is unused; each split covers [0, maxValue]
        this.boundedness = boundedness;
        this.maxValue = maxValue;
    }

    @Override
    public Boundedness getBoundedness() {
        return boundedness;
    }

    @Override
    public SplitEnumerator<NumberSequenceSplit, Collection<NumberSequenceSplit>> createEnumerator(
            SplitEnumeratorContext<NumberSequenceSplit> splitEnumeratorContext) {
        // One full-range split per subtask so every parallel instance emits 0..maxValue.
        final int parallelism = splitEnumeratorContext.currentParallelism();
        final List<NumberSequenceSplit> perSubtaskSplits =
                IntStream.range(0, parallelism)
                        .mapToObj(
                                subtask ->
                                        new NumberSequenceSplit(
                                                Integer.toString(subtask), 0, maxValue))
                        .collect(Collectors.toList());
        return new IteratorSourceEnumerator<>(splitEnumeratorContext, perSubtaskSplits);
    }
}
| 4,640 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/BaseSourceWithKeyRange.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext;
/** Abstract base class for sources with a defined number of events and a fixed key range. */
public abstract class BaseSourceWithKeyRange<T> implements ParallelSourceFunction<T> {

    private static final long serialVersionUID = 8318018060123048234L;

    protected final int numKeys;
    // Counts down towards 0; cancel() zeroes it to break the emission loop.
    protected int remainingEvents;

    public BaseSourceWithKeyRange(int numEvents, int numKeys) {
        this.remainingEvents = numEvents;
        this.numKeys = numKeys;
    }

    /** Hook for subclasses to perform one-time initialization before emission starts. */
    protected void init() {}

    /** Produces the element to emit for the given key id. */
    protected abstract T getElement(int keyId);

    @Override
    public void run(SourceContext<T> out) {
        init();
        int currentKey = 0;
        for (remainingEvents--; remainingEvents >= 0; remainingEvents--) {
            final T element = getElement(currentKey);
            // Emit under the checkpoint lock, as required by the SourceFunction contract.
            synchronized (out.getCheckpointLock()) {
                out.collect(element);
            }
            currentKey++;
            if (currentKey >= numKeys) {
                currentKey = 0;
            }
        }
    }

    @Override
    public void cancel() {
        this.remainingEvents = 0;
    }
}
| 4,641 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/functions/SumReduceIntLong.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark.functions;
import org.apache.flink.api.common.functions.ReduceFunction;
/** Reduce function combining two records by summing their values under the first key. */
public class SumReduceIntLong implements ReduceFunction<IntegerLongSource.Record> {

    @Override
    public IntegerLongSource.Record reduce(
            IntegerLongSource.Record left, IntegerLongSource.Record right) throws Exception {
        final long summed = left.value + right.value;
        return IntegerLongSource.Record.of(left.key, summed);
    }
}
| 4,642 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/olap | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/olap/benchmark/HighAvailabilityServiceBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.olap.benchmark;
import org.apache.curator.test.TestingServer;
import org.apache.flink.api.common.JobID;
import org.apache.flink.benchmark.BenchmarkBase;
import org.apache.flink.benchmark.FlinkEnvironmentContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.HighAvailabilityOptions;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobmanager.HighAvailabilityMode;
import org.apache.flink.runtime.testtasks.NoOpInvokable;
import org.apache.flink.util.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.UUID;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.openjdk.jmh.annotations.Scope.Thread;
/**
 * When a Flink session cluster supports short-lived jobs (OLAP), the HA service needs to be
 * enabled. However, once the HA service is enabled, Flink interacts with it frequently while
 * processing short-lived jobs, resulting in increased latency and decreased QPS. This benchmark
 * measures the QPS of a Flink session cluster with and without an HA service for short-lived
 * jobs, to evaluate HA service optimizations.
 */
@OutputTimeUnit(SECONDS)
public class HighAvailabilityServiceBenchmark extends BenchmarkBase {
    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + HighAvailabilityServiceBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }

    /** Measured operation: submit a trivial one-vertex job and wait for it to finish. */
    @Benchmark
    public void submitJobThroughput(HighAvailabilityContext context) throws Exception {
        context.miniCluster.executeJobBlocking(buildNoOpJob());
    }

    /** Builds a minimal single-vertex job graph with a random job name. */
    private static JobGraph buildNoOpJob() {
        JobGraph jobGraph = new JobGraph(JobID.generate(), UUID.randomUUID().toString());
        jobGraph.addVertex(createNoOpVertex());
        return jobGraph;
    }

    /** Creates a parallelism-1 vertex whose task does nothing. */
    private static JobVertex createNoOpVertex() {
        JobVertex vertex = new JobVertex("v");
        vertex.setInvokableClass(NoOpInvokable.class);
        vertex.setParallelism(1);
        vertex.setMaxParallelism(1);
        return vertex;
    }

    /**
     * Benchmark state holding the mini cluster plus, when {@code highAvailabilityMode} is
     * ZOOKEEPER, an embedded ZooKeeper {@link TestingServer} and a temporary HA storage dir.
     */
    @State(Thread)
    public static class HighAvailabilityContext extends FlinkEnvironmentContext {
        private TestingServer testingServer;
        public final File haDir;

        @Param({"ZOOKEEPER", "NONE"})
        public HighAvailabilityMode highAvailabilityMode;

        public HighAvailabilityContext() {
            try {
                haDir = Files.createTempDirectory("bench-ha-").toFile();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public void setUp() throws Exception {
            if (isZookeeperHighAvailability()) {
                testingServer = new TestingServer();
                testingServer.start();
            }
            // The method `super.setUp()` will call `createConfiguration()` to get Configuration and
            // create a `MiniCluster`. We need to start TestingServer before `createConfiguration()`,
            // then we can add zookeeper quorum in the configuration. So we can only start
            // `TestingServer` before `super.setUp()`.
            super.setUp();
        }

        private boolean isZookeeperHighAvailability() {
            return highAvailabilityMode == HighAvailabilityMode.ZOOKEEPER;
        }

        @Override
        protected Configuration createConfiguration() {
            Configuration configuration = super.createConfiguration();
            configuration.set(HighAvailabilityOptions.HA_MODE, highAvailabilityMode.name());
            configuration.set(HighAvailabilityOptions.HA_STORAGE_PATH, haDir.toURI().toString());
            if (isZookeeperHighAvailability()) {
                // The testing server was started in setUp() before this method runs.
                configuration.set(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
            }
            return configuration;
        }

        @Override
        public void tearDown() throws Exception {
            // NOTE(review): if super.tearDown() throws, the testing server is never stopped and
            // haDir is never deleted -- consider a try/finally here. Also, close() already stops
            // the server, so the explicit stop() before it is redundant (harmless).
            super.tearDown();
            if (isZookeeperHighAvailability()) {
                testingServer.stop();
                testingServer.close();
            }
            FileUtils.deleteDirectory(haDir);
        }
    }
}
| 4,643 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/config/StateBenchmarkOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.config;
import org.apache.flink.configuration.ConfigOption;
import static org.apache.flink.configuration.ConfigOptions.key;
/** Configuration options used by the state benchmarks. */
public class StateBenchmarkOptions {

    /** Directory into which benchmark state data is written; no default value. */
    public static final ConfigOption<String> STATE_DATA_DIR =
            key("benchmark.state.data-dir")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("The dir to put state data.");

    /** Non-instantiable holder of configuration constants. */
    private StateBenchmarkOptions() {}
}
| 4,644 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/config/ConfigUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.config;
import org.apache.flink.configuration.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
/** Utility for loading the benchmark configuration file from the classpath. */
public class ConfigUtil {

    private static final Logger LOG = LoggerFactory.getLogger(ConfigUtil.class);

    private static final String BENCHMARK_CONF = "benchmark-conf.yaml";

    /** Non-instantiable utility class. */
    private ConfigUtil() {}

    /**
     * Loads benchmark conf from the classpath.
     *
     * @return the parsed configuration (empty if the file has no valid entries)
     * @throws IllegalStateException if {@code benchmark-conf.yaml} is not on the classpath
     */
    public static Configuration loadBenchMarkConf() {
        InputStream inputStream =
                ConfigUtil.class.getClassLoader().getResourceAsStream(BENCHMARK_CONF);
        if (inputStream == null) {
            // Fail fast with a clear message instead of an NPE inside the reader.
            throw new IllegalStateException(
                    "Resource " + BENCHMARK_CONF + " not found on the classpath.");
        }
        return loadYAMLResource(inputStream);
    }

    /**
     * This is copied from {@code GlobalConfiguration#loadYAMLResource} to avoid depending
     * on @Internal api.
     *
     * <p>Parses simple "key: value" lines; '#' starts a comment; malformed lines are logged
     * and skipped rather than failing the whole load.
     */
    private static Configuration loadYAMLResource(InputStream inputStream) {
        final Configuration config = new Configuration();
        // Read explicitly as UTF-8 instead of the platform default charset.
        try (BufferedReader reader =
                new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
            String line;
            int lineNo = 0;
            while ((line = reader.readLine()) != null) {
                lineNo++;
                // 1. check for comments
                String[] comments = line.split("#", 2);
                String conf = comments[0].trim();
                // 2. get key and value
                if (conf.length() > 0) {
                    String[] kv = conf.split(": ", 2);
                    // skip line with no valid key-value pair
                    if (kv.length == 1) {
                        LOG.warn(
                                "Error while trying to split key and value in configuration file "
                                        + ":"
                                        + lineNo
                                        + ": \""
                                        + line
                                        + "\"");
                        continue;
                    }
                    String key = kv[0].trim();
                    String value = kv[1].trim();
                    // sanity check
                    if (key.length() == 0 || value.length() == 0) {
                        LOG.warn(
                                "Error after splitting key and value in configuration file "
                                        + ":"
                                        + lineNo
                                        + ": \""
                                        + line
                                        + "\"");
                        continue;
                    }
                    LOG.info("Loading configuration property: {}, {}", key, value);
                    config.setString(key, value);
                }
            }
        } catch (IOException e) {
            throw new RuntimeException("Error parsing YAML configuration.", e);
        }
        return config;
    }
}
| 4,645 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/SchedulerBenchmarkExecutorBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.concurrent.TimeUnit;
/** The base class of all benchmarks related to the scheduler. */
@SuppressWarnings("MethodMayBeStatic")
@State(Scope.Thread)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@BenchmarkMode(Mode.AverageTime)
@Fork(
        value = 6,
        jvmArgsAppend = {
            "-Djava.rmi.server.hostname=127.0.0.1",
            "-Dcom.sun.management.jmxremote.authenticate=false",
            "-Dcom.sun.management.jmxremote.ssl=false",
            // NOTE(review): this last entry repeats the "...jmxremote.ssl" key with no value,
            // which sets the property to the empty string and overrides the "false" above; it
            // looks like a truncated flag (e.g. "-Dcom.sun.management.jmxremote") -- confirm.
            "-Dcom.sun.management.jmxremote.ssl"
        })
public class SchedulerBenchmarkExecutorBase {

    /**
     * Runs all JMH benchmarks declared in the given class, matched by canonical name.
     *
     * @param clazz the JMH-annotated executor class whose benchmarks should run
     * @throws RunnerException if the JMH runner fails
     */
    public static void runBenchmark(Class<?> clazz) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + clazz.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }
}
| 4,646 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/topology/BuildExecutionGraphBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.topology;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.topology.BuildExecutionGraphBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.RunnerException;
/** The benchmark of building the topology of ExecutionGraph in a STREAMING/BATCH job. */
public class BuildExecutionGraphBenchmarkExecutor extends SchedulerBenchmarkExecutorBase {

    /** Injected by JMH; the benchmark is run once per configuration. */
    @Param({"BATCH", "STREAMING"})
    private JobConfiguration jobConfiguration;

    /** Runtime-side benchmark that performs the actual topology building. */
    private BuildExecutionGraphBenchmark benchmark;

    public static void main(String[] args) throws RunnerException {
        runBenchmark(BuildExecutionGraphBenchmarkExecutor.class);
    }

    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new BuildExecutionGraphBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: build the ExecutionGraph topology once (single-shot mode). */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void buildTopology() throws Exception {
        benchmark.buildTopology();
    }

    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }
}
| 4,647 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/scheduling/SchedulingDownstreamTasksInBatchJobBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.scheduling;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.scheduling.SchedulingDownstreamTasksInBatchJobBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.RunnerException;
/** The benchmark of scheduling downstream task in a BATCH job. */
public class SchedulingDownstreamTasksInBatchJobBenchmarkExecutor
        extends SchedulerBenchmarkExecutorBase {

    /** Injected by JMH; covers plain BATCH plus the hybrid-shuffle variants. */
    @Param({"BATCH", "BATCH_HYBRID_DEFAULT", "BATCH_HYBRID_PARTIAL_FINISHED", "BATCH_HYBRID_ALL_FINISHED"})
    private JobConfiguration jobConfiguration;

    /** Runtime-side benchmark that performs the actual scheduling. */
    private SchedulingDownstreamTasksInBatchJobBenchmark benchmark;

    public static void main(String[] args) throws RunnerException {
        runBenchmark(SchedulingDownstreamTasksInBatchJobBenchmarkExecutor.class);
    }

    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new SchedulingDownstreamTasksInBatchJobBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: schedule the downstream tasks once (single-shot mode). */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void schedulingDownstreamTasks() {
        benchmark.schedulingDownstreamTasks();
    }

    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }
}
| 4,648 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/scheduling/InitSchedulingStrategyBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.scheduling;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.scheduling.InitSchedulingStrategyBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.RunnerException;
/** The benchmark of initializing the scheduling strategy in a STREAMING/BATCH job. */
public class InitSchedulingStrategyBenchmarkExecutor extends SchedulerBenchmarkExecutorBase {

    /** Injected by JMH; the benchmark is run once per configuration. */
    @Param({"BATCH", "STREAMING"})
    private JobConfiguration jobConfiguration;

    /** Runtime-side benchmark that performs the actual strategy initialization. */
    private InitSchedulingStrategyBenchmark benchmark;

    public static void main(String[] args) throws RunnerException {
        runBenchmark(InitSchedulingStrategyBenchmarkExecutor.class);
    }

    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new InitSchedulingStrategyBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation; the result is consumed by the Blackhole to defeat dead-code elimination. */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void initSchedulingStrategy(Blackhole blackhole) {
        blackhole.consume(benchmark.initSchedulingStrategy());
    }

    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }
}
| 4,649 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/deploying/DeployingTasksInStreamingJobBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.deploying;
import org.apache.flink.runtime.executiongraph.Execution;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.deploying.DeployingTasksInStreamingJobBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.RunnerException;
/**
 * The benchmark of deploying tasks in a STREAMING job. The related method is {@link
 * Execution#deploy}.
 */
public class DeployingTasksInStreamingJobBenchmarkExecutor extends SchedulerBenchmarkExecutorBase {

    /** Injected by JMH; only the STREAMING configuration applies here. */
    @Param("STREAMING")
    private JobConfiguration jobConfiguration;

    /** Runtime-side benchmark that performs the actual task deployment. */
    private DeployingTasksInStreamingJobBenchmark benchmark;

    public static void main(String[] args) throws RunnerException {
        // Fix: run this executor class (the JMH-annotated one, as all sibling executors do),
        // not the runtime-side benchmark class, which carries no JMH annotations and whose
        // canonical name would match no benchmarks in the include pattern.
        runBenchmark(DeployingTasksInStreamingJobBenchmarkExecutor.class);
    }

    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new DeployingTasksInStreamingJobBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: deploy all tasks of the streaming job once (single-shot mode). */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void deployAllTasks() throws Exception {
        benchmark.deployAllTasks();
    }

    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }
}
| 4,650 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/deploying/DeployingDownstreamTasksInBatchJobBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.deploying;
import org.apache.flink.runtime.executiongraph.Execution;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.deploying.DeployingDownstreamTasksInBatchJobBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.RunnerException;
/**
 * JMH executor measuring the deployment of downstream tasks in a BATCH job; the method under
 * test is {@link Execution#deploy}.
 */
public class DeployingDownstreamTasksInBatchJobBenchmarkExecutor
        extends SchedulerBenchmarkExecutorBase {

    /** Delegate that holds the actual benchmark logic; recreated for every trial. */
    private DeployingDownstreamTasksInBatchJobBenchmark benchmark;

    /** Job configuration, fixed to BATCH; injected by JMH. */
    @Param("BATCH")
    private JobConfiguration jobConfiguration;

    /** Prepares a fresh benchmark instance once per trial. */
    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new DeployingDownstreamTasksInBatchJobBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: deploy the downstream tasks once. */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void deployDownstreamTasks() throws Exception {
        benchmark.deployDownstreamTasks();
    }

    /** Releases benchmark resources after each trial. */
    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }

    /** Entry point for running this executor stand-alone. */
    public static void main(String[] args) throws RunnerException {
        runBenchmark(DeployingDownstreamTasksInBatchJobBenchmarkExecutor.class);
    }
}
| 4,651 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/e2e/CreateSchedulerBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.e2e;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.e2e.CreateSchedulerBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.RunnerException;
/** JMH executor measuring scheduler creation for both STREAMING and BATCH jobs. */
public class CreateSchedulerBenchmarkExecutor extends SchedulerBenchmarkExecutorBase {

    /** Delegate that holds the actual benchmark logic; recreated for every trial. */
    private CreateSchedulerBenchmark benchmark;

    /** Job configuration, parameterized over BATCH and STREAMING; injected by JMH. */
    @Param({"BATCH", "STREAMING"})
    private JobConfiguration jobConfiguration;

    /** Prepares a fresh benchmark instance once per trial. */
    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new CreateSchedulerBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: build the scheduler; the result is sunk to defeat DCE. */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void createScheduler(Blackhole blackhole) throws Exception {
        blackhole.consume(benchmark.createScheduler());
    }

    /** Releases benchmark resources after each trial. */
    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }

    /** Entry point for running this executor stand-alone. */
    public static void main(String[] args) throws RunnerException {
        runBenchmark(CreateSchedulerBenchmarkExecutor.class);
    }
}
| 4,652 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/e2e/HandleGlobalFailureAndRestartAllTasksBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.e2e;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.e2e.HandleGlobalFailureAndRestartAllTasksBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.RunnerException;
/** JMH executor measuring global-failure handling and full task restart in STREAMING/BATCH jobs. */
public class HandleGlobalFailureAndRestartAllTasksBenchmarkExecutor
        extends SchedulerBenchmarkExecutorBase {

    /** Delegate that holds the actual benchmark logic; recreated for every trial. */
    private HandleGlobalFailureAndRestartAllTasksBenchmark benchmark;

    /** Job configuration, covering plain and evenly-distributed variants; injected by JMH. */
    @Param({"BATCH", "STREAMING", "BATCH_EVENLY", "STREAMING_EVENLY"})
    private JobConfiguration jobConfiguration;

    /** Prepares a fresh benchmark instance once per trial. */
    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new HandleGlobalFailureAndRestartAllTasksBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: trigger a global failure and restart every task once. */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void handleGlobalFailureAndRestartAllTasks() throws Exception {
        benchmark.handleGlobalFailureAndRestartAllTasks();
    }

    /** Releases benchmark resources after each trial. */
    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }

    /** Entry point for running this executor stand-alone. */
    public static void main(String[] args) throws RunnerException {
        runBenchmark(HandleGlobalFailureAndRestartAllTasksBenchmarkExecutor.class);
    }
}
| 4,653 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/e2e/SchedulingAndDeployingBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.e2e;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.e2e.SchedulingAndDeployingBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.RunnerException;
/** JMH executor measuring scheduling plus deployment of tasks in STREAMING/BATCH jobs. */
public class SchedulingAndDeployingBenchmarkExecutor extends SchedulerBenchmarkExecutorBase {

    /** Delegate that holds the actual benchmark logic; recreated for every trial. */
    private SchedulingAndDeployingBenchmark benchmark;

    /** Job configuration, parameterized over BATCH and STREAMING; injected by JMH. */
    @Param({"BATCH", "STREAMING"})
    private JobConfiguration jobConfiguration;

    /** Prepares a fresh benchmark instance once per trial. */
    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new SchedulingAndDeployingBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: start scheduling (which also deploys the tasks) once. */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void startScheduling() throws Exception {
        benchmark.startScheduling();
    }

    /** Releases benchmark resources after each trial. */
    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }

    /** Entry point for running this executor stand-alone. */
    public static void main(String[] args) throws RunnerException {
        runBenchmark(SchedulingAndDeployingBenchmarkExecutor.class);
    }
}
| 4,654 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/failover/RegionToRestartInBatchJobBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.failover;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.failover.RegionToRestartInBatchJobBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.RunnerException;
/** JMH executor measuring the computation of failover regions to restart in a BATCH job. */
public class RegionToRestartInBatchJobBenchmarkExecutor extends SchedulerBenchmarkExecutorBase {

    /** Delegate that holds the actual benchmark logic; recreated for every trial. */
    private RegionToRestartInBatchJobBenchmark benchmark;

    /** Job configuration, fixed to BATCH; injected by JMH. */
    @Param("BATCH")
    private JobConfiguration jobConfiguration;

    /** Prepares a fresh benchmark instance once per trial. */
    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new RegionToRestartInBatchJobBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: compute the regions to restart; result sunk to defeat DCE. */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void calculateRegionToRestart(Blackhole blackhole) {
        blackhole.consume(benchmark.calculateRegionToRestart());
    }

    /** Releases benchmark resources after each trial. */
    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }

    /** Entry point for running this executor stand-alone. */
    public static void main(String[] args) throws RunnerException {
        runBenchmark(RegionToRestartInBatchJobBenchmarkExecutor.class);
    }
}
| 4,655 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/failover/RegionToRestartInStreamingJobBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.failover;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.failover.RegionToRestartInStreamingJobBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.RunnerException;
/** JMH executor measuring the computation of failover regions to restart in a STREAMING job. */
public class RegionToRestartInStreamingJobBenchmarkExecutor extends SchedulerBenchmarkExecutorBase {

    /** Delegate that holds the actual benchmark logic; recreated for every trial. */
    private RegionToRestartInStreamingJobBenchmark benchmark;

    /** Job configuration, fixed to STREAMING; injected by JMH. */
    @Param("STREAMING")
    private JobConfiguration jobConfiguration;

    /** Prepares a fresh benchmark instance once per trial. */
    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new RegionToRestartInStreamingJobBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: compute the regions to restart; result sunk to defeat DCE. */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void calculateRegionToRestart(Blackhole blackhole) {
        blackhole.consume(benchmark.calculateRegionToRestart());
    }

    /** Releases benchmark resources after each trial. */
    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }

    /** Entry point for running this executor stand-alone. */
    public static void main(String[] args) throws RunnerException {
        runBenchmark(RegionToRestartInStreamingJobBenchmarkExecutor.class);
    }
}
| 4,656 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/scheduler/benchmark/partitionrelease/PartitionReleaseInBatchJobBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.scheduler.benchmark.partitionrelease;
import org.apache.flink.runtime.scheduler.benchmark.JobConfiguration;
import org.apache.flink.runtime.scheduler.benchmark.partitionrelease.PartitionReleaseInBatchJobBenchmark;
import org.apache.flink.scheduler.benchmark.SchedulerBenchmarkExecutorBase;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.RunnerException;
/** JMH executor measuring result-partition release in a BATCH job. */
public class PartitionReleaseInBatchJobBenchmarkExecutor extends SchedulerBenchmarkExecutorBase {

    /** Delegate that holds the actual benchmark logic; recreated for every trial. */
    private PartitionReleaseInBatchJobBenchmark benchmark;

    /** Job configuration, fixed to BATCH; injected by JMH. */
    @Param("BATCH")
    private JobConfiguration jobConfiguration;

    /** Prepares a fresh benchmark instance once per trial. */
    @Setup(Level.Trial)
    public void setup() throws Exception {
        benchmark = new PartitionReleaseInBatchJobBenchmark();
        benchmark.setup(jobConfiguration);
    }

    /** Measured operation: release the job's partitions once. */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    public void partitionRelease() {
        benchmark.partitionRelease();
    }

    /** Releases benchmark resources after each trial. */
    @TearDown(Level.Trial)
    public void teardown() {
        benchmark.teardown();
    }

    /** Entry point for running this executor stand-alone. */
    public static void main(String[] args) throws RunnerException {
        runBenchmark(PartitionReleaseInBatchJobBenchmarkExecutor.class);
    }
}
| 4,657 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state/benchmark/RocksdbStateBackendRescalingBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.state.benchmark;
import org.apache.flink.api.common.JobID;
import org.apache.flink.config.ConfigUtil;
import org.apache.flink.config.StateBenchmarkOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.state.benchmark.RescalingBenchmarkBuilder;
import org.apache.flink.runtime.state.storage.FileSystemCheckpointStorage;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.runner.RunnerException;
import java.io.IOException;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.openjdk.jmh.annotations.Mode.AverageTime;
@OutputTimeUnit(MILLISECONDS)
@BenchmarkMode(AverageTime)
@Warmup(iterations = 3)
public class RocksdbStateBackendRescalingBenchmarkExecutor extends RescalingBenchmarkBase {
    // numberOfKeys = 10_000_000, keyLen = 96, valueLen = 128, state size ~= 2.2GB
    // NOTE(review): valueLen = 128 is mentioned above but no such field exists here —
    // presumably fixed inside ByteArrayRecordGenerator; confirm against that class.
    private final int numberOfKeys = 10_000_000;
    private final int keyLen = 96;
    public static void main(String[] args) throws RunnerException {
        runBenchmark(RocksdbStateBackendRescalingBenchmarkExecutor.class);
    }
    // Builds the rescaling benchmark once per trial: an incremental RocksDB backend
    // (EmbeddedRocksDBStateBackend(true) enables incremental checkpoints) backed by
    // file-system checkpoint storage under a temp "rescaleDb" directory.
    @Setup(Level.Trial)
    public void setUp() throws Exception {
        EmbeddedRocksDBStateBackend stateBackend = new EmbeddedRocksDBStateBackend(true);
        benchmark =
                new RescalingBenchmarkBuilder<byte[]>()
                        .setMaxParallelism(128)
                        .setParallelismBefore(rescaleType.getParallelismBefore())
                        .setParallelismAfter(rescaleType.getParallelismAfter())
                        .setManagedMemorySize(512 * 1024 * 1024)
                        .setCheckpointStorageAccess(
                                new FileSystemCheckpointStorage("file://" + prepareDirectory("rescaleDb").getAbsolutePath())
                                        .createCheckpointStorage(new JobID()))
                        .setStateBackend(stateBackend)
                        .setStreamRecordGenerator(new ByteArrayRecordGenerator(numberOfKeys, keyLen))
                        .setStateProcessFunctionSupplier(TestKeyedFunction::new)
                        .build();
        benchmark.setUp();
    }
    // Prepares the subtask's state snapshot before every measured invocation so each
    // rescale() call starts from the same pre-rescale state.
    @Setup(Level.Invocation)
    public void setUpPerInvocation() throws Exception {
        benchmark.prepareStateForOperator(rescaleType.getSubtaskIndex());
    }
    // Trial-level cleanup of the benchmark harness (checkpoint directory etc.).
    @TearDown(Level.Trial)
    public void tearDown() throws IOException {
        benchmark.tearDown();
    }
    // Measured operation: restore/rescale the prepared state once per invocation.
    @Benchmark
    public void rescaleRocksDB() throws Exception {
        benchmark.rescale();
    }
    // Closes the operator after every invocation so the next setUpPerInvocation
    // can rebuild state from scratch.
    @TearDown(Level.Invocation)
    public void tearDownPerInvocation() throws Exception {
        benchmark.closeOperator();
    }
}
| 4,658 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state/benchmark/ListStateBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.state.benchmark;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.contrib.streaming.state.RocksDBKeyedStateBackend;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.flink.state.benchmark.StateBackendBenchmarkUtils.applyToAllKeys;
import static org.apache.flink.state.benchmark.StateBackendBenchmarkUtils.compactState;
import static org.apache.flink.state.benchmark.StateBackendBenchmarkUtils.getListState;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.listValueCount;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeyCount;
/** Implementation for list state benchmark testing. */
public class ListStateBenchmark extends StateBenchmarkBase {
    private final String STATE_NAME = "listState";
    private final ListStateDescriptor<Long> STATE_DESC =
            new ListStateDescriptor<>(STATE_NAME, Long.class);
    // The list state under test, bound to the backend created in setUp().
    private ListState<Long> listState;
    // Pre-generated payload of listValueCount random longs, reused by listAddAll.
    private List<Long> dummyLists;
    public static void main(String[] args) throws RunnerException {
        Options opt =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + ListStateBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(opt).run();
    }
    // Trial-level setup: create the backend, bind the list state, and pre-build the
    // bulk payload used by listAddAll.
    @Setup
    public void setUp() throws Exception {
        keyedStateBackend = createKeyedStateBackend();
        listState = getListState(keyedStateBackend, STATE_DESC);
        dummyLists = new ArrayList<>(listValueCount);
        for (int i = 0; i < listValueCount; ++i) {
            dummyLists.add(random.nextLong());
        }
        keyIndex = new AtomicInteger();
    }
    // Iteration-level setup: seed one element per key so the "get" benchmarks have
    // state to read, then compact RocksDB down to a single file (see comment below).
    @Setup(Level.Iteration)
    public void setUpPerIteration() throws Exception {
        for (int i = 0; i < setupKeyCount; ++i) {
            keyedStateBackend.setCurrentKey((long) i);
            listState.add(random.nextLong());
        }
        // make sure only one sst file left, so all get invocation will access this single file,
        // to prevent the spike caused by different key distribution in multiple sst files,
        // the more access to the older sst file, the lower throughput will be.
        if (keyedStateBackend instanceof RocksDBKeyedStateBackend) {
            RocksDBKeyedStateBackend<Long> rocksDBKeyedStateBackend =
                    (RocksDBKeyedStateBackend<Long>) keyedStateBackend;
            compactState(rocksDBKeyedStateBackend, STATE_DESC);
        }
    }
    // Iteration-level teardown: clear every key's state so iterations are independent,
    // then force the clearance to take physical effect (compaction / GC).
    @TearDown(Level.Iteration)
    public void tearDownPerIteration() throws Exception {
        applyToAllKeys(
                keyedStateBackend,
                STATE_DESC,
                (k, state) -> {
                    keyedStateBackend.setCurrentKey(k);
                    state.clear();
                });
        // make the clearance effective, trigger compaction for RocksDB, and GC for heap.
        if (keyedStateBackend instanceof RocksDBKeyedStateBackend) {
            RocksDBKeyedStateBackend<Long> rocksDBKeyedStateBackend =
                    (RocksDBKeyedStateBackend<Long>) keyedStateBackend;
            compactState(rocksDBKeyedStateBackend, STATE_DESC);
        } else {
            System.gc();
        }
        // wait a while for the clearance to take effect.
        Thread.sleep(1000);
    }
    // Overwrite the whole list for an existing (seeded) key.
    @Benchmark
    public void listUpdate(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        listState.update(keyValue.listValue);
    }
    // Write the whole list for a previously-unseen key ("add" = new key; mirrors
    // valueAdd in ValueStateBenchmark, hence update() rather than add()).
    @Benchmark
    public void listAdd(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.newKey);
        listState.update(keyValue.listValue);
    }
    // Append a single element to an existing key's list.
    @Benchmark
    public void listAppend(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        listState.add(keyValue.value);
    }
    // Fetch the iterable only (no traversal); returning it prevents dead-code elimination.
    @Benchmark
    public Iterable<Long> listGet(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        return listState.get();
    }
    // Fetch the iterable and traverse every element, sinking values into the Blackhole.
    @Benchmark
    public void listGetAndIterate(KeyValue keyValue, Blackhole bh) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        Iterable<Long> iterable = listState.get();
        for (Long value : iterable) {
            bh.consume(value);
        }
    }
    // Append the pre-built bulk payload to an existing key's list.
    @Benchmark
    public void listAddAll(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        listState.addAll(dummyLists);
    }
}
| 4,659 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state/benchmark/ValueStateBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.state.benchmark;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.flink.state.benchmark.StateBackendBenchmarkUtils.getValueState;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeyCount;
/** Benchmarks for value-state operations (update / insert / read) on a keyed backend. */
public class ValueStateBenchmark extends StateBenchmarkBase {

    /** The value state under test, bound to the backend created in setUp(). */
    private ValueState<Long> valueState;

    /** Entry point for running this benchmark stand-alone via the JMH runner. */
    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + ValueStateBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }

    /** Creates the backend, binds the state, and seeds a value for every setup key. */
    @Setup
    public void setUp() throws Exception {
        keyedStateBackend = createKeyedStateBackend();
        valueState =
                getValueState(keyedStateBackend, new ValueStateDescriptor<>("kvState", Long.class));
        for (int i = 0; i < setupKeyCount; ++i) {
            keyedStateBackend.setCurrentKey((long) i);
            valueState.update(random.nextLong());
        }
        keyIndex = new AtomicInteger();
    }

    /** Overwrite the value of an existing (seeded) key. */
    @Benchmark
    public void valueUpdate(KeyValue keyValue) throws IOException {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        valueState.update(keyValue.value);
    }

    /** Write a value for a previously-unseen key. */
    @Benchmark
    public void valueAdd(KeyValue keyValue) throws IOException {
        keyedStateBackend.setCurrentKey(keyValue.newKey);
        valueState.update(keyValue.value);
    }

    /** Read the value of an existing key; returning it prevents dead-code elimination. */
    @Benchmark
    public Long valueGet(KeyValue keyValue) throws IOException {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        return valueState.value();
    }
}
| 4,660 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state/benchmark/MapStateBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.state.benchmark;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.flink.state.benchmark.StateBackendBenchmarkUtils.getMapState;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapKeyCount;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapKeys;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeyCount;
/** Implementation for map state benchmark testing. */
public class MapStateBenchmark extends StateBenchmarkBase {

    /** The map state under test, bound to the backend created in setUp(). */
    private MapState<Long, Double> mapState;

    /** Pre-generated payload of mapKeyCount entries, reused by mapPutAll. */
    private Map<Long, Double> dummyMaps;

    public static void main(String[] args) throws RunnerException {
        Options opt =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + MapStateBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(opt).run();
    }

    /**
     * Trial-level setup: create the backend, bind the map state, build the bulk payload,
     * and seed mapKeyCount entries per setup key so read benchmarks have state to hit.
     */
    @Setup
    public void setUp() throws Exception {
        keyedStateBackend = createKeyedStateBackend();
        mapState =
                getMapState(
                        keyedStateBackend,
                        new MapStateDescriptor<>("mapState", Long.class, Double.class));
        dummyMaps = new HashMap<>(mapKeyCount);
        for (int i = 0; i < mapKeyCount; ++i) {
            dummyMaps.put(mapKeys.get(i), random.nextDouble());
        }
        for (int i = 0; i < setupKeyCount; ++i) {
            keyedStateBackend.setCurrentKey((long) i);
            for (int j = 0; j < mapKeyCount; j++) {
                mapState.put(mapKeys.get(j), random.nextDouble());
            }
        }
        keyIndex = new AtomicInteger();
    }

    /** Overwrite one map entry under an existing (seeded) key. */
    @Benchmark
    public void mapUpdate(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        mapState.put(keyValue.mapKey, keyValue.mapValue);
    }

    /** Write one map entry under a previously-unseen key. */
    @Benchmark
    public void mapAdd(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.newKey);
        mapState.put(keyValue.mapKey, keyValue.mapValue);
    }

    /** Read one map entry under an existing key. */
    @Benchmark
    public Double mapGet(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        return mapState.get(keyValue.mapKey);
    }

    /**
     * Existence check for a map key.
     *
     * <p>NOTE(review): this probes {@code mapKey << 1}, which is usually outside the seeded
     * key set — presumably to measure the miss path; confirm that intent before changing.
     */
    @Benchmark
    public boolean mapContains(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        return mapState.contains(keyValue.mapKey << 1);
    }

    /** Emptiness check on an existing key's map. */
    @Benchmark
    public boolean mapIsEmpty(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        return mapState.isEmpty();
    }

    /** Iterate all map keys of an existing key, sinking each into the Blackhole. */
    @Benchmark
    @OperationsPerInvocation(mapKeyCount)
    public void mapKeys(KeyValue keyValue, Blackhole bh) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        for (Long key : mapState.keys()) {
            bh.consume(key);
        }
    }

    /** Iterate all map values of an existing key, sinking each into the Blackhole. */
    @Benchmark
    @OperationsPerInvocation(mapKeyCount)
    public void mapValues(KeyValue keyValue, Blackhole bh) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        for (Double value : mapState.values()) {
            bh.consume(value);
        }
    }

    /** Iterate all map entries of an existing key, sinking key and value. */
    @Benchmark
    @OperationsPerInvocation(mapKeyCount)
    public void mapEntries(KeyValue keyValue, Blackhole bh) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        Iterable<Map.Entry<Long, Double>> iterable = mapState.entries();
        if (iterable != null) {
            // Fix: iterate the iterable fetched above. The original null-checked
            // `iterable` but then looped over a SECOND mapState.entries() call,
            // performing the state lookup twice and leaving `iterable` unused.
            for (Map.Entry<Long, Double> entry : iterable) {
                bh.consume(entry.getKey());
                bh.consume(entry.getValue());
            }
        }
    }

    /** Iterate all map entries via an explicit iterator, sinking key and value. */
    @Benchmark
    @OperationsPerInvocation(mapKeyCount)
    public void mapIterator(KeyValue keyValue, Blackhole bh) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        Iterator<Map.Entry<Long, Double>> iterator = mapState.iterator();
        while (iterator.hasNext()) {
            Map.Entry<Long, Double> entry = iterator.next();
            bh.consume(entry.getKey());
            bh.consume(entry.getValue());
        }
    }

    /** Remove one map entry under an existing key. */
    @Benchmark
    public void mapRemove(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        mapState.remove(keyValue.mapKey);
    }

    /** Bulk-insert the pre-built payload under an existing key. */
    @Benchmark
    public void mapPutAll(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        mapState.putAll(dummyMaps);
    }
}
| 4,661 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state/benchmark/StateBenchmarkConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.state.benchmark;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Random;
/**
 * Constants for state benchmark tests. The random key/value pools are generated eagerly so
 * that benchmark bodies never pay for {@link Random#nextLong()} style generation at
 * measurement time.
 */
class StateBenchmarkConstants {
    // TODO: why all of those static fields? Those should be inside a context class
    static final int mapKeyCount = 10;
    static final int listValueCount = 100;
    static final int setupKeyCount = 500_000;
    static final String rootDirName = "benchmark";
    static final String recoveryDirName = "localRecovery";
    static final String dbDirName = "dbPath";
    static final ArrayList<Long> mapKeys = new ArrayList<>(mapKeyCount);
    static final ArrayList<Double> mapValues = new ArrayList<>(mapKeyCount);
    static final ArrayList<Long> setupKeys = new ArrayList<>(setupKeyCount);
    static final int newKeyCount = 500_000;
    static final ArrayList<Long> newKeys = new ArrayList<>(newKeyCount);
    static final int randomValueCount = 1_000_000;
    static final ArrayList<Long> randomValues = new ArrayList<>(randomValueCount);

    static {
        // Map keys are the shuffled range [0, mapKeyCount).
        fillWithShuffledRange(mapKeys, 0L, mapKeyCount);
        // Map values are shuffled uniform random doubles.
        fillWithShuffledDoubles(mapValues, mapKeyCount);
        // Setup keys cover [0, setupKeyCount); new keys continue where setup keys end,
        // so the two pools never overlap.
        fillWithShuffledRange(setupKeys, 0L, setupKeyCount);
        fillWithShuffledRange(newKeys, (long) setupKeyCount, newKeyCount);
        fillWithShuffledRange(randomValues, 0L, randomValueCount);
    }

    /** Fills {@code target} with {@code count} consecutive longs starting at {@code offset}, then shuffles. */
    private static void fillWithShuffledRange(final ArrayList<Long> target, final long offset, final int count) {
        for (long i = 0; i < count; i++) {
            target.add(offset + i);
        }
        Collections.shuffle(target);
    }

    /** Fills {@code target} with {@code count} uniform random doubles, then shuffles. */
    private static void fillWithShuffledDoubles(final ArrayList<Double> target, final int count) {
        final Random random = new Random();
        for (int i = 0; i < count; i++) {
            target.add(random.nextDouble());
        }
        Collections.shuffle(target);
    }
}
| 4,662 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state/benchmark/StateBenchmarkBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.state.benchmark;
import org.apache.flink.benchmark.BenchmarkBase;
import org.apache.flink.config.ConfigUtil;
import org.apache.flink.config.StateBenchmarkOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.KeyedStateBackend;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.flink.state.benchmark.StateBackendBenchmarkUtils.cleanUp;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapKeyCount;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapKeys;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapValues;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.newKeyCount;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.newKeys;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.randomValueCount;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.randomValues;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeyCount;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeys;
/** Base implementation of the state benchmarks. */
public class StateBenchmarkBase extends BenchmarkBase {
    // TODO: why AtomicInteger?
    // Shared monotonically increasing counter used by KeyValue#kvSetup to pick the keys
    // for the next invocation; subclasses reset it in their @Setup methods.
    static AtomicInteger keyIndex;
    // Per-thread RNG used by subclasses when generating state values during setup.
    final ThreadLocalRandom random = ThreadLocalRandom.current();
    // Backend flavour to benchmark; JMH repeats the whole run once per listed value.
    @Param({"HEAP", "ROCKSDB", "ROCKSDB_CHANGELOG"})
    private StateBackendBenchmarkUtils.StateBackendType backendType;
    // The backend under benchmark, created per trial and cleaned up in tearDown().
    KeyedStateBackend<Long> keyedStateBackend;
    /**
     * Creates the keyed state backend under benchmark. When the
     * {@code StateBenchmarkOptions.STATE_DATA_DIR} option is configured, the backend's
     * working directory is placed there (created on demand); otherwise the utility's
     * default location is used.
     */
    protected KeyedStateBackend<Long> createKeyedStateBackend() throws Exception {
        Configuration benchMarkConfig = ConfigUtil.loadBenchMarkConf();
        String stateDataDirPath = benchMarkConfig.getString(StateBenchmarkOptions.STATE_DATA_DIR);
        File dataDir = null;
        if (stateDataDirPath != null) {
            dataDir = new File(stateDataDirPath);
            if (!dataDir.exists()) {
                Files.createDirectories(Paths.get(stateDataDirPath));
            }
        }
        return StateBackendBenchmarkUtils.createKeyedStateBackend(backendType, dataDir);
    }
    /**
     * Returns the next key index. Once {@link Integer#MAX_VALUE} has been handed out the
     * counter is reset to 0, so the modulo-based pool lookups below never see a negative
     * index from integer wraparound.
     */
    private static int getCurrentIndex() {
        int currentIndex = keyIndex.getAndIncrement();
        if (currentIndex == Integer.MAX_VALUE) {
            keyIndex.set(0);
        }
        return currentIndex;
    }
    /** Releases the backend and its on-disk artifacts at the end of the trial. */
    @TearDown
    public void tearDown() throws IOException {
        cleanUp(keyedStateBackend);
    }
    /** Per-thread holder of the keys/values consumed by a single benchmark invocation. */
    @State(Scope.Thread)
    public static class KeyValue {
        long newKey;
        long setUpKey;
        long mapKey;
        double mapValue;
        long value;
        List<Long> listValue;
        /** Picks this invocation's keys/values from the pre-generated pools. */
        @Setup(Level.Invocation)
        public void kvSetup() {
            int currentIndex = getCurrentIndex();
            setUpKey = setupKeys.get(currentIndex % setupKeyCount);
            newKey = newKeys.get(currentIndex % newKeyCount);
            mapKey = mapKeys.get(currentIndex % mapKeyCount);
            mapValue = mapValues.get(currentIndex % mapKeyCount);
            value = randomValues.get(currentIndex % randomValueCount);
            // TODO: singletonList is taking 25% of time in mapAdd benchmark... This shouldn't be
            // initiated if benchmark is not using it and for the benchmarks that are using it,
            // this should also be probably somehow avoided.
            listValue =
                    Collections.singletonList(randomValues.get(currentIndex % randomValueCount));
        }
        /** Drops the per-invocation list so it does not outlive the invocation. */
        @TearDown(Level.Invocation)
        public void kvTearDown() {
            listValue = null;
        }
    }
}
| 4,663 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state/benchmark/RescalingBenchmarkBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.state.benchmark;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.benchmark.BenchmarkBase;
import org.apache.flink.config.ConfigUtil;
import org.apache.flink.config.StateBenchmarkOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.state.benchmark.RescalingBenchmark;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.util.Collector;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Random;
/** Shared base for rescaling benchmarks: rescale shapes, record/state fixtures and JMH helpers. */
public class RescalingBenchmarkBase extends BenchmarkBase {
    // Direction of the rescale under benchmark; JMH repeats the run once per listed value.
    @Param({"RESCALE_IN", "RESCALE_OUT"})
    protected RescaleType rescaleType;
    // The benchmark harness (keyed by byte[] records); built by subclasses in their @Setup.
    protected RescalingBenchmark<byte[]> benchmark;
    /** Runs every JMH benchmark declared on the given executor class. */
    public static void runBenchmark(Class<?> clazz) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + clazz.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }
    /**
     * Creates a fresh, empty directory whose name starts with {@code prefix}, located under
     * the directory configured via {@code StateBenchmarkOptions.STATE_DATA_DIR} (created on
     * demand) or the JVM default temp dir when the option is unset.
     *
     * <p>{@link File#createTempFile} is used only to reserve a unique path; the reserved
     * file is then deleted and re-created as a directory.
     *
     * @throws IOException if the reserved path cannot be cleaned up or created as a directory
     */
    protected static File prepareDirectory(String prefix) throws IOException {
        Configuration benchMarkConfig = ConfigUtil.loadBenchMarkConf();
        String stateDataDirPath = benchMarkConfig.getString(StateBenchmarkOptions.STATE_DATA_DIR);
        File dataDir = null;
        if (stateDataDirPath != null) {
            dataDir = new File(stateDataDirPath);
            if (!dataDir.exists()) {
                Files.createDirectories(Paths.get(stateDataDirPath));
            }
        }
        File target = File.createTempFile(prefix, "", dataDir);
        if (target.exists() && !target.delete()) {
            throw new IOException("Target dir {" + target.getAbsolutePath() + "} exists but failed to clean it up");
        } else if (!target.mkdirs()) {
            throw new IOException("Failed to create target directory: " + target.getAbsolutePath());
        } else {
            return target;
        }
    }
    /** Parallelism before/after the rescale plus the subtask index whose state is prepared. */
    @State(Scope.Thread)
    public enum RescaleType {
        RESCALE_OUT(1, 2, 0),
        RESCALE_IN(2, 1, 0);
        private final int parallelismBefore;
        private final int parallelismAfter;
        private final int subtaskIndex;
        RescaleType(int parallelismBefore, int parallelismAfter, int subtaskIdx) {
            this.parallelismBefore = parallelismBefore;
            this.parallelismAfter = parallelismAfter;
            this.subtaskIndex = subtaskIdx;
        }
        public int getParallelismBefore() {
            return parallelismBefore;
        }
        public int getParallelismAfter() {
            return parallelismAfter;
        }
        public int getSubtaskIndex() {
            return subtaskIndex;
        }
    }
    /**
     * Deterministic source of {@code numberOfKeys} byte[] records of length {@code keyLen}
     * (fixed-seed {@link Random}); the first four bytes of every record carry the record
     * counter so that all generated keys differ from one another.
     */
    protected static class ByteArrayRecordGenerator
            implements RescalingBenchmark.StreamRecordGenerator<byte[]> {
        private final Random random = new Random(0);
        private final int numberOfKeys;
        // Reusable scratch buffer; each emitted record is a defensive copy of it.
        private final byte[] fatArray;
        private int count = 0;
        protected ByteArrayRecordGenerator(final int numberOfKeys,
                final int keyLen) {
            this.numberOfKeys = numberOfKeys;
            fatArray = new byte[keyLen];
        }
        // generate deterministic elements for source
        @Override
        public Iterator<StreamRecord<byte[]>> generate() {
            return new Iterator<StreamRecord<byte[]>>() {
                @Override
                public boolean hasNext() {
                    return count < numberOfKeys;
                }
                @Override
                public StreamRecord<byte[]> next() {
                    random.nextBytes(fatArray);
                    changePrefixOfArray(count, fatArray);
                    // make the hashcode of keys different.
                    StreamRecord<byte[]> record =
                            new StreamRecord<>(Arrays.copyOf(fatArray, fatArray.length), 0);
                    count += 1;
                    return record;
                }
            };
        }
        // NOTE(review): raw TypeInformation return; TypeInformation<byte[]> would be
        // preferable if the StreamRecordGenerator interface declares it that way — confirm.
        @Override
        public TypeInformation getTypeInformation() {
            return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO;
        }
        // Writes the record number into the first four bytes of the array (big-endian).
        private void changePrefixOfArray(int number, byte[] fatArray) {
            fatArray[0] = (byte) ((number >> 24) & 0xFF);
            fatArray[1] = (byte) ((number >> 16) & 0xFF);
            fatArray[2] = (byte) ((number >> 8) & 0xFF);
            fatArray[3] = (byte) (number & 0xFF);
        }
    }
    /** Keyed function that writes 128 random bytes to value state for every element it sees. */
    protected static class TestKeyedFunction extends KeyedProcessFunction<byte[], byte[], Void> {
        private static final long serialVersionUID = 1L;
        private final Random random = new Random(0);
        private final int valueLen = 128;
        private ValueState<byte[]> randomState;
        // Reusable scratch buffer; the state update stores a defensive copy of it.
        private final byte[] stateArray = new byte[valueLen];
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            randomState =
                    this.getRuntimeContext()
                            .getState(new ValueStateDescriptor<>("RandomState", byte[].class));
        }
        @Override
        public void processElement(
                byte[] value,
                KeyedProcessFunction<byte[], byte[], Void>.Context ctx,
                Collector<Void> out)
                throws Exception {
            random.nextBytes(stateArray);
            randomState.update(Arrays.copyOf(stateArray, stateArray.length));
        }
    }
}
| 4,664 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/state/benchmark/HashMapStateBackendRescalingBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.state.benchmark;
import org.apache.flink.api.common.JobID;
import org.apache.flink.config.ConfigUtil;
import org.apache.flink.config.StateBenchmarkOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.state.benchmark.RescalingBenchmarkBuilder;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.runtime.state.storage.FileSystemCheckpointStorage;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.runner.RunnerException;
import java.io.IOException;
import java.net.URI;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.openjdk.jmh.annotations.Mode.AverageTime;
@OutputTimeUnit(MILLISECONDS)
@BenchmarkMode(AverageTime)
@Warmup(iterations = 3)
public class HashMapStateBackendRescalingBenchmarkExecutor extends RescalingBenchmarkBase {

    // numberOfKeys = 1250000, keyLen = 96, valueLen = 128, state size ~= 270MB
    private final int numberOfKeys = 1250000;
    private final int keyLen = 96;

    public static void main(String[] args) throws RunnerException {
        runBenchmark(HashMapStateBackendRescalingBenchmarkExecutor.class);
    }

    /** Builds the rescaling harness once per trial, backed by the heap state backend. */
    @Setup(Level.Trial)
    public void setUp() throws Exception {
        // FsStateBackend is deprecated; use HashMapStateBackend together with an explicit
        // file-system checkpoint storage location instead.
        final HashMapStateBackend heapBackend = new HashMapStateBackend();
        final URI checkpointBaseUri =
                new URI("file://" + prepareDirectory("rescaleDb").getAbsolutePath());
        final ByteArrayRecordGenerator recordGenerator =
                new ByteArrayRecordGenerator(numberOfKeys, keyLen);
        benchmark =
                new RescalingBenchmarkBuilder<byte[]>()
                        .setMaxParallelism(128)
                        .setParallelismBefore(rescaleType.getParallelismBefore())
                        .setParallelismAfter(rescaleType.getParallelismAfter())
                        .setCheckpointStorageAccess(
                                new FileSystemCheckpointStorage(checkpointBaseUri, 0)
                                        .createCheckpointStorage(new JobID()))
                        .setStateBackend(heapBackend)
                        .setStreamRecordGenerator(recordGenerator)
                        .setStateProcessFunctionSupplier(TestKeyedFunction::new)
                        .build();
        benchmark.setUp();
    }

    /** Restores the state of the benchmarked subtask before every invocation. */
    @Setup(Level.Invocation)
    public void setUpPerInvocation() throws Exception {
        benchmark.prepareStateForOperator(rescaleType.getSubtaskIndex());
    }

    /** The measured operation: performing the rescale itself. */
    @Benchmark
    public void rescaleHeap() throws Exception {
        benchmark.rescale();
    }

    /** Closes the operator after every invocation. */
    @TearDown(Level.Invocation)
    public void tearDownPerInvocation() throws Exception {
        benchmark.closeOperator();
    }

    /** Releases harness resources at the end of the trial. */
    @TearDown(Level.Trial)
    public void tearDown() throws IOException {
        benchmark.tearDown();
    }
}
| 4,665 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/JobSystemArchTest.java | package com.paypal.jobsystem;
import com.paypal.testsupport.archrules.SliceLayeredModuleLayerProtectionRules;
import com.paypal.testsupport.archrules.SliceLayeredModulePackageStructureRules;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
/**
 * ArchUnit test suite for the jobsystem module: applies the shared package-structure and
 * layer-protection rule sets to all production classes (tests are excluded from analysis).
 */
@AnalyzeClasses(packages = "com.paypal.jobsystem", importOptions = ImportOption.DoNotIncludeTests.class)
public class JobSystemArchTest {
    // Verifies the module follows the agreed slice/layer package layout.
    @ArchTest
    public static final ArchTests packageRules = ArchTests.in(SliceLayeredModulePackageStructureRules.class);
    // Verifies layers only depend on the layers they are allowed to access.
    @ArchTest
    public static final ArchTests layerRules = ArchTests.in(SliceLayeredModuleLayerProtectionRules.class);
}
| 4,666 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/support/AbstractCachingFailedItemsBatchJobItemsExtractorTest.java | package com.paypal.jobsystem.batchjobfailures.support;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjobfailures.services.resolvepolicies.BatchJobFailedItemCacheFailureResolvePolicy;
import com.paypal.jobsystem.batchjobfailures.services.cache.BatchJobFailedItemCacheFailureResolver;
import com.paypal.jobsystem.batchjobfailures.services.cache.BatchJobFailedItemCacheService;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.jobsystem.batchjobfailures.services.BatchJobFailedItemService;
import com.paypal.jobsystem.batchjobfailures.support.AbstractCachingFailedItemsBatchJobItemsExtractor;
import com.paypal.jobsystem.batchjobsupport.support.AbstractBatchJobItem;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.*;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.*;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
class AbstractCachingFailedItemsBatchJobItemsExtractorTest {
    // Spied so that calls to getItems (the cache-miss fallback) can be counted.
    @InjectMocks
    @Spy
    private MyCachingFailedItemsBatchJobItemsExtractorTest testObj;
    @Mock
    private BatchJobFailedItemCacheService batchJobFailedItemCacheServiceMock;
    @Mock
    private BatchJobFailedItem batchJobFailedItem1Mock, batchJobFailedItem2Mock;
    @Mock
    private MyItem batchJobItem1Mock, batchJobItem2Mock;
    @Mock
    private List<BatchJobFailedItem> batchJobFailedItemsMock;
    @Mock
    private BatchJobFailedItemCacheFailureResolvePolicy batchJobFailedItemCacheFailureResolvePolicyMock;
    @Captor
    private ArgumentCaptor<BatchJobFailedItemCacheFailureResolver> cacheFailureResolverCaptor;
    // When every failed item is resolvable from the cache, the extractor must return the
    // cached items and never fall back to loading items by id.
    @Test
    void getBatchJobFailedItems_shouldRetrieveItemsFromCache() {
        final HashMap<BatchJobFailedItem, Optional<MyItem>> cacheResponse = buildCacheResponse();
        when(batchJobFailedItemCacheServiceMock.retrieveAllItems(eq(MyItem.class), any(), any()))
                .thenReturn(cacheResponse);
        final Collection<MyItem> result = testObj.getBatchJobFailedItems(batchJobFailedItemsMock);
        verify(testObj, times(0)).getItems((List<String>) any());
        assertThat(result).containsExactlyInAnyOrder(batchJobItem1Mock, batchJobItem2Mock);
    }
    // The extractor must hand its own getItems method to the cache service as the
    // cache-failure resolver; detected via the MyList marker type getItems returns.
    @Test
    void getBatchJobFailedItems_shouldPassGetItemsMethodAsCacheResolver() {
        final HashMap<BatchJobFailedItem, Optional<MyItem>> cacheResponse = buildCacheResponse();
        when(batchJobFailedItemCacheServiceMock.retrieveAllItems(eq(MyItem.class), any(), any()))
                .thenReturn(cacheResponse);
        testObj.getBatchJobFailedItems(batchJobFailedItemsMock);
        verify(batchJobFailedItemCacheServiceMock, times(1)).retrieveAllItems(any(), any(),
                cacheFailureResolverCaptor.capture());
        // we use MyList custom type to check if is passing getItems as cache failure
        // resolver
        assertThat((cacheFailureResolverCaptor.getValue()).itemsToBeCached(Collections.emptyList()))
                .isInstanceOf(MyList.class);
    }
    // When the subclass supplies a resolve policy, it must be forwarded to the cache
    // service via the four-argument retrieveAllItems overload.
    @Test
    void getBatchJobFailedItems_shouldPassCustomCacheResolvePolicy() {
        final HashMap<BatchJobFailedItem, Optional<MyItem>> cacheResponse = buildCacheResponse();
        when(batchJobFailedItemCacheServiceMock.retrieveAllItems(eq(MyItem.class), any(), any(), any()))
                .thenReturn(cacheResponse);
        testObj.policy = Optional.of(batchJobFailedItemCacheFailureResolvePolicyMock);
        testObj.getBatchJobFailedItems(batchJobFailedItemsMock);
        verify(batchJobFailedItemCacheServiceMock, times(1)).retrieveAllItems(any(), any(),
                cacheFailureResolverCaptor.capture(), eq(batchJobFailedItemCacheFailureResolvePolicyMock));
    }
    // Builds a cache response where both failed items resolve to a cached item.
    private HashMap<BatchJobFailedItem, Optional<MyItem>> buildCacheResponse() {
        final HashMap<BatchJobFailedItem, Optional<MyItem>> cacheResponse = new HashMap<>();
        cacheResponse.put(batchJobFailedItem1Mock, Optional.of(batchJobItem1Mock));
        cacheResponse.put(batchJobFailedItem2Mock, Optional.of(batchJobItem2Mock));
        return cacheResponse;
    }
    // Minimal concrete extractor under test; getItems returns a MyList marker so the tests
    // can recognise when it is used as the cache-failure resolver.
    static class MyCachingFailedItemsBatchJobItemsExtractorTest
            extends AbstractCachingFailedItemsBatchJobItemsExtractor<BatchJobContext, MyItem> {
        Optional<BatchJobFailedItemCacheFailureResolvePolicy> policy = Optional.empty();
        protected MyCachingFailedItemsBatchJobItemsExtractorTest(final String itemType,
                final BatchJobFailedItemService batchJobFailedItemService,
                final BatchJobFailedItemCacheService batchJobFailedItemCacheService) {
            super(MyItem.class, itemType, batchJobFailedItemService, batchJobFailedItemCacheService);
        }
        @Override
        protected Collection<MyItem> getItems(final List<String> ids) {
            return new MyList<>(Collections.emptyList());
        }
        @Override
        protected Optional<BatchJobFailedItemCacheFailureResolvePolicy> getBatchJobFailedItemCacheFailureResolvePolicy() {
            return policy;
        }
    }
    // Simple batch job item whose id is the wrapped string itself.
    static class MyItem extends AbstractBatchJobItem<String> {
        protected MyItem(final String item) {
            super(item);
        }
        @Override
        public String getItemId() {
            return getItem();
        }
        @Override
        public String getItemType() {
            return this.getClass().getSimpleName();
        }
    }
    // Marker list subtype used to recognise the collection produced by getItems.
    static class MyList<E> extends ArrayList<E> {
        public MyList(final List<E> list) {
            super(list);
        }
    }
}
| 4,667 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/support/AbstractFailedItemsBatchJobItemsExtractorTest.java | package com.paypal.jobsystem.batchjobfailures.support;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.jobsystem.batchjobfailures.services.BatchJobFailedItemService;
import com.paypal.jobsystem.batchjobfailures.support.AbstractFailedItemsBatchJobItemsExtractor;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.Collection;
import java.util.List;
import static org.mockito.Mockito.verify;
@ExtendWith(MockitoExtension.class)
class AbstractFailedItemsBatchJobItemsExtractorTest {

    private static final String ITEM_TYPE = "itemType";

    @Mock
    private BatchJobFailedItemService batchJobFailedItemServiceMock;

    @Mock
    private BatchJobContext batchJobContextMock;

    @Test
    void getItems_ShouldCallBatchJobFailedItemService() {
        // Given an extractor configured for ITEM_TYPE items
        final MyAbstractFailedItemsBatchJobItemsExtractor extractor =
                new MyAbstractFailedItemsBatchJobItemsExtractor(ITEM_TYPE, batchJobFailedItemServiceMock);

        // When items are extracted for the current batch job context
        extractor.getItems(batchJobContextMock);

        // Then the failed-item service is asked for the retryable items of that type
        verify(batchJobFailedItemServiceMock).getFailedItemsForRetry(ITEM_TYPE);
    }

    // Minimal concrete subclass: maps failed items to an empty item collection.
    private static class MyAbstractFailedItemsBatchJobItemsExtractor
            extends AbstractFailedItemsBatchJobItemsExtractor<BatchJobContext, BatchJobItem<Object>> {

        protected MyAbstractFailedItemsBatchJobItemsExtractor(final String itemType,
                final BatchJobFailedItemService batchJobFailedItemService) {
            super(itemType, batchJobFailedItemService);
        }

        @Override
        protected Collection<BatchJobItem<Object>> getBatchJobFailedItems(
                final List<BatchJobFailedItem> batchJobFailedItems) {
            return List.of();
        }
    }
}
| 4,668 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/controllers/JobFailuresControllerTest.java | package com.paypal.jobsystem.batchjobfailures.controllers;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.jobsystem.batchjobfailures.services.BatchJobFailedItemService;
import com.paypal.jobsystem.batchjobfailures.controllers.converters.BatchJobFailedItemResponseConverter;
import com.paypal.jobsystem.batchjobfailures.controllers.dto.BatchJobFailedItemResponse;
import com.paypal.jobsystem.batchjobfailures.controllers.JobFailuresController;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class JobFailuresControllerTest {

    @InjectMocks
    private JobFailuresController testObj;

    @Mock
    private BatchJobFailedItemService batchJobFailedItemServiceMock;

    @Mock
    private BatchJobFailedItemResponseConverter batchJobFailedItemResponseConverterMock;

    @Mock
    private BatchJobFailedItem batchJobFailedItem1Mock, batchJobFailedItem2Mock;

    @Mock
    private BatchJobFailedItemResponse batchJobFailedItemResponse1Mock, batchJobFailedItemResponse2Mock;

    @Test
    void getFailedItems_ShouldReturnAllFailedItemsOfAType() {
        // Given: the service yields two failed items and the converter maps them to responses
        final List<BatchJobFailedItem> failedItems =
                List.of(batchJobFailedItem1Mock, batchJobFailedItem2Mock);
        final List<BatchJobFailedItemResponse> convertedResponses =
                List.of(batchJobFailedItemResponse1Mock, batchJobFailedItemResponse2Mock);
        when(batchJobFailedItemServiceMock.getFailedItems("type1")).thenReturn(failedItems);
        when(batchJobFailedItemResponseConverterMock.toResponse(failedItems))
                .thenReturn(convertedResponses);

        // When the controller is asked for failed items of that type
        final List<BatchJobFailedItemResponse> result = testObj.getFailedItems("type1");

        // Then every converted response is part of the controller's answer
        assertThat(result).containsAll(convertedResponses);
    }
}
| 4,669 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/aspects/BatchJobFailedItemServiceCacheAspectTest.java | package com.paypal.jobsystem.batchjobfailures.aspects;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjobfailures.aspects.BatchJobFailedItemServiceCacheAspect;
import com.paypal.jobsystem.batchjobfailures.services.cache.BatchJobFailedItemCacheService;
import org.aspectj.lang.JoinPoint;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.Collection;
import java.util.List;
import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class BatchJobFailedItemServiceCacheAspectTest {

    @InjectMocks
    private BatchJobFailedItemServiceCacheAspect testObj;

    @Mock
    private BatchJobFailedItemCacheService batchJobFailedItemCacheService;

    @Mock
    private JoinPoint joinPointMock;

    @Mock
    private BatchJobItem<Object> batchJobItemMock;

    @Test
    void beforeSaveItem_shouldCacheItem() {
        // Given: the intercepted call carries the item as its single argument
        final Object[] adviceArguments = { batchJobItemMock };
        when(joinPointMock.getArgs()).thenReturn(adviceArguments);

        // When the save advice runs
        testObj.beforeSaveItem(joinPointMock);

        // Then the item is stored in the failed-item cache
        verify(batchJobFailedItemCacheService).storeItem(batchJobItemMock);
    }

    @Test
    void beforeRemoveItem_shouldRemoveItem() {
        // Given: an item with a known type/id is passed to the intercepted call
        final Object[] adviceArguments = { batchJobItemMock };
        when(joinPointMock.getArgs()).thenReturn(adviceArguments);
        when(batchJobItemMock.getItemId()).thenReturn("id");
        when(batchJobItemMock.getItemType()).thenReturn("type");

        // When the remove advice runs
        testObj.beforeRemoveItem(joinPointMock);

        // Then the cache entry for that type/id is evicted
        verify(batchJobFailedItemCacheService).removeItem("type", "id");
    }

    @Test
    void beforeCheckUpdatedItems() {
        // Given: the intercepted call carries a collection containing the item
        final Object[] adviceArguments = { List.of(batchJobItemMock) };
        when(joinPointMock.getArgs()).thenReturn(adviceArguments);

        // When the check-updated advice runs
        testObj.beforeCheckUpdatedItems(joinPointMock);

        // Then the cached entries for those items are refreshed
        verify(batchJobFailedItemCacheService)
                .refreshCachedItems(argThat((Collection items) -> items.contains(batchJobItemMock)));
    }
}
| 4,670 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/listeners/FailureBatchJobItemProcessingListenerTest.java | package com.paypal.jobsystem.batchjobfailures.listeners;
import com.paypal.jobsystem.batchjob.model.BatchJob;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjob.model.BatchJobType;
import com.paypal.jobsystem.batchjobfailures.listeners.FailureBatchJobItemProcessingListener;
import com.paypal.jobsystem.batchjobfailures.services.BatchJobFailedItemService;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.List;
import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.Mockito.*;
// Verifies that the listener records each item-processing failure and, for EXTRACT jobs
// only, re-checks previously failed items after a successful extraction.
@ExtendWith(MockitoExtension.class)
class FailureBatchJobItemProcessingListenerTest {
@InjectMocks
private FailureBatchJobItemProcessingListener testObj;
// Service that persists/clears failed-item bookkeeping.
@Mock
private BatchJobFailedItemService batchJobFailedItemServiceMock;
@Mock
private BatchJobContext batchJobContextMock;
@Mock
private BatchJobItem<Object> batchJobItemMock;
@Mock
private BatchJob batchJobMock;
@Mock
private Exception exceptionMock;
@Test
void onItemProcessingFailure_ShouldCallBatchJobFailedItemServiceItemFailedMethod() {
testObj.onItemProcessingFailure(batchJobContextMock, batchJobItemMock, exceptionMock);
verify(batchJobFailedItemServiceMock).saveItemFailed(batchJobItemMock);
}
// NOTE(review): this test body is a verbatim copy of the failure test above — it invokes
// onItemProcessingFailure and verifies saveItemFailed, while its name promises coverage of
// onItemProcessingSuccess (presumably verifying removeItemProcessed on the service).
// TODO confirm the listener's success-callback signature and fix the invocation/verification.
@Test
void onItemProcessingSuccess_ShouldCallBatchJobFailedItemServiceItemProcessedMethod() {
testObj.onItemProcessingFailure(batchJobContextMock, batchJobItemMock, exceptionMock);
verify(batchJobFailedItemServiceMock).saveItemFailed(batchJobItemMock);
}
// EXTRACT jobs must trigger a re-check of failed items against the freshly extracted batch.
@SuppressWarnings("unchecked")
@Test
void onItemItemExtractionSuccessful_ShouldCallBatchJobFailedItemServiceItemProcessedMethod_ForExtractJobs() {
when(batchJobContextMock.getBatchJob()).thenReturn(batchJobMock);
when(batchJobMock.getType()).thenReturn(BatchJobType.EXTRACT);
testObj.onItemExtractionSuccessful(batchJobContextMock, List.of(batchJobItemMock));
verify(batchJobFailedItemServiceMock).checkUpdatedFailedItems(argThat(list -> list.contains(batchJobItemMock)));
}
// RETRY jobs must NOT trigger the re-check (the retry run itself handles those items).
@SuppressWarnings("unchecked")
@Test
void onItemItemExtractionSuccessful_ShouldCallBatchJobFailedItemServiceItemProcessedMethod_ForRetryJobs() {
when(batchJobContextMock.getBatchJob()).thenReturn(batchJobMock);
when(batchJobMock.getType()).thenReturn(BatchJobType.RETRY);
testObj.onItemExtractionSuccessful(batchJobContextMock, List.of(batchJobItemMock));
verify(batchJobFailedItemServiceMock, times(0)).checkUpdatedFailedItems(any());
}
}
| 4,671 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services/BatchJobFailedItemServiceImplTest.java | package com.paypal.jobsystem.batchjobfailures.services;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjobaudit.repositories.entities.BatchJobItemTrackInfoEntity;
import com.paypal.jobsystem.batchjobaudit.services.BatchJobTrackingService;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItemId;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItemStatus;
import com.paypal.jobsystem.batchjobfailures.repositories.BatchJobFailedItemRepository;
import com.paypal.jobsystem.batchjobfailures.services.retrypolicies.BatchJobFailedItemRetryPolicy;
import com.paypal.jobsystem.batchjobsupport.support.AbstractBatchJobItem;
import com.paypal.infrastructure.mail.services.MailNotificationUtil;
import com.paypal.infrastructure.support.date.TimeMachine;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.data.domain.Pageable;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Optional;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
// Unit tests for BatchJobFailedItemServiceImpl: persisting failed items, incrementing retry
// counters, flagging retry exhaustion (with notification e-mail), removing processed items
// and selecting candidates for retry.
// Fix applied: the two itemProcessed_* test names contained typos ("ShouldShould…",
// "…Nit…" for "Not"); renamed — safe because JUnit discovers tests via @Test, not by name.
@ExtendWith(MockitoExtension.class)
class BatchJobFailedItemServiceImplTest {

	private static final String ID_001 = "001";

	private static final String ID_002 = "002";

	private static final String SELLER_TYPE = "SELLER";

	private static final int INITIAL_NUMBER_OF_RETRIES = 3;

	private BatchJobFailedItemServiceImpl testObj;

	@Mock
	private BatchJobFailedItemRepository batchJobFailedItemRepositoryMock;

	@Mock
	private BatchJobTrackingService batchJobTrackingServiceMock;

	@Mock
	private BatchJobFailedItemRetryPolicy batchJobFailedItemRetryPolicyMock;

	@Mock
	private MailNotificationUtil mailNotificationUtilMock;

	@Captor
	private ArgumentCaptor<BatchJobFailedItem> batchJobFailedItemArgumentCaptor;

	@Mock
	private BatchJobFailedItem batchJobFailedItem1Mock, batchJobFailedItem2Mock, batchJobFailedItem3Mock;

	@Mock
	private BatchJobItemTrackInfoEntity batchJobItemTrackInfoEntity1Mock;

	@Mock
	private BatchJobItem<?> batchJobItem1Mock, batchJobItem2Mock, batchJobItem3Mock;

	@BeforeEach
	void setUp() {
		// Spied so individual tests can stub internal helpers such as getMaxNumberOfFailedItems().
		testObj = Mockito.spy(new BatchJobFailedItemServiceImpl(batchJobFailedItemRepositoryMock,
				batchJobTrackingServiceMock, List.of(batchJobFailedItemRetryPolicyMock), mailNotificationUtilMock));
	}

	// First failure of an item: a brand-new record with zero retries is persisted.
	@Test
	void itemFailed_ShouldCreateAndSaveAnewFailedItem_WhenBatchJobItemIsNotFound() {
		final LocalDateTime now = TimeMachine.now();
		TimeMachine.useFixedClockAt(now);
		final MySellerBatchJobItem sellerBatchJobItem = new MySellerBatchJobItem(new Object());
		final BatchJobFailedItemId seller = new BatchJobFailedItemId(ID_001, SELLER_TYPE);
		when(batchJobFailedItemRepositoryMock.findById(seller)).thenReturn(Optional.empty());
		testObj.saveItemFailed(sellerBatchJobItem);
		verify(batchJobFailedItemRepositoryMock).save(batchJobFailedItemArgumentCaptor.capture());
		final BatchJobFailedItem capturedBatchJobFailedItem = batchJobFailedItemArgumentCaptor.getValue();
		assertThat(capturedBatchJobFailedItem.getId()).isEqualTo(ID_001);
		assertThat(capturedBatchJobFailedItem.getType()).isEqualTo(SELLER_TYPE);
		assertThat(capturedBatchJobFailedItem.getNumberOfRetries()).isZero();
		assertThat(capturedBatchJobFailedItem.getFirstFailureTimestamp()).isEqualTo(now);
	}

	// Repeated failure of a known item: the retry counter is incremented and the timestamp refreshed.
	@Test
	void itemFailed_ShouldUpdateAndSaveTheFailedItem_WhenBatchJobItemIsFound() {
		final LocalDateTime now = TimeMachine.now();
		TimeMachine.useFixedClockAt(now);
		final MySellerBatchJobItem sellerBatchJobItem = new MySellerBatchJobItem(new Object());
		final BatchJobFailedItemId seller = new BatchJobFailedItemId(ID_001, SELLER_TYPE);
		final BatchJobFailedItem batchJobFailedItem = new BatchJobFailedItem();
		batchJobFailedItem.setId(ID_001);
		batchJobFailedItem.setType(SELLER_TYPE);
		batchJobFailedItem.setNumberOfRetries(INITIAL_NUMBER_OF_RETRIES);
		when(batchJobFailedItemRepositoryMock.findById(seller)).thenReturn(Optional.of(batchJobFailedItem));
		testObj.saveItemFailed(sellerBatchJobItem);
		verify(batchJobFailedItemRepositoryMock).save(batchJobFailedItemArgumentCaptor.capture());
		final BatchJobFailedItem capturedBatchJobFailedItem = batchJobFailedItemArgumentCaptor.getValue();
		assertThat(capturedBatchJobFailedItem.getId()).isEqualTo(ID_001);
		assertThat(capturedBatchJobFailedItem.getType()).isEqualTo(SELLER_TYPE);
		assertThat(capturedBatchJobFailedItem.getNumberOfRetries()).isEqualTo(INITIAL_NUMBER_OF_RETRIES + 1);
		assertThat(capturedBatchJobFailedItem.getLastRetryTimestamp()).isEqualTo(now);
	}

	// Reaching the retry limit flips the item to RETRIES_EXHAUSTED and notifies by e-mail.
	@Test
	void itemFailed_ShouldChangeItemStatusToExhaustedAndSendEmail_WhenBatchJobItemNumberOfRetriesIsGreaterOrEqualsThanFive() {
		final MySellerBatchJobItem sellerBatchJobItem = new MySellerBatchJobItem(new Object());
		final BatchJobFailedItemId seller = new BatchJobFailedItemId(ID_001, SELLER_TYPE);
		when(batchJobFailedItemRepositoryMock.findById(seller)).thenReturn(Optional.of(batchJobFailedItem1Mock));
		when(batchJobFailedItem1Mock.getStatus()).thenReturn(BatchJobFailedItemStatus.RETRY_PENDING);
		when(batchJobFailedItem1Mock.getNumberOfRetries()).thenReturn(5);
		when(batchJobFailedItem1Mock.getId()).thenReturn(ID_001);
		testObj.saveItemFailed(sellerBatchJobItem);
		verify(batchJobFailedItem1Mock).setStatus(BatchJobFailedItemStatus.RETRIES_EXHAUSTED);
		verify(mailNotificationUtilMock).sendPlainTextEmail(
				"Max retry attempts reached when processing item [" + ID_001 + "]",
				"Max retry attempts reached when processing item [" + ID_001
						+ "], the item won't be automatically retried again. It will be processed again if the item has any changes or if a manual job execution is done.");
	}

	// Below the retry limit: status stays RETRY_PENDING and no e-mail is sent.
	@Test
	void itemFailed_ShouldNotChangeItemStatusToExhaustedAndNotSendEmail_WhenBatchJobItemNumberOfRetriesIsLessThanFive() {
		final MySellerBatchJobItem sellerBatchJobItem = new MySellerBatchJobItem(new Object());
		final BatchJobFailedItemId seller = new BatchJobFailedItemId(ID_001, SELLER_TYPE);
		when(batchJobFailedItemRepositoryMock.findById(seller)).thenReturn(Optional.of(batchJobFailedItem1Mock));
		when(batchJobFailedItem1Mock.getStatus()).thenReturn(BatchJobFailedItemStatus.RETRY_PENDING);
		when(batchJobFailedItem1Mock.getNumberOfRetries()).thenReturn(4);
		testObj.saveItemFailed(sellerBatchJobItem);
		verify(batchJobFailedItem1Mock, never()).setStatus(BatchJobFailedItemStatus.RETRIES_EXHAUSTED);
		verify(mailNotificationUtilMock, never()).sendPlainTextEmail(
				"Max retry attempts reached when processing item [" + ID_001 + "]",
				"Max retry attempts reached when processing item [" + ID_001
						+ "], the item won't be automatically retried again. It will be processed again if the item has any changes or if a manual job execution is done.");
	}

	// Already exhausted: no repeated status change and no duplicate notification.
	@Test
	void itemFailed_ShouldNotChangeItemStatusToExhaustedAndNotSendEmail_WhenBatchJobItemNumberOfRetriesIsGreaterOrEqualsThanFiveAndStatusIsRetriesExhausted() {
		final MySellerBatchJobItem sellerBatchJobItem = new MySellerBatchJobItem(new Object());
		final BatchJobFailedItemId seller = new BatchJobFailedItemId(ID_001, SELLER_TYPE);
		when(batchJobFailedItemRepositoryMock.findById(seller)).thenReturn(Optional.of(batchJobFailedItem1Mock));
		when(batchJobFailedItem1Mock.getStatus()).thenReturn(BatchJobFailedItemStatus.RETRIES_EXHAUSTED);
		when(batchJobFailedItem1Mock.getNumberOfRetries()).thenReturn(5);
		testObj.saveItemFailed(sellerBatchJobItem);
		verify(batchJobFailedItem1Mock, never()).setStatus(BatchJobFailedItemStatus.RETRIES_EXHAUSTED);
		verify(mailNotificationUtilMock, never()).sendPlainTextEmail(
				"Max retry attempts reached when processing item [" + ID_001 + "]",
				"Max retry attempts reached when processing item [" + ID_001
						+ "], the item won't be automatically retried again. It will be processed again if the item has any changes or if a manual job execution is done.");
	}

	@Test
	void itemProcessed_ShouldRemoveTheFailedItem_WhenFailedItemIsFound() {
		final MySellerBatchJobItem sellerBatchJobItem = new MySellerBatchJobItem(new Object());
		final BatchJobFailedItemId seller = new BatchJobFailedItemId(ID_001, SELLER_TYPE);
		final BatchJobFailedItem batchJobFailedItem = new BatchJobFailedItem();
		batchJobFailedItem.setId(ID_001);
		batchJobFailedItem.setType(SELLER_TYPE);
		batchJobFailedItem.setNumberOfRetries(INITIAL_NUMBER_OF_RETRIES);
		when(batchJobFailedItemRepositoryMock.findById(seller)).thenReturn(Optional.of(batchJobFailedItem));
		testObj.removeItemProcessed(sellerBatchJobItem);
		verify(batchJobFailedItemRepositoryMock).delete(batchJobFailedItem);
	}

	@Test
	void itemProcessed_ShouldNotRemoveTheFailedItem_WhenFailedItemIsNotFound() {
		final MySellerBatchJobItem sellerBatchJobItem = new MySellerBatchJobItem(new Object());
		final BatchJobFailedItemId seller = new BatchJobFailedItemId(ID_001, SELLER_TYPE);
		when(batchJobFailedItemRepositoryMock.findById(seller)).thenReturn(Optional.empty());
		testObj.removeItemProcessed(sellerBatchJobItem);
		verify(batchJobFailedItemRepositoryMock, never()).delete(any());
	}

	// Candidates = RETRY_PENDING items accepted by the retry policy that are NOT currently
	// being processed (item 001 is in-flight, so only 002 qualifies).
	@Test
	void getFailedItemsForRetry_ShouldReturnBatchJobFailedItemThatAreNotBeingProcessed() {
		doReturn(5).when(testObj).getMaxNumberOfFailedItems();
		when(batchJobFailedItem1Mock.getId()).thenReturn(ID_001);
		when(batchJobFailedItem2Mock.getId()).thenReturn(ID_002);
		when(batchJobFailedItemRetryPolicyMock.shouldRetryFailedItem(batchJobFailedItem2Mock)).thenReturn(true);
		when(batchJobFailedItemRepositoryMock.findByTypeAndStatusOrderByLastRetryTimestampAsc(SELLER_TYPE,
				BatchJobFailedItemStatus.RETRY_PENDING, Pageable.ofSize(5)))
						.thenReturn(List.of(batchJobFailedItem1Mock, batchJobFailedItem2Mock));
		when(batchJobItemTrackInfoEntity1Mock.getItemId()).thenReturn(ID_001);
		when(batchJobTrackingServiceMock.getItemsBeingProcessedOrEnquedToProcess(SELLER_TYPE))
				.thenReturn(List.of(batchJobItemTrackInfoEntity1Mock));
		final List<BatchJobFailedItem> result = testObj.getFailedItemsForRetry(SELLER_TYPE);
		assertThat(result.stream().map(BatchJobFailedItem::getId)).containsExactly(ID_002);
	}

	@Test
	void getFailedItems_ShouldReturnAllFailedItemsOfAType() {
		final List<BatchJobFailedItem> batchJobFailedItems = List.of(batchJobFailedItem1Mock, batchJobFailedItem2Mock);
		when(batchJobFailedItemRepositoryMock.findByType("type1")).thenReturn(batchJobFailedItems);
		final List<BatchJobFailedItem> result = testObj.getFailedItems("type1");
		assertThat(result).containsAll(batchJobFailedItems);
	}

	// Updated items that still have a failed-item record get their retry counter reset and
	// are re-saved; items with no record (id "2") are left untouched.
	@Test
	void checkUpdatedFailedItems() {
		when(batchJobItem1Mock.getItemId()).thenReturn("1");
		when(batchJobItem1Mock.getItemType()).thenReturn("test");
		when(batchJobItem2Mock.getItemId()).thenReturn("2");
		when(batchJobItem2Mock.getItemType()).thenReturn("test");
		when(batchJobItem3Mock.getItemId()).thenReturn("3");
		when(batchJobItem3Mock.getItemType()).thenReturn("test");
		when(batchJobFailedItemRepositoryMock.findById(new BatchJobFailedItemId("1", "test")))
				.thenReturn(Optional.of(batchJobFailedItem1Mock));
		when(batchJobFailedItemRepositoryMock.findById(new BatchJobFailedItemId("2", "test")))
				.thenReturn(Optional.empty());
		when(batchJobFailedItemRepositoryMock.findById(new BatchJobFailedItemId("3", "test")))
				.thenReturn(Optional.of(batchJobFailedItem3Mock));
		testObj.checkUpdatedFailedItems(List.of(batchJobItem1Mock, batchJobItem2Mock, batchJobItem3Mock));
		verify(batchJobFailedItem1Mock).setNumberOfRetries(0);
		verify(batchJobFailedItemRepositoryMock).save(batchJobFailedItem1Mock);
		verify(batchJobFailedItemRepositoryMock, times(0)).save(batchJobFailedItem2Mock);
		verify(batchJobFailedItem3Mock).setNumberOfRetries(0);
		verify(batchJobFailedItemRepositoryMock).save(batchJobFailedItem3Mock);
	}

	// Minimal concrete BatchJobItem with fixed id/type, used to drive the service under test.
	private static class MySellerBatchJobItem extends AbstractBatchJobItem<Object> {

		protected MySellerBatchJobItem(final Object item) {
			super(item);
		}

		@Override
		public String getItemId() {
			return ID_001;
		}

		@Override
		public String getItemType() {
			return SELLER_TYPE;
		}

	}

}
| 4,672 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services/cache/BatchJobFailedItemCacheServiceImplTest.java | package com.paypal.jobsystem.batchjobfailures.services.cache;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjobfailures.services.cache.BatchJobFailedItemCacheServiceImpl;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;
// Unit tests for BatchJobFailedItemCacheServiceImpl: storing, retrieving, evicting and
// refreshing batch job items in the per-item-type Spring cache, including the bulk
// retrieveAllItems variants with custom cache-miss resolvers and resolve policies.
// Fix applied: retrieveAllItems3 contained a verbatim duplicate of the
// batchJobItemMock2.getItemType()/getItemId() stubbings; the redundant pair was removed
// (re-stubbing with identical values is a no-op in Mockito).
@ExtendWith(MockitoExtension.class)
class BatchJobFailedItemCacheServiceImplTest {

	public static final String ITEM_TYPE = "itemType";

	public static final String ITEM_ID_1 = "itemId1";

	public static final String ITEM_ID_2 = "itemId2";

	@InjectMocks
	private BatchJobFailedItemCacheServiceImpl testObj;

	@Mock
	private CacheManager cacheManagerMock;

	@Mock
	private Cache cacheMock;

	@Mock
	private BatchJobItem<Object> batchJobItemMock1, batchJobItemMock2;

	@BeforeEach
	void setUp() {
		// Every test resolves the cache for ITEM_TYPE through the cache manager.
		when(cacheManagerMock.getCache(ITEM_TYPE)).thenReturn(cacheMock);
	}

	@Test
	void storeItem_ShouldPutItemInCache() {
		when(batchJobItemMock1.getItemType()).thenReturn(ITEM_TYPE);
		when(batchJobItemMock1.getItemId()).thenReturn(ITEM_ID_1);
		testObj.storeItem(batchJobItemMock1);
		verify(cacheMock).put(ITEM_ID_1, batchJobItemMock1);
	}

	@Test
	void retrieveItem1_ShouldReturnItemFromCache_WhenIsPresent() {
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass()))
				.thenReturn(batchJobItemMock1);
		final Optional<BatchJobItem<Object>> result = testObj
				.retrieveItem((Class<BatchJobItem<Object>>) batchJobItemMock1.getClass(), ITEM_TYPE, ITEM_ID_1);
		assertThat(result).contains(batchJobItemMock1);
	}

	@Test
	void retrieveItem1_ShouldNotReturnItemFromCache_WhenIsNotPresent() {
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass())).thenReturn(null);
		final Optional<BatchJobItem<Object>> result = testObj
				.retrieveItem((Class<BatchJobItem<Object>>) batchJobItemMock1.getClass(), ITEM_TYPE, ITEM_ID_1);
		assertThat(result).isEmpty();
	}

	// Overload keyed by a BatchJobFailedItem instead of explicit (type, id).
	@Test
	void retrieveItem2_ShouldReturnItemFromCache_WhenIsPresent() {
		final BatchJobFailedItem batchJobFailedItem = new BatchJobFailedItem();
		batchJobFailedItem.setId(ITEM_ID_1);
		batchJobFailedItem.setType(ITEM_TYPE);
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass()))
				.thenReturn(batchJobItemMock1);
		final Optional<BatchJobItem<Object>> result = testObj
				.retrieveItem((Class<BatchJobItem<Object>>) batchJobItemMock1.getClass(), batchJobFailedItem);
		assertThat(result).contains(batchJobItemMock1);
	}

	@Test
	void retrieveItem2_ShouldNotReturnItemFromCache_WhenIsNotPresent() {
		final BatchJobFailedItem batchJobFailedItem = new BatchJobFailedItem();
		batchJobFailedItem.setId(ITEM_ID_1);
		batchJobFailedItem.setType(ITEM_TYPE);
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass())).thenReturn(null);
		final Optional<BatchJobItem<Object>> result = testObj
				.retrieveItem((Class<BatchJobItem<Object>>) batchJobItemMock1.getClass(), batchJobFailedItem);
		assertThat(result).isEmpty();
	}

	@Test
	void removeItem_ShouldRemoveItemFromCache() {
		testObj.removeItem(ITEM_TYPE, ITEM_ID_1);
		verify(cacheMock).evictIfPresent(ITEM_ID_1);
	}

	// Only items already cached are refreshed; cache misses are skipped.
	@Test
	void refreshCachedItems_ShouldRefreshItemFromCache_WhenTheyArePresent() {
		when(batchJobItemMock1.getItemType()).thenReturn(ITEM_TYPE);
		when(batchJobItemMock1.getItemId()).thenReturn(ITEM_ID_1);
		when(batchJobItemMock2.getItemType()).thenReturn(ITEM_TYPE);
		when(batchJobItemMock2.getItemId()).thenReturn(ITEM_ID_2);
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass()))
				.thenReturn(batchJobItemMock1);
		when(cacheMock.get(ITEM_ID_2, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass())).thenReturn(null);
		testObj.refreshCachedItems(List.of(batchJobItemMock1, batchJobItemMock2));
		verify(cacheMock).put(ITEM_ID_1, batchJobItemMock1);
		verify(cacheMock, never()).put(ITEM_ID_2, batchJobItemMock2);
	}

	@Test
	void refreshCachedItem_ShouldRefreshItemFromCache_WhenIsPresent() {
		when(batchJobItemMock1.getItemType()).thenReturn(ITEM_TYPE);
		when(batchJobItemMock1.getItemId()).thenReturn(ITEM_ID_1);
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass()))
				.thenReturn(batchJobItemMock1);
		testObj.refreshCachedItem(batchJobItemMock1);
		verify(cacheMock).put(ITEM_ID_1, batchJobItemMock1);
	}

	@Test
	void refreshCachedItem_ShouldNotRefreshItemFromCache_WhenIsNotPresent() {
		when(batchJobItemMock1.getItemType()).thenReturn(ITEM_TYPE);
		when(batchJobItemMock1.getItemId()).thenReturn(ITEM_ID_1);
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass())).thenReturn(null);
		testObj.refreshCachedItem(batchJobItemMock1);
		verify(cacheMock, never()).put(ITEM_ID_1, batchJobItemMock1);
	}

	@Test
	void retrieveAllItems1_ShouldRetrieveAllItemsFromCacheInAMap() {
		final BatchJobFailedItem batchJobFailedItem1 = new BatchJobFailedItem();
		batchJobFailedItem1.setType(ITEM_TYPE);
		batchJobFailedItem1.setId(ITEM_ID_1);
		final BatchJobFailedItem batchJobFailedItem2 = new BatchJobFailedItem();
		batchJobFailedItem2.setType(ITEM_TYPE);
		batchJobFailedItem2.setId(ITEM_ID_2);
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass()))
				.thenReturn(batchJobItemMock1);
		when(cacheMock.get(ITEM_ID_2, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass()))
				.thenReturn(batchJobItemMock2);
		final Map<BatchJobFailedItem, Optional<BatchJobItem<Object>>> result = testObj.retrieveAllItems(
				(Class<BatchJobItem<Object>>) batchJobItemMock1.getClass(),
				List.of(batchJobFailedItem1, batchJobFailedItem2));
		assertThat(result).containsEntry(batchJobFailedItem1, Optional.of(batchJobItemMock1))
				.containsEntry(batchJobFailedItem2, Optional.of(batchJobItemMock2));
	}

	// Item 2 misses the cache first; the resolver supplies it and it gets cached.
	@Test
	void retrieveAllItems2_ShouldRetrieveAllItemsFromCacheInAMapByTheGivenResolver() {
		final BatchJobFailedItem batchJobFailedItem1 = new BatchJobFailedItem();
		batchJobFailedItem1.setType(ITEM_TYPE);
		batchJobFailedItem1.setId(ITEM_ID_1);
		final BatchJobFailedItem batchJobFailedItem2 = new BatchJobFailedItem();
		batchJobFailedItem2.setType(ITEM_TYPE);
		batchJobFailedItem2.setId(ITEM_ID_2);
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass()))
				.thenReturn(batchJobItemMock1);
		when(cacheMock.get(ITEM_ID_2, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass())).thenReturn(null)
				.thenReturn(batchJobItemMock2);
		when(batchJobItemMock2.getItemType()).thenReturn(ITEM_TYPE);
		when(batchJobItemMock2.getItemId()).thenReturn(ITEM_ID_2);
		final Map<BatchJobFailedItem, Optional<BatchJobItem<Object>>> result = testObj.retrieveAllItems(
				(Class<BatchJobItem<Object>>) batchJobItemMock1.getClass(),
				List.of(batchJobFailedItem1, batchJobFailedItem2), batchJobFailedItems -> List.of(batchJobItemMock2));
		verify(cacheMock).put(ITEM_ID_2, batchJobItemMock2);
		verify(cacheMock, never()).put(ITEM_ID_1, batchJobItemMock1);
		assertThat(result).containsEntry(batchJobFailedItem1, Optional.of(batchJobItemMock1))
				.containsEntry(batchJobFailedItem2, Optional.of(batchJobItemMock2));
	}

	// With a resolve policy both items are reloaded by the resolver and re-cached.
	@Test
	void retrieveAllItems3_ShouldRetrieveAllItemsFromCacheInAMapByTheGivenResolverAndTheGivenPolicy() {
		when(batchJobItemMock1.getItemType()).thenReturn(ITEM_TYPE);
		when(batchJobItemMock1.getItemId()).thenReturn(ITEM_ID_1);
		when(batchJobItemMock2.getItemType()).thenReturn(ITEM_TYPE);
		when(batchJobItemMock2.getItemId()).thenReturn(ITEM_ID_2);
		final BatchJobFailedItem batchJobFailedItem1 = new BatchJobFailedItem();
		batchJobFailedItem1.setType(ITEM_TYPE);
		batchJobFailedItem1.setId(ITEM_ID_1);
		final BatchJobFailedItem batchJobFailedItem2 = new BatchJobFailedItem();
		batchJobFailedItem2.setType(ITEM_TYPE);
		batchJobFailedItem2.setId(ITEM_ID_2);
		when(cacheMock.get(ITEM_ID_1, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass()))
				.thenReturn(batchJobItemMock1);
		when(cacheMock.get(ITEM_ID_2, (Class<BatchJobItem<Object>>) batchJobItemMock1.getClass())).thenReturn(null)
				.thenReturn(batchJobItemMock2);
		final Map<BatchJobFailedItem, Optional<BatchJobItem<Object>>> result = testObj.retrieveAllItems(
				(Class<BatchJobItem<Object>>) batchJobItemMock1.getClass(),
				List.of(batchJobFailedItem1, batchJobFailedItem2),
				batchJobFailedItems -> List.of(batchJobItemMock1, batchJobItemMock2),
				cacheFailures -> (List.of(batchJobFailedItem1, batchJobFailedItem2)));
		verify(cacheMock).put(ITEM_ID_2, batchJobItemMock2);
		verify(cacheMock).put(ITEM_ID_1, batchJobItemMock1);
		assertThat(result).containsEntry(batchJobFailedItem1, Optional.of(batchJobItemMock1))
				.containsEntry(batchJobFailedItem2, Optional.of(batchJobItemMock2));
	}

}
| 4,673 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services/resolvepolicies/OnlyCacheFailureItemsFailedItemCacheFailureResolvePolicyTest.java | package com.paypal.jobsystem.batchjobfailures.services.resolvepolicies;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.jobsystem.batchjobfailures.services.resolvepolicies.OnlyCacheFailureItemsFailedItemCacheFailureResolvePolicy;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class OnlyCacheFailureItemsFailedItemCacheFailureResolvePolicyTest {

	@InjectMocks
	private OnlyCacheFailureItemsFailedItemCacheFailureResolvePolicy testObj;

	@Mock
	private BatchJobFailedItem batchJobFailedItemMock1, batchJobFailedItemMock2;

	// The "only cache failures" policy hands back exactly the items that failed to load from
	// the cache, without pulling in anything else.
	@Test
	void itemsToReload_shouldReturnFailedItems() {
		when(batchJobFailedItemMock1.getType()).thenReturn("itemType");
		when(batchJobFailedItemMock2.getType()).thenReturn("itemType");

		final List<BatchJobFailedItem> cacheFailures = List.of(batchJobFailedItemMock1, batchJobFailedItemMock2);
		final List<BatchJobFailedItem> result = testObj.itemsToReloadOnCacheFailure(cacheFailures);

		assertThat(result).containsExactlyInAnyOrder(batchJobFailedItemMock1, batchJobFailedItemMock2);
	}

}
| 4,674 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services/resolvepolicies/AllRetryPendingFailedItemCacheFailureResolvePolicyTest.java | package com.paypal.jobsystem.batchjobfailures.services.resolvepolicies;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.jobsystem.batchjobfailures.services.resolvepolicies.AllRetryPendingFailedItemCacheFailureResolvePolicy;
import com.paypal.jobsystem.batchjobfailures.services.BatchJobFailedItemService;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItemStatus;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.InjectMocks;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class AllRetryPendingFailedItemCacheFailureResolvePolicyTest {

	@InjectMocks
	private AllRetryPendingFailedItemCacheFailureResolvePolicy testObj;

	// Renamed from "batchJobFailedItemService" to carry the "Mock" suffix, matching the
	// naming convention used by every other mock in this test suite.
	@Mock
	private BatchJobFailedItemService batchJobFailedItemServiceMock;

	@Mock
	private BatchJobFailedItem batchJobFailedItemMock1, batchJobFailedItemMock2;

	// The "all retry pending" policy ignores the concrete cache failures and reloads every
	// RETRY_PENDING item of the same type instead.
	@Test
	void itemsToReload_shouldReturnAllRetryPendingItems() {
		when(batchJobFailedItemMock1.getType()).thenReturn("itemType");
		when(batchJobFailedItemServiceMock.getFailedItems("itemType", BatchJobFailedItemStatus.RETRY_PENDING))
				.thenReturn(List.of(batchJobFailedItemMock1, batchJobFailedItemMock2));

		final List<BatchJobFailedItem> result = testObj.itemsToReloadOnCacheFailure(List.of(batchJobFailedItemMock1));

		assertThat(result).containsExactlyInAnyOrder(batchJobFailedItemMock1, batchJobFailedItemMock2);
	}

}
| 4,675 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services/resolvepolicies/AbstractBatchJobFailedItemCacheFailureResolvePolicyTest.java | package com.paypal.jobsystem.batchjobfailures.services.resolvepolicies;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.infrastructure.support.exceptions.HMCException;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.catchThrowable;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class AbstractBatchJobFailedItemCacheFailureResolvePolicyTest {

	@InjectMocks
	private MyPolicy testObj;

	@Mock
	private BatchJobFailedItem batchJobFailedItemMock1, batchJobFailedItemMock2;

	@Test
	void itemsToReload_shouldReturnEmptyWhenBatchJobFailedItemsIsEmpty() {
		final List<BatchJobFailedItem> result = testObj.itemsToReloadOnCacheFailure(new ArrayList<>());

		assertThat(result).isEmpty();
	}

	@Test
	void itemsToReload_shouldThrowExceptionOnMultipleBatchJobItemTypes() {
		// Mixing item types in a single resolution is rejected by the base policy.
		when(batchJobFailedItemMock1.getType()).thenReturn("t1");
		when(batchJobFailedItemMock2.getType()).thenReturn("t2");
		final List<BatchJobFailedItem> mixedTypeItems = List.of(batchJobFailedItemMock1, batchJobFailedItemMock2);

		final Throwable throwable = catchThrowable(() -> testObj.itemsToReloadOnCacheFailure(mixedTypeItems));

		assertThat(throwable).isNotNull().isInstanceOf(HMCException.class);
	}

	@Test
	void itemsToReload_shouldInvokeInternalMethodFromChildClasses() {
		// MyPolicy's hook reverses its input, so a reversed result proves the template
		// method delegated to the subclass.
		final List<BatchJobFailedItem> cacheFailures = List.of(batchJobFailedItemMock1, batchJobFailedItemMock2);

		final List<BatchJobFailedItem> result = testObj.itemsToReloadOnCacheFailure(cacheFailures);

		assertThat(result).containsExactly(batchJobFailedItemMock2, batchJobFailedItemMock1);
	}

	// Minimal concrete subclass whose hook returns the cache failures in reverse order so
	// the tests can detect that the hook was invoked.
	static class MyPolicy extends AbstractBatchJobFailedItemCacheFailureResolvePolicy {

		@Override
		protected List<BatchJobFailedItem> itemsToReloadOnCacheFailureInternal(
				final List<BatchJobFailedItem> cacheFailures) {
			final List<BatchJobFailedItem> reversed = new ArrayList<>();
			for (int i = cacheFailures.size() - 1; i >= 0; i--) {
				reversed.add(cacheFailures.get(i));
			}
			return reversed;
		}

	}

}
| 4,676 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services/retrypolicies/ExponentialBackOffItemRetryPolicyTest.java | package com.paypal.jobsystem.batchjobfailures.services.retrypolicies;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.infrastructure.support.date.TimeMachine;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.InjectMocks;
import java.time.LocalDateTime;
import static org.assertj.core.api.Assertions.assertThat;
// Tests the exponential back-off retry policy: an item is retryable once enough time has
// elapsed since its last retry (or first failure, when it was never retried).
// NOTE(review): TimeMachine.useFixedClockAt(...) pins a global fixed clock and no test
// resets it afterwards — this state presumably leaks into later tests in the same JVM.
// TODO confirm TimeMachine's reset API and add an @AfterEach cleanup if one exists.
@ExtendWith(MockitoExtension.class)
class ExponentialBackOffItemRetryPolicyTest {
@InjectMocks
private ExponentialBackOffItemRetryPolicy testObj;
// Zero retries + last retry an hour ago: the back-off window has elapsed.
@Test
void shouldRetryFailedItem_ShouldReturnTrue_WhenJobIsRetryable() {
final LocalDateTime now = TimeMachine.now();
TimeMachine.useFixedClockAt(now);
final BatchJobFailedItem batchJobFailedItem = new BatchJobFailedItem();
batchJobFailedItem.setLastRetryTimestamp(now.minusHours(1));
batchJobFailedItem.setNumberOfRetries(0);
final boolean result = testObj.shouldRetryFailedItem(batchJobFailedItem);
assertThat(result).isTrue();
}
// Never retried: the policy falls back to the first-failure timestamp.
@Test
void shouldRetryFailedItem_ShouldReturnTrue_WhenJobIsRetryableAndLastRetryTimestampIsNull() {
final LocalDateTime now = TimeMachine.now();
TimeMachine.useFixedClockAt(now);
final BatchJobFailedItem batchJobFailedItem = new BatchJobFailedItem();
batchJobFailedItem.setFirstFailureTimestamp(now.minusHours(1));
batchJobFailedItem.setLastRetryTimestamp(null);
batchJobFailedItem.setNumberOfRetries(0);
final boolean result = testObj.shouldRetryFailedItem(batchJobFailedItem);
assertThat(result).isTrue();
}
// Retried just now with one attempt: still inside the back-off window.
@Test
void shouldRetryFailedItem_ShouldReturnFalse_WhenJobIsNotRetryable() {
final LocalDateTime now = TimeMachine.now();
TimeMachine.useFixedClockAt(now);
final BatchJobFailedItem batchJobFailedItem = new BatchJobFailedItem();
batchJobFailedItem.setLastRetryTimestamp(now);
batchJobFailedItem.setNumberOfRetries(1);
final boolean result = testObj.shouldRetryFailedItem(batchJobFailedItem);
assertThat(result).isFalse();
}
}
| 4,677 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobfailures/services/retrypolicies/AttemptsRetryPolicyTest.java | package com.paypal.jobsystem.batchjobfailures.services.retrypolicies;
import com.paypal.jobsystem.batchjobfailures.repositories.entities.BatchJobFailedItem;
import com.paypal.jobsystem.batchjobfailures.services.retrypolicies.AttemptsRetryPolicy;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link AttemptsRetryPolicy}: an item may be retried while its retry
 * count is below five, and must not be retried once it reaches five.
 */
@ExtendWith(MockitoExtension.class)
class AttemptsRetryPolicyTest {

	@InjectMocks
	private AttemptsRetryPolicy testObj;

	@Mock
	private BatchJobFailedItem batchJobFailedItemMock;

	@Test
	void shouldRetryFailedItem_ShouldReturnTrue_WhenNumberOfRetriesIsLessThanFive() {
		// given: the item has been retried fewer times than the 5-attempt limit
		when(batchJobFailedItemMock.getNumberOfRetries()).thenReturn(4);

		// when
		final boolean shouldRetry = testObj.shouldRetryFailedItem(batchJobFailedItemMock);

		// then
		assertThat(shouldRetry).isTrue();
	}

	@Test
	void shouldRetryFailedItem_ShouldReturnFalse_WhenNumberOfRetriesIsBiggerOrEqualThanFive() {
		// given: the item has already exhausted the 5-attempt limit
		when(batchJobFailedItemMock.getNumberOfRetries()).thenReturn(5);

		// when
		final boolean shouldRetry = testObj.shouldRetryFailedItem(batchJobFailedItemMock);

		// then
		assertThat(shouldRetry).isFalse();
	}

}
| 4,678 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration/QuartzIntegrationListenerConfigurationTest.java | package com.paypal.jobsystem.quartzintegration;
import com.paypal.jobsystem.quartzintegration.QuartzIntegrationListenerConfiguration;
import com.paypal.jobsystem.quartzintegration.listener.JobExecutionInformationListener;
import com.paypal.jobsystem.quartzintegration.listener.SameJobVetoingListener;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.ListenerManager;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link QuartzIntegrationListenerConfiguration}: verifies that the
 * configuration registers the {@link JobExecutionInformationListener} as a Quartz job
 * listener and the {@link SameJobVetoingListener} as a Quartz trigger listener on the
 * scheduler's {@link ListenerManager}.
 */
@ExtendWith(MockitoExtension.class)
class QuartzIntegrationListenerConfigurationTest {

	@InjectMocks
	private QuartzIntegrationListenerConfiguration testObj;

	// Renamed from 'scheduler' for consistency with the '...Mock' convention used by the
	// sibling test classes in this module.
	@Mock
	private Scheduler schedulerMock;

	@Mock
	private ListenerManager listenerManagerMock;

	@Mock
	private JobExecutionInformationListener jobExecutionInformationListenerMock;

	// Renamed from 'sameJobVetoingListener' for the same '...Mock' convention.
	@Mock
	private SameJobVetoingListener sameJobVetoingListenerMock;

	@Captor
	private ArgumentCaptor<JobExecutionInformationListener> jobExecutionInformationListenerArgumentCaptor;

	// Fixed field naming: was 'SameJobVetoingListenerArgumentCapture' (UpperCamelCase),
	// which violates Java's lowerCamelCase convention for fields.
	@Captor
	private ArgumentCaptor<SameJobVetoingListener> sameJobVetoingListenerArgumentCaptor;

	@Test
	void jobDeltaListenerInit_shouldAddJobExecutionInformationAndSameJobVetoingListener() throws SchedulerException {
		when(schedulerMock.getListenerManager()).thenReturn(listenerManagerMock);
		doNothing().when(listenerManagerMock).addJobListener(jobExecutionInformationListenerMock);

		testObj.jobJobExecutionInformationListenerInit();

		// The exact listener instance wired by the configuration must be registered.
		verify(listenerManagerMock).addJobListener(jobExecutionInformationListenerArgumentCaptor.capture());
		assertThat(jobExecutionInformationListenerArgumentCaptor.getValue())
				.isEqualTo(jobExecutionInformationListenerMock);
	}

	@Test
	void triggerSameJobVetoingListener_shouldAddJobExecutionInformationAndSameJobVetoingListener()
			throws SchedulerException {
		when(schedulerMock.getListenerManager()).thenReturn(listenerManagerMock);
		doNothing().when(listenerManagerMock).addTriggerListener(sameJobVetoingListenerMock);

		testObj.triggerSameJobVetoingListener();

		verify(listenerManagerMock).addTriggerListener(sameJobVetoingListenerArgumentCaptor.capture());
		assertThat(sameJobVetoingListenerArgumentCaptor.getValue()).isEqualTo(sameJobVetoingListenerMock);
	}

}
| 4,679 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration/support/AbstractDeltaInfoJobTest.java | package com.paypal.jobsystem.quartzintegration.support;
import com.paypal.jobsystem.quartzintegration.repositories.entities.JobExecutionInformationEntity;
import com.paypal.jobsystem.quartzintegration.repositories.JobExecutionInformationRepository;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import java.util.Date;
import java.util.Map;
import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code AbstractDeltaInfoJob}: building the job data map with a delta
 * date (and optional extra entries) and resolving the delta either from the job data
 * map or from the last recorded execution in {@link JobExecutionInformationRepository}.
 */
@ExtendWith(MockitoExtension.class)
class AbstractDeltaInfoJobTest {

	private static final String INCLUDE_PAID = "includePaid";

	private static final String DELTA = "delta";

	@InjectMocks
	private MyAbstractDeltaInfoJob testObj;

	@Mock
	private JobExecutionContext jobExecutionContextMock;

	@Mock
	private JobDetail jobDetailMock;

	@Mock
	private JobExecutionInformationRepository jobExecutionInformationRepository;

	@Mock
	private JobExecutionInformationEntity jobExecutionInformationEntityMock;

	@Test
	void createJobDataMap_shouldAddDeltaKeyWithDatePassedAsArgument() {
		final Date now = new Date();

		final JobDataMap result = MyAbstractDeltaInfoJob.createJobDataMap(now);

		// The provided date must be stored under the "delta" key.
		assertThat(result).contains(Map.entry(DELTA, now));
	}

	@Test
	void createJobDataMap_shouldAddDeltaKeyWithDateAndIncludePaidKeyWithBooleanPassedAsArguments() {
		final Date now = new Date();

		final JobDataMap result = MyAbstractDeltaInfoJob.createJobDataMap(now, Map.of(INCLUDE_PAID, true));

		// Both the delta date and the extra map entries must be present.
		assertThat(result).contains(Map.entry(DELTA, now));
		assertThat(result).contains(Map.entry(INCLUDE_PAID, true));
	}

	@Test
	void getDelta_shouldReturnDeltaTimeWhenJobExecutionContextWithDeltaIsPassedAsArgument() {
		final Date now = new Date();
		when(jobExecutionContextMock.getJobDetail()).thenReturn(jobDetailMock);
		// Delta present in the job data map: it is returned directly, no repository lookup.
		final JobDataMap jobDataMap = new JobDataMap();
		jobDataMap.put(DELTA, now);
		when(jobDetailMock.getJobDataMap()).thenReturn(jobDataMap);

		final Date result = testObj.getDelta(jobExecutionContextMock);

		assertThat(result).isEqualTo(now);
	}

	@Test
	void getDelta_shouldReturnDeltaTimeWhenJobExecutionContextWithDeltaIsNotPassedAsAndJobWasPreviouslyRunArgument() {
		final Date now = new Date();
		when(jobExecutionContextMock.getJobDetail()).thenReturn(jobDetailMock);
		// Empty data map: the delta falls back to the start time of the last finished run.
		final JobDataMap jobDataMap = new JobDataMap();
		when(jobDetailMock.getJobDataMap()).thenReturn(jobDataMap);
		// doReturn avoids generics issues when stubbing getJobClass() with a Class literal.
		doReturn(MyJob.class).when(jobDetailMock).getJobClass();
		when(jobExecutionInformationRepository.findTopByTypeAndEndTimeIsNotNullOrderByIdDesc(any()))
				.thenReturn(jobExecutionInformationEntityMock);
		when(jobExecutionInformationEntityMock.getStartTime()).thenReturn(now);

		final Date result = testObj.getDelta(jobExecutionContextMock);

		assertThat(result).isEqualTo(now);
	}

	@Test
	void getDelta_shouldReturnNullWhenJobExecutionContextWithDeltaIsNotPassedAsAndJobWasNotPreviouslyRunArgument() {
		when(jobExecutionContextMock.getJobDetail()).thenReturn(jobDetailMock);
		// No delta in the data map and no recorded start time: getDelta yields null.
		final JobDataMap jobDataMap = new JobDataMap();
		when(jobDetailMock.getJobDataMap()).thenReturn(jobDataMap);
		doReturn(MyJob.class).when(jobDetailMock).getJobClass();
		when(jobExecutionInformationRepository.findTopByTypeAndEndTimeIsNotNullOrderByIdDesc(any()))
				.thenReturn(jobExecutionInformationEntityMock);
		when(jobExecutionInformationEntityMock.getStartTime()).thenReturn(null);

		final Date result = testObj.getDelta(jobExecutionContextMock);

		assertThat(result).isNull();
	}

	// Minimal concrete subclass so the abstract class under test can be instantiated.
	private static class MyAbstractDeltaInfoJob extends AbstractDeltaInfoJob {

		@Override
		public void execute(final JobExecutionContext context) {
		}

	}

	// Plain Quartz Job used as the job class reported by the mocked JobDetail.
	protected static class MyJob implements Job {

		@Override
		public void execute(final JobExecutionContext context) {
		}

	}

}
| 4,680 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration/controllers/JobControllerTest.java | package com.paypal.jobsystem.quartzintegration.controllers;
import com.paypal.jobsystem.quartzintegration.repositories.entities.JobExecutionInformationEntity;
import com.paypal.jobsystem.quartzintegration.controllers.JobController;
import com.paypal.jobsystem.quartzintegration.services.JobService;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.*;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import java.util.List;
import java.util.Set;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link JobController}: the status endpoint must collect the
 * {@link JobExecutionInformationEntity} stored under each job's "runningInstanceId"
 * data-map entry and reply with HTTP 202 (ACCEPTED).
 */
@ExtendWith(MockitoExtension.class)
class JobControllerTest {

	private static final String JOB_NAME_1 = "job1";

	private static final String JOB_NAME_2 = "job2";

	@InjectMocks
	private JobController testObj;

	@Mock
	private JobService jobServiceMock;

	@Mock
	private JobExecutionInformationEntity jobExecutionInformationEntityOneMock, jobExecutionInformationEntityTwoMock;

	@Test
	void status_shouldReplyWithListOfJobs() throws SchedulerException {
		// Two registered jobs, each carrying its execution-information entity in the data map.
		final JobDetail job1 = JobBuilder.newJob(MyJob.class).withIdentity(JOB_NAME_1).build();
		job1.getJobDataMap().put("runningInstanceId", jobExecutionInformationEntityOneMock);
		final JobDetail job2 = JobBuilder.newJob(MyJob.class).withIdentity(JOB_NAME_2).build();
		job2.getJobDataMap().put("runningInstanceId", jobExecutionInformationEntityTwoMock);
		when(jobServiceMock.getJobs()).thenReturn(Set.of(job1, job2));

		final ResponseEntity<List<JobExecutionInformationEntity>> result = testObj.status();

		assertThat(result.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
		// Set iteration order is unspecified, so the body is checked in any order.
		assertThat(result.getBody()).containsExactlyInAnyOrder(jobExecutionInformationEntityOneMock,
				jobExecutionInformationEntityTwoMock);
	}

	// Minimal Quartz Job so JobBuilder can build concrete JobDetails for the fixtures.
	static class MyJob implements Job {

		@Override
		public void execute(final JobExecutionContext context) {
			// doNothing
		}

	}

}
| 4,681 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration/controllers/AbstractJobControllerTest.java | package com.paypal.jobsystem.quartzintegration.controllers;
import com.paypal.jobsystem.quartzintegration.support.AbstractDeltaInfoJob;
import com.paypal.jobsystem.quartzintegration.services.JobService;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.SchedulerException;
import java.util.Date;
import java.util.Map;
import static org.mockito.Mockito.verify;
/**
 * Unit tests for {@code AbstractJobController}: runSingleJob must delegate to
 * {@link JobService#createAndRunSingleExecutionJob} with a data map built via
 * {@code AbstractDeltaInfoJob.createJobDataMap} and a null trigger date.
 */
@ExtendWith(MockitoExtension.class)
class AbstractJobControllerTest {

	// Spied so the concrete subclass's inherited protected methods can be exercised.
	@Spy
	@InjectMocks
	private MyAbstractJobController testObj;

	@Mock
	private JobService jobServiceMock;

	@Test
	void runSingleJob_shouldCallJobServiceCreateAndRunSingleExecutionJobWithThreeParamsPassed()
			throws SchedulerException {
		final Date now = new Date();
		final String jobName = "newJob";
		// Expected data map mirrors what the controller should build internally.
		final JobDataMap jobDataMap = AbstractDeltaInfoJob.createJobDataMap(now);

		testObj.runSingleJob(jobName, MyInfoJob.class, now);

		verify(jobServiceMock).createAndRunSingleExecutionJob(jobName, MyInfoJob.class, jobDataMap, null);
	}

	@Test
	void runSingleJob_shouldCallJobServiceCreateAndRunSingleExecutionJobWithFourParamsPassed()
			throws SchedulerException {
		final Date now = new Date();
		final String jobName = "newJob";
		// Empty extra-parameter map variant of the overload.
		final JobDataMap jobDataMap = AbstractDeltaInfoJob.createJobDataMap(now, Map.of());

		testObj.runSingleJob(jobName, MyInfoJob.class, now, Map.of());

		verify(jobServiceMock).createAndRunSingleExecutionJob(jobName, MyInfoJob.class, jobDataMap, null);
	}

	// Concrete subclass so the abstract controller can be instantiated.
	private static class MyAbstractJobController extends AbstractJobController {

	}

	// Minimal delta-info job type used as the scheduled job class in the tests.
	private static class MyInfoJob extends AbstractDeltaInfoJob {

		@Override
		public void execute(final JobExecutionContext context) {
		}

	}

}
| 4,682 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration/listeners/JobExecutionInformationEntityListenerTest.java | package com.paypal.jobsystem.quartzintegration.listeners;
import com.paypal.jobsystem.quartzintegration.repositories.entities.JobExecutionInformationEntity;
import com.paypal.jobsystem.quartzintegration.repositories.entities.JobStatus;
import com.paypal.jobsystem.quartzintegration.listener.JobExecutionInformationListener;
import com.paypal.jobsystem.quartzintegration.repositories.JobExecutionInformationRepository;
import com.paypal.infrastructure.support.date.DateUtil;
import com.paypal.infrastructure.support.date.TimeMachine;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.*;
import java.time.LocalDateTime;
import java.time.ZoneId;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link JobExecutionInformationListener}: before execution a RUNNING
 * {@link JobExecutionInformationEntity} is persisted; after execution the entity stored
 * in the job data map is marked COMPLETED with an end time and saved again.
 */
@ExtendWith(MockitoExtension.class)
class JobExecutionInformationEntityListenerTest {

	private static final String MY_JOB_NAME_EXECUTION = "myJobNameExecution";

	@InjectMocks
	private JobExecutionInformationListener testObj;

	// NOTE(review): not referenced by any test here — presumably a constructor dependency
	// of the listener satisfied by @InjectMocks; verify against the listener's constructor.
	@Mock
	private Scheduler schedulerMock;

	@Mock
	private JobExecutionContext contextMock;

	@Mock
	private JobExecutionInformationRepository jobExecutionInformationRepositoryMock;

	@Mock
	private JobExecutionInformationEntity savedJobExecutionInformationEntityMock;

	@Mock
	private JobExecutionException jobExceptionMock;

	@Captor
	private ArgumentCaptor<JobExecutionInformationEntity> jobExecutionInformationEntityArgumentCaptor;

	// NOTE(review): TimeMachine's fixed clock is never restored after these tests — TODO
	// confirm whether an @AfterEach reset is needed for the rest of the suite.

	@Test
	void jobToBeExecuted_shouldSaveTheJobExecutionInformationOfLastExecution() {
		// Fixed clock so the persisted start time can be asserted exactly.
		final LocalDateTime now = LocalDateTime.of(2020, 11, 15, 22, 20);
		TimeMachine.useFixedClockAt(now);
		final JobDetail jobDetail = JobBuilder.newJob(MyJob.class).withIdentity(MY_JOB_NAME_EXECUTION).build();
		when(contextMock.getJobDetail()).thenReturn(jobDetail);
		when(jobExecutionInformationRepositoryMock.save(any(JobExecutionInformationEntity.class)))
				.thenReturn(savedJobExecutionInformationEntityMock);

		testObj.jobToBeExecuted(contextMock);

		verify(jobExecutionInformationRepositoryMock).save(jobExecutionInformationEntityArgumentCaptor.capture());
		//@formatter:off
		// The saved entity carries the job's simple class name, identity, start time and RUNNING status.
		assertThat(jobExecutionInformationEntityArgumentCaptor.getValue()).hasFieldOrPropertyWithValue("type", "MyJob")
				.hasFieldOrPropertyWithValue("name", MY_JOB_NAME_EXECUTION)
				.hasFieldOrPropertyWithValue("startTime", DateUtil.convertToDate(now, ZoneId.systemDefault()))
				.hasFieldOrPropertyWithValue("status", JobStatus.RUNNING);
		//@formatter:on
	}

	@Test
	void jobWasExecuted_shouldSaveTheJobExecutionInformationOfFinalisedLastExecution() {
		final LocalDateTime now = LocalDateTime.of(2020, 11, 15, 22, 20);
		TimeMachine.useFixedClockAt(now);
		final JobDetail jobDetail = JobBuilder.newJob(MyJob.class).withIdentity(MY_JOB_NAME_EXECUTION).build();
		// The entity saved at start-up is retrieved from the data map to be finalised.
		jobDetail.getJobDataMap().put("runningInstanceId", savedJobExecutionInformationEntityMock);
		when(contextMock.getJobDetail()).thenReturn(jobDetail);

		testObj.jobWasExecuted(contextMock, jobExceptionMock);

		verify(savedJobExecutionInformationEntityMock).setStatus(JobStatus.COMPLETED);
		verify(savedJobExecutionInformationEntityMock).setEndTime(DateUtil.convertToDate(now, ZoneId.systemDefault()));
		verify(jobExecutionInformationRepositoryMock).save(savedJobExecutionInformationEntityMock);
	}

	// Minimal Quartz Job so JobBuilder can build a concrete JobDetail fixture.
	private static class MyJob implements Job {

		@Override
		public void execute(final JobExecutionContext context) {
			// doNothing
		}

	}

}
| 4,683 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration/listeners/SameJobVetoingListenerTest.java | package com.paypal.jobsystem.quartzintegration.listeners;
import com.paypal.jobsystem.quartzintegration.listener.SameJobVetoingListener;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.*;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link SameJobVetoingListener}: a trigger is vetoed when another job
 * of the same class is already executing on the scheduler, and allowed otherwise.
 */
@ExtendWith(MockitoExtension.class)
class SameJobVetoingListenerTest {

	@InjectMocks
	private SameJobVetoingListener testObj;

	@Mock
	private Trigger triggerMock;

	@Mock
	private Scheduler schedulerMock;

	@Mock
	private JobExecutionContext jobToBeTriggeredContextMock;

	@Mock
	private JobExecutionContext jobAlreadyExecutingContextMock;

	@Test
	void getName() {
		final String result = testObj.getName();

		assertThat(result).isEqualTo("sameJobVetoingListener");
	}

	@Test
	void vetoJobExecution_shouldReturnFalseWhenNoOtherJobOfSameClassIsRunning() throws SchedulerException {
		// Triggered job (Bar) differs from the currently executing job (Foo): no veto.
		final BarJobClass jobToBeTriggered = new BarJobClass();
		when(jobToBeTriggeredContextMock.getJobInstance()).thenReturn(jobToBeTriggered);
		final FooJobClass jobBeingExecuted = new FooJobClass();
		when(jobAlreadyExecutingContextMock.getJobInstance()).thenReturn(jobBeingExecuted);
		when(schedulerMock.getCurrentlyExecutingJobs()).thenReturn(List.of(jobAlreadyExecutingContextMock));

		final boolean result = testObj.vetoJobExecution(triggerMock, jobToBeTriggeredContextMock);

		assertThat(result).isFalse();
	}

	@Test
	void vetoJobExecution_shouldReturnTrueWhenOtherJobOfSameClassIsRunning() throws SchedulerException {
		// Triggered job and running job are both FooJobClass: execution must be vetoed.
		final FooJobClass jobToBeTriggered = new FooJobClass();
		when(jobToBeTriggeredContextMock.getJobInstance()).thenReturn(jobToBeTriggered);
		when(jobToBeTriggeredContextMock.getJobDetail())
				.thenReturn(JobBuilder.newJob(FooJobClass.class).withIdentity("jobName").build());
		final FooJobClass jobBeingExecuted = new FooJobClass();
		when(jobAlreadyExecutingContextMock.getJobInstance()).thenReturn(jobBeingExecuted);
		when(schedulerMock.getCurrentlyExecutingJobs()).thenReturn(List.of(jobAlreadyExecutingContextMock));

		final boolean result = testObj.vetoJobExecution(triggerMock, jobToBeTriggeredContextMock);

		assertThat(result).isTrue();
	}

	// Two distinct no-op Job types to exercise same-class vs different-class scenarios.
	private static class FooJobClass implements Job {

		@Override
		public void execute(final JobExecutionContext context) {
			// doNothing
		}

	}

	private static class BarJobClass implements Job {

		@Override
		public void execute(final JobExecutionContext context) {
			// doNothing
		}

	}

}
| 4,684 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzintegration/services/JobServiceTest.java | package com.paypal.jobsystem.quartzintegration.services;
import com.paypal.jobsystem.quartzintegration.repositories.entities.JobStatus;
import com.paypal.infrastructure.support.date.DateUtil;
import com.paypal.infrastructure.support.date.TimeMachine;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.*;
import org.quartz.core.jmx.JobDataMapSupport;
import org.quartz.impl.matchers.GroupMatcher;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.Date;
import java.util.Map;
import java.util.Set;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link JobService}: enumerating registered jobs, resolving per-job
 * status from the job data map, and scheduling single-execution jobs against a mocked
 * Quartz {@link Scheduler}.
 */
@ExtendWith(MockitoExtension.class)
class JobServiceTest {

	private static final String NEW_JOB = "newJob";

	@InjectMocks
	private JobService testObj;

	@Mock
	private Scheduler schedulerMock;

	// Real (non-mock) JobDetail fixtures: one carrying a RUNNING status entry in its
	// data map and one with no status entry at all.
	private final JobDetail runningJobDetail = JobBuilder.newJob(Job.class)
			.setJobData(JobDataMapSupport.newJobDataMap(Map.of("status", JobStatus.RUNNING)))
			.withIdentity("runningJobDetail").build();

	private final JobDetail noStatusJobDetail = JobBuilder.newJob(Job.class).withIdentity("noStatusJobDetail").build();

	@Mock
	private JobKey jobKeyMock;

	@Mock
	private JobDetail jobDetailMock;

	@Captor
	private ArgumentCaptor<JobDetail> jobDetailArgumentCaptor;

	@Captor
	private ArgumentCaptor<Trigger> expectTriggerArgumentCaptor;

	@Test
	void getJobs_shouldReturnAllJobsRegisteredOnSystem() throws SchedulerException {
		when(schedulerMock.getJobKeys(GroupMatcher.anyJobGroup()))
				.thenReturn(Set.of(runningJobDetail.getKey(), noStatusJobDetail.getKey()));
		when(schedulerMock.getJobDetail(runningJobDetail.getKey())).thenReturn(runningJobDetail);
		when(schedulerMock.getJobDetail(noStatusJobDetail.getKey())).thenReturn(noStatusJobDetail);

		final Set<JobDetail> result = testObj.getJobs();

		assertThat(result).containsExactly(runningJobDetail, noStatusJobDetail);
	}

	@Test
	void getJobs_whenExceptionIsThrownByScheduler_shouldNotRetrieveJobs() throws SchedulerException {
		when(schedulerMock.getJobKeys(GroupMatcher.anyJobGroup()))
				.thenReturn(Set.of(runningJobDetail.getKey(), noStatusJobDetail.getKey()));
		when(schedulerMock.getJobDetail(runningJobDetail.getKey())).thenReturn(runningJobDetail);
		// A failing key lookup must be skipped, not propagate out of getJobs().
		doThrow(new SchedulerException()).when(schedulerMock).getJobDetail(noStatusJobDetail.getKey());

		final Set<JobDetail> result = testObj.getJobs();

		assertThat(result).containsExactly(runningJobDetail);
	}

	@Test
	void getJobStatus_whenStatusInsideJobDataMapIsInRunningState_shouldReturnRunning() throws SchedulerException {
		when(schedulerMock.getJobKeys(GroupMatcher.anyJobGroup()))
				.thenReturn(Set.of(runningJobDetail.getKey(), noStatusJobDetail.getKey()));
		when(schedulerMock.getJobDetail(runningJobDetail.getKey())).thenReturn(runningJobDetail);
		when(schedulerMock.getJobDetail(noStatusJobDetail.getKey())).thenReturn(noStatusJobDetail);

		final JobStatus result = testObj.getJobStatus("runningJobDetail");

		assertThat(result).isEqualTo(JobStatus.RUNNING);
	}

	@Test
	void getJobStatus_whenJobDataMapIsNull_shouldReturnUnknown() throws SchedulerException {
		when(schedulerMock.getJobKeys(GroupMatcher.anyJobGroup())).thenReturn(Set.of(jobKeyMock));
		when(schedulerMock.getJobDetail(jobKeyMock)).thenReturn(jobDetailMock);
		when(jobKeyMock.getName()).thenReturn("mockedJob");
		when(jobDetailMock.getKey()).thenReturn(jobKeyMock);
		// jobDetailMock.getJobDataMap() is unstubbed and returns null by default.

		final JobStatus result = testObj.getJobStatus("mockedJob");

		assertThat(result).isEqualTo(JobStatus.UNKNOWN);
	}

	@Test
	void getJobStatus_whenJobDataMapDoesNotContainStatusKey_shouldReturnUnknown() throws SchedulerException {
		when(schedulerMock.getJobKeys(GroupMatcher.anyJobGroup())).thenReturn(Set.of(jobKeyMock));
		when(schedulerMock.getJobDetail(jobKeyMock)).thenReturn(jobDetailMock);
		when(jobKeyMock.getName()).thenReturn("mockedJob");
		when(jobDetailMock.getKey()).thenReturn(jobKeyMock);
		when(jobDetailMock.getJobDataMap()).thenReturn(new JobDataMap());

		final JobStatus result = testObj.getJobStatus("mockedJob");

		assertThat(result).isEqualTo(JobStatus.UNKNOWN);
	}

	@Test
	void getJobStatus_whenJobIsNotFound_shouldReturnUnknown() throws SchedulerException {
		when(schedulerMock.getJobKeys(GroupMatcher.anyJobGroup()))
				.thenReturn(Set.of(runningJobDetail.getKey(), noStatusJobDetail.getKey()));
		when(schedulerMock.getJobDetail(runningJobDetail.getKey())).thenReturn(runningJobDetail);
		when(schedulerMock.getJobDetail(noStatusJobDetail.getKey())).thenReturn(noStatusJobDetail);

		final JobStatus result = testObj.getJobStatus("nonExisting");

		assertThat(result).isEqualTo(JobStatus.UNKNOWN);
	}

	@Test
	void getJobStatus_whenJobDataMapIsEmpty_shouldReturnUnknown() throws SchedulerException {
		when(schedulerMock.getJobKeys(GroupMatcher.anyJobGroup()))
				.thenReturn(Set.of(runningJobDetail.getKey(), noStatusJobDetail.getKey()));
		when(schedulerMock.getJobDetail(runningJobDetail.getKey())).thenReturn(runningJobDetail);
		when(schedulerMock.getJobDetail(noStatusJobDetail.getKey())).thenReturn(noStatusJobDetail);

		final JobStatus result = testObj.getJobStatus("noStatusJobDetail");

		assertThat(result).isEqualTo(JobStatus.UNKNOWN);
	}

	@Test
	void createAndRunSingleExecutionJob_whenDateIsEmpty_shouldScheduleNewJobToBeExecutedNow()
			throws SchedulerException {
		// Fixed clock so "now" used by the service matches the trigger start time asserted
		// below. NOTE(review): the fixed clock is never restored afterwards — TODO confirm.
		TimeMachine.useFixedClockAt(LocalDateTime.now());

		testObj.createAndRunSingleExecutionJob(NEW_JOB, TestJobClass.class,
				JobDataMapSupport.newJobDataMap(Map.of("param1Key", "param1")), null);

		//@formatter:off
		final JobDetail expectedJobDetail = JobBuilder.newJob(TestJobClass.class)
				.withIdentity(NEW_JOB)
				.usingJobData(JobDataMapSupport.newJobDataMap(Map.of("param1Key", "param1")))
				.storeDurably()
				.build();
		final Trigger expectedTrigger = TriggerBuilder.newTrigger()
				.forJob(expectedJobDetail)
				.startAt(DateUtil.convertToDate(TimeMachine.now(), ZoneId.systemDefault()))
				.build();
		//@formatter:on  -- fixed: was '//@formatter"on', which never re-enabled the formatter
		verify(schedulerMock).addJob(jobDetailArgumentCaptor.capture(), eq(true));
		verify(schedulerMock).scheduleJob(expectTriggerArgumentCaptor.capture());
		assertThat(jobDetailArgumentCaptor.getValue()).usingRecursiveComparison().isEqualTo(expectedJobDetail);
		final Trigger capturedTrigger = expectTriggerArgumentCaptor.getValue();
		assertThat(capturedTrigger.getStartTime()).isEqualTo(expectedTrigger.getStartTime());
		assertThat(capturedTrigger.getJobKey()).isEqualTo(expectedTrigger.getJobKey());
	}

	@Test
	void createAndRunSingleExecutionJob_shouldScheduleNewJobToBeExecutedInDateProvided() throws SchedulerException {
		final Date schedule = new Date();

		testObj.createAndRunSingleExecutionJob(NEW_JOB, TestJobClass.class,
				JobDataMapSupport.newJobDataMap(Map.of("param1Key", "param1")), schedule);

		//@formatter:off
		final JobDetail expectedJobDetail = JobBuilder.newJob(TestJobClass.class)
				.withIdentity(NEW_JOB)
				.usingJobData(JobDataMapSupport.newJobDataMap(Map.of("param1Key", "param1")))
				.storeDurably()
				.build();
		final Trigger expectedTrigger = TriggerBuilder.newTrigger()
				.forJob(expectedJobDetail)
				.startAt(schedule)
				.build();
		//@formatter:on  -- fixed: was '//@formatter"on'
		verify(schedulerMock).addJob(jobDetailArgumentCaptor.capture(), eq(true));
		verify(schedulerMock).scheduleJob(expectTriggerArgumentCaptor.capture());
		assertThat(jobDetailArgumentCaptor.getValue()).usingRecursiveComparison().isEqualTo(expectedJobDetail);
		// Trigger name/key are auto-generated, so they are excluded from the comparison.
		//@formatter:off
		assertThat(expectTriggerArgumentCaptor.getValue())
				.usingRecursiveComparison()
				.ignoringFields("name", "key.name")
				.ignoringActualNullFields()
				.isEqualTo(expectedTrigger);
		//@formatter:on
	}

	// Minimal Quartz Job used as the scheduled job class in the tests.
	private static class TestJobClass implements Job {

		@Override
		public void execute(final JobExecutionContext context) {
			// doNothing
		}

	}

}
| 4,685 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjob | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjob/services/BatchJobExecutorTest.java | package com.paypal.jobsystem.batchjob.services;
import com.callibrity.logging.test.LogTrackerStub;
import com.paypal.jobsystem.batchjob.model.BatchJob;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjob.model.listeners.BatchJobProcessingListener;
import com.paypal.jobsystem.batchjob.model.BatchJobItemValidationResult;
import com.paypal.jobsystem.batchjob.model.BatchJobItemValidationStatus;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.mockito.InOrder;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.Collection;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
class BatchJobExecutorTest {
@RegisterExtension
final LogTrackerStub logTrackerStub = LogTrackerStub.create().recordForType(BatchJobExecutor.class);
public static final String MSG_ERROR_WHILE_INVOKING_BATCH_JOB_LISTENER = "Error while invoking batch job listener";
@InjectMocks
private BatchJobExecutor testObj;
@Mock
private BatchJob<BatchJobContext, BatchJobItem<Object>> batchJobMock;
@Mock
private BatchJobContext batchJobContextMock;
@Mock
private BatchJobProcessingListener listenerMock1, listenerMock2;
@Mock
private BatchJobItem<Object> itemMock1, itemMock2;
@Mock
private BatchJobItem<Object> enrichedItemMock1, enrichedItemMock2;
private Collection<BatchJobItem<Object>> itemCollection;
@BeforeEach
public void setUp() {
testObj.batchJobProcessingListeners = List.of(listenerMock1, listenerMock2);
itemCollection = List.of(itemMock1, itemMock2);
lenient().when(batchJobMock.getItems(any(BatchJobContext.class))).thenReturn(itemCollection);
lenient().when(batchJobMock.validateItem(any(), any()))
.thenReturn(BatchJobItemValidationResult.builder().status(BatchJobItemValidationStatus.VALID).build());
lenient().when(batchJobMock.enrichItem(any(BatchJobContext.class), eq(itemMock1)))
.thenReturn(enrichedItemMock1);
lenient().when(batchJobMock.enrichItem(any(BatchJobContext.class), eq(itemMock2)))
.thenReturn(enrichedItemMock2);
}
@SuppressWarnings("unchecked")
@Test
void execute_ShouldRetrieveAndProcessBatchItems() {
testObj.execute(batchJobMock, batchJobContextMock);
final InOrder inOrder = Mockito.inOrder(listenerMock1, listenerMock2, batchJobMock);
inOrder.verify(listenerMock1).onBatchJobStarted(any(BatchJobContext.class));
inOrder.verify(listenerMock2).onBatchJobStarted(any(BatchJobContext.class));
inOrder.verify(listenerMock1).beforeItemExtraction(any(BatchJobContext.class));
inOrder.verify(listenerMock2).beforeItemExtraction(any(BatchJobContext.class));
inOrder.verify(batchJobMock).getItems(any(BatchJobContext.class));
inOrder.verify(listenerMock1).onItemExtractionSuccessful(any(BatchJobContext.class),
(Collection) eq(itemCollection));
inOrder.verify(listenerMock2).onItemExtractionSuccessful(any(BatchJobContext.class),
(Collection) eq(itemCollection));
inOrder.verify(batchJobMock).prepareForItemProcessing(any(BatchJobContext.class), any());
inOrder.verify(listenerMock1).beforeProcessingItem(any(BatchJobContext.class), eq(itemMock1));
inOrder.verify(listenerMock2).beforeProcessingItem(any(BatchJobContext.class), eq(itemMock1));
inOrder.verify(batchJobMock).enrichItem(any(BatchJobContext.class), eq(itemMock1));
inOrder.verify(batchJobMock).validateItem(any(BatchJobContext.class), eq(enrichedItemMock1));
inOrder.verify(batchJobMock).processItem(any(BatchJobContext.class), eq(enrichedItemMock1));
inOrder.verify(listenerMock1).onItemProcessingSuccess(any(BatchJobContext.class), eq(itemMock1));
inOrder.verify(listenerMock2).onItemProcessingSuccess(any(BatchJobContext.class), eq(itemMock1));
inOrder.verify(listenerMock1).beforeProcessingItem(any(BatchJobContext.class), eq(itemMock2));
inOrder.verify(listenerMock2).beforeProcessingItem(any(BatchJobContext.class), eq(itemMock2));
inOrder.verify(batchJobMock).enrichItem(any(BatchJobContext.class), eq(itemMock2));
inOrder.verify(batchJobMock).validateItem(any(BatchJobContext.class), eq(enrichedItemMock2));
inOrder.verify(batchJobMock).processItem(any(BatchJobContext.class), eq(enrichedItemMock2));
inOrder.verify(listenerMock1).onItemProcessingSuccess(any(BatchJobContext.class), eq(itemMock2));
inOrder.verify(listenerMock2).onItemProcessingSuccess(any(BatchJobContext.class), eq(itemMock2));
inOrder.verify(listenerMock1).onBatchJobFinished(any(BatchJobContext.class));
inOrder.verify(listenerMock2).onBatchJobFinished(any(BatchJobContext.class));
}
@Test
void execute_ShouldContinueProcessing_WhenItemValidationReturnsAWarning() {
// Given: validating the second enriched item yields a WARNING (non-blocking) result.
when(batchJobMock.validateItem(any(), eq(enrichedItemMock2))).thenReturn(
BatchJobItemValidationResult.builder().status(BatchJobItemValidationStatus.WARNING).build());
// When the job runs...
testObj.execute(batchJobMock, batchJobContextMock);
// Then the item is still enriched, validated and processed despite the warning...
verify(batchJobMock).enrichItem(any(BatchJobContext.class), eq(itemMock2));
verify(batchJobMock).validateItem(any(BatchJobContext.class), eq(enrichedItemMock2));
verify(batchJobMock).processItem(any(BatchJobContext.class), eq(enrichedItemMock2));
// ...all listeners are told about the validation issue...
verify(listenerMock1).onItemProcessingValidationFailure(any(BatchJobContext.class), eq(itemMock2),
any(BatchJobItemValidationResult.class));
verify(listenerMock2).onItemProcessingValidationFailure(any(BatchJobContext.class), eq(itemMock2),
any(BatchJobItemValidationResult.class));
// ...and the item is still reported as successfully processed.
verify(listenerMock1).onItemProcessingSuccess(any(BatchJobContext.class), eq(itemMock2));
verify(listenerMock2).onItemProcessingSuccess(any(BatchJobContext.class), eq(itemMock2));
}
@Test
void execute_ShouldAbortItemProcessingAndRegisterFailure_WhenItemValidationReturnsAnInvalid() {
// Given: validating the second enriched item yields an INVALID (blocking) result.
when(batchJobMock.validateItem(any(), eq(enrichedItemMock2))).thenReturn(
BatchJobItemValidationResult.builder().status(BatchJobItemValidationStatus.INVALID).build());
testObj.execute(batchJobMock, batchJobContextMock);
// Then the item is enriched and validated, but never actually processed.
verify(batchJobMock).enrichItem(any(BatchJobContext.class), eq(itemMock2));
verify(batchJobMock).validateItem(any(BatchJobContext.class), eq(enrichedItemMock2));
verify(batchJobMock, times(0)).processItem(any(BatchJobContext.class), eq(enrichedItemMock2));
// Listeners are notified of the validation failure...
verify(listenerMock1).onItemProcessingValidationFailure(any(BatchJobContext.class), eq(itemMock2),
any(BatchJobItemValidationResult.class));
verify(listenerMock2).onItemProcessingValidationFailure(any(BatchJobContext.class), eq(itemMock2),
any(BatchJobItemValidationResult.class));
// ...and the item is registered as failed with no causing exception (hence eq(null)).
verify(listenerMock1).onItemProcessingFailure(any(BatchJobContext.class), eq(itemMock2), eq(null));
verify(listenerMock2).onItemProcessingFailure(any(BatchJobContext.class), eq(itemMock2), eq(null));
}
@Test
void execute_ShouldLogAnError_WhenOnBatchJobStartedThrowsARunTimeException() {
// A listener blowing up in onBatchJobStarted must not break the job: the error is only logged.
doThrow(RuntimeException.class).when(listenerMock1).onBatchJobStarted(any(BatchJobContext.class));
testObj.execute(batchJobMock, batchJobContextMock);
assertThat(logTrackerStub.contains(MSG_ERROR_WHILE_INVOKING_BATCH_JOB_LISTENER)).isTrue();
}
@Test
void execute_ShouldLogAnError_WhenBeforeItemExtractionThrowsARunTimeException() {
// A listener blowing up in beforeItemExtraction is swallowed and logged, not propagated.
doThrow(RuntimeException.class).when(listenerMock1).beforeItemExtraction(any(BatchJobContext.class));
testObj.execute(batchJobMock, batchJobContextMock);
assertThat(logTrackerStub.contains(MSG_ERROR_WHILE_INVOKING_BATCH_JOB_LISTENER)).isTrue();
}
@Test
void execute_ShouldLogAnError_WhenOnItemExtractionSuccessfulThrowsARunTimeException() {
// A listener blowing up in onItemExtractionSuccessful is swallowed and logged, not propagated.
doThrow(RuntimeException.class).when(listenerMock1).onItemExtractionSuccessful(any(BatchJobContext.class),
any());
testObj.execute(batchJobMock, batchJobContextMock);
assertThat(logTrackerStub.contains(MSG_ERROR_WHILE_INVOKING_BATCH_JOB_LISTENER)).isTrue();
}
@Test
void execute_ShouldCallOnItemExtractionFailure_WhenBatchJobItemBatchJobItemsExtractorThrowsARunTimeException() {
// When item extraction itself fails, every listener is notified via onItemExtractionFailure.
doThrow(RuntimeException.class).when(batchJobMock).getItems(any(BatchJobContext.class));
testObj.execute(batchJobMock, batchJobContextMock);
verify(listenerMock1).onItemExtractionFailure(any(BatchJobContext.class), any(RuntimeException.class));
verify(listenerMock2).onItemExtractionFailure(any(BatchJobContext.class), any(RuntimeException.class));
}
@Test
void execute_ShouldLogAnError_WhenOnItemExtractionFailureThrowsARunTimeException() {
// Given: extraction fails AND the failure callback of one listener also throws.
doThrow(RuntimeException.class).when(batchJobMock).getItems(any(BatchJobContext.class));
doThrow(RuntimeException.class).when(listenerMock1).onItemExtractionFailure(any(BatchJobContext.class),
any(RuntimeException.class));
testObj.execute(batchJobMock, batchJobContextMock);
// Then: the listener's own failure is swallowed and logged instead of propagating.
assertThat(logTrackerStub.contains(MSG_ERROR_WHILE_INVOKING_BATCH_JOB_LISTENER)).isTrue();
}
@Test
void execute_ShouldCallOnPreparationForProcessingFailure_WhenPreparationForProcessingThrowsARunTimeException() {
// When preparing for item processing fails, every listener gets onPreparationForProcessingFailure.
doThrow(RuntimeException.class).when(batchJobMock).prepareForItemProcessing(any(), any());
testObj.execute(batchJobMock, batchJobContextMock);
verify(listenerMock1).onPreparationForProcessingFailure(any(BatchJobContext.class),
any(RuntimeException.class));
verify(listenerMock2).onPreparationForProcessingFailure(any(BatchJobContext.class),
any(RuntimeException.class));
}
@Test
void execute_ShouldLogAnError_WhenOnPreparationForProcessingFailureThrowsARunTimeException() {
// Given: preparation fails AND the corresponding listener callback also throws.
doThrow(RuntimeException.class).when(batchJobMock).prepareForItemProcessing(any(), any());
doThrow(RuntimeException.class).when(listenerMock1)
.onPreparationForProcessingFailure(any(BatchJobContext.class), any(RuntimeException.class));
testObj.execute(batchJobMock, batchJobContextMock);
assertThat(logTrackerStub.contains(MSG_ERROR_WHILE_INVOKING_BATCH_JOB_LISTENER)).isTrue();
}
@Test
void execute_ShouldLogAnError_WhenOnBatchJobFailureThrowsARunTimeException() {
// Given: the job fails during extraction AND the onBatchJobFailure callback also throws.
doThrow(RuntimeException.class).when(batchJobMock).getItems(any(BatchJobContext.class));
doThrow(RuntimeException.class).when(listenerMock1).onBatchJobFailure(any(BatchJobContext.class),
any(RuntimeException.class));
testObj.execute(batchJobMock, batchJobContextMock);
assertThat(logTrackerStub.contains(MSG_ERROR_WHILE_INVOKING_BATCH_JOB_LISTENER)).isTrue();
}
@Test
void execute_ShouldSetBatchJobAsFinished_WhenJobFinishesWithoutErrors() {
testObj.execute(batchJobMock, batchJobContextMock);
// A clean run marks the job context as finished exactly once.
verify(batchJobContextMock, times(1)).setFinishedStatus();
}
@Test
void execute_ShouldSetBatchJobAsFinishedWithErrors_WhenThereIsAPartialExtraction() {
// A partial item extraction downgrades the final status to finished-with-failures.
when(batchJobContextMock.isPartialItemExtraction()).thenReturn(true);
testObj.execute(batchJobMock, batchJobContextMock);
verify(batchJobContextMock, times(0)).setFinishedStatus();
verify(batchJobContextMock, times(1)).setFinishedWithFailuresStatus();
}
}
package com.paypal.jobsystem.batchjobaudit.controllers;
import com.paypal.jobsystem.batchjobaudit.services.BatchJobTrackingService;
import com.paypal.jobsystem.batchjobaudit.controllers.converters.BatchJobItemTrackInfoEntityConverter;
import com.paypal.jobsystem.batchjobaudit.controllers.converters.BatchJobTrackInfoEntityConverter;
import com.paypal.jobsystem.batchjobaudit.controllers.dto.BatchJobItemTrackInfoResponse;
import com.paypal.jobsystem.batchjobaudit.controllers.dto.BatchJobTrackInfoResponse;
import com.paypal.jobsystem.batchjobaudit.repositories.entities.BatchJobItemTrackInfoEntity;
import com.paypal.jobsystem.batchjobaudit.repositories.entities.BatchJobTrackInfoEntity;
import com.paypal.jobsystem.batchjobaudit.controllers.JobAuditController;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.time.LocalDateTime;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link JobAuditController}: the controller is expected to fetch raw tracking
 * entities from {@link BatchJobTrackingService} and delegate DTO mapping to the entity converters.
 */
@ExtendWith(MockitoExtension.class)
class JobAuditControllerTest {

	@InjectMocks
	private JobAuditController testObj;

	@Mock
	private BatchJobItemTrackInfoEntityConverter batchJobItemTrackInfoEntityConverterMock;

	@Mock
	private BatchJobTrackInfoEntityConverter batchJobTrackInfoEntityConverterMock;

	@Mock
	private BatchJobTrackingService batchJobTrackingServiceMock;

	@Mock
	private LocalDateTime localDateTime1Mock, localDateTime2Mock;

	@Mock
	private BatchJobTrackInfoResponse batchJobTrackInfoResponse1Mock, batchJobTrackInfoResponse2Mock;

	@Mock
	private BatchJobItemTrackInfoResponse batchJobItemTrackInfoResponse1Mock, batchJobItemTrackInfoResponse2Mock;

	@Mock
	private BatchJobItemTrackInfoEntity batchJobItemTrackInfoEntity1Mock, batchJobItemTrackInfoEntity2Mock;

	@Mock
	private BatchJobTrackInfoEntity batchJobTrackInfoEntity1Mock, batchJobTrackInfoEntity2Mock;

	// Renamed from getAllJobs_ShouldReturnAllJobsInsideTimeRage: fixes the "Rage" typo.
	@Test
	void getAllJobs_ShouldReturnAllJobsInsideTimeRange() {
		final List<BatchJobTrackInfoEntity> batchJobTrackInfoEntities = List.of(batchJobTrackInfoEntity1Mock,
				batchJobTrackInfoEntity2Mock);
		final List<BatchJobTrackInfoResponse> batchJobTrackInfoResponses = List.of(batchJobTrackInfoResponse1Mock,
				batchJobTrackInfoResponse2Mock);
		// Given: the service returns the entities for the time range and the converter maps them.
		when(batchJobTrackingServiceMock.getJobTrackingEntries(localDateTime1Mock, localDateTime2Mock))
				.thenReturn(batchJobTrackInfoEntities);
		when(batchJobTrackInfoEntityConverterMock.toResponse(batchJobTrackInfoEntities))
				.thenReturn(batchJobTrackInfoResponses);

		final List<BatchJobTrackInfoResponse> result = testObj.getAllJobs(localDateTime1Mock, localDateTime2Mock);

		assertThat(result).containsAll(batchJobTrackInfoResponses);
	}

	@Test
	void getJobItems_ShouldReturnAllTrackingItemsOfAJob() {
		final List<BatchJobItemTrackInfoEntity> batchJobItemTrackInfoEntities = List
				.of(batchJobItemTrackInfoEntity1Mock, batchJobItemTrackInfoEntity2Mock);
		final List<BatchJobItemTrackInfoResponse> batchJobItemTrackInfoResponses = List
				.of(batchJobItemTrackInfoResponse1Mock, batchJobItemTrackInfoResponse2Mock);
		// Given: the service returns the item entities for the job id and the converter maps them.
		when(batchJobTrackingServiceMock.getJobItemTrackingEntries("job1")).thenReturn(batchJobItemTrackInfoEntities);
		when(batchJobItemTrackInfoEntityConverterMock.toResponse(batchJobItemTrackInfoEntities))
				.thenReturn(batchJobItemTrackInfoResponses);

		final List<BatchJobItemTrackInfoResponse> result = testObj.getJobItems("job1");

		assertThat(result).containsAll(batchJobItemTrackInfoResponses);
	}

}
package com.paypal.jobsystem.batchjobaudit.listeners;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjobaudit.services.BatchJobTrackingService;
import com.paypal.jobsystem.batchjobaudit.listeners.TrackingBatchJobItemProcessingListener;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.List;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link TrackingBatchJobItemProcessingListener}: each listener callback is
 * expected to forward the relevant job/item lifecycle event to {@link BatchJobTrackingService}.
 * The listener itself is spied so that delegation to its own super-call helpers can be verified.
 */
@ExtendWith(MockitoExtension.class)
class TrackingBatchJobItemProcessingListenerTest {

	public static final String JOB_ID = "1234";

	public static final String JOB_NAME = "jobName";

	@Spy
	@InjectMocks
	private TrackingBatchJobItemProcessingListener testObj;

	@Mock
	private BatchJobTrackingService batchJobTrackingServiceMock;

	@Mock
	private BatchJobContext batchJobContextMock;

	@Mock
	private BatchJobItem<?> batchJobItemMock;

	@Test
	void beforeItemExtraction_ShouldCallSuperBeforeItemExtraction() {
		testObj.beforeItemExtraction(batchJobContextMock);

		verify(testObj).callSuperBeforeItemExtraction(batchJobContextMock);
	}

	@Test
	void onItemExtractionSuccessful_ShouldCallBatchJobTrackingServiceTrackJobItemsAdded() {
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);

		testObj.onItemExtractionSuccessful(batchJobContextMock, List.of(batchJobItemMock));

		verify(batchJobTrackingServiceMock).trackJobItemsAdded(JOB_ID, List.of(batchJobItemMock));
	}

	// Renamed from beforeItemExtraction_ShouldCallSuperOnItemExtractionFailure: the exercised
	// callback is onItemExtractionFailure, not beforeItemExtraction.
	@Test
	void onItemExtractionFailure_ShouldCallSuperOnItemExtractionFailure() {
		final Exception exception = new Exception();

		testObj.onItemExtractionFailure(batchJobContextMock, exception);

		// NOTE(review): this verifies on the spy the very method that was just invoked, so the
		// assertion can never fail. It presumably should target a super-call delegate (mirroring
		// beforeItemExtraction_ShouldCallSuperBeforeItemExtraction) — TODO confirm such a
		// callSuperOnItemExtractionFailure helper exists on the listener and verify that instead.
		verify(testObj).onItemExtractionFailure(batchJobContextMock, exception);
	}

	@Test
	void beforeProcessingItem_ShouldCallBatchJobTrackingServiceTrackJobItemProcessingStarted() {
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);

		testObj.beforeProcessingItem(batchJobContextMock, batchJobItemMock);

		verify(batchJobTrackingServiceMock).trackJobItemProcessingStarted(JOB_ID, batchJobItemMock);
	}

	@Test
	void onItemProcessingFailure_ShouldCallBatchJobTrackingServiceTrackJobItemProcessingFinished() {
		final Exception exception = new Exception();
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);

		testObj.onItemProcessingFailure(batchJobContextMock, batchJobItemMock, exception);

		// A failed item is tracked as finished with successful = false.
		verify(batchJobTrackingServiceMock).trackJobItemProcessingFinished(JOB_ID, batchJobItemMock, false);
	}

	@Test
	void onItemProcessingSuccess_ShouldCallBatchJobTrackingServiceTrackJobItemProcessingFinished() {
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);

		testObj.onItemProcessingSuccess(batchJobContextMock, batchJobItemMock);

		verify(batchJobTrackingServiceMock).trackJobItemProcessingFinished(JOB_ID, batchJobItemMock, true);
	}

	@Test
	void onBatchJobStarted_ShouldCallBatchJobTrackingServiceMarkNonFinishedJobsAsAbortedAndTrackJobStart() {
		when(batchJobContextMock.getJobName()).thenReturn(JOB_NAME);
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);

		testObj.onBatchJobStarted(batchJobContextMock);

		// Starting a job first aborts stale executions of the same job type, then tracks the start.
		verify(batchJobTrackingServiceMock).markNonFinishedJobsAsAborted(JOB_NAME);
		verify(batchJobTrackingServiceMock).trackJobStart(JOB_ID, JOB_NAME);
	}

	@Test
	void onBatchJobFinished_ShouldCallBatchJobTrackingServiceTrackJobFinishedAsSuccessful_WhenHasNotItemsFailed() {
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);
		when(batchJobContextMock.getNumberOfItemsFailed()).thenReturn(0);

		testObj.onBatchJobFinished(batchJobContextMock);

		verify(batchJobTrackingServiceMock).trackJobFinished(JOB_ID, true);
	}

	@Test
	void onBatchJobFinished_ShouldCallBatchJobTrackingServiceTrackJobFinishedAsFailed_WhenHasItemsFailed() {
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);
		when(batchJobContextMock.getNumberOfItemsFailed()).thenReturn(1);

		testObj.onBatchJobFinished(batchJobContextMock);

		verify(batchJobTrackingServiceMock).trackJobFinished(JOB_ID, false);
	}

	@Test
	void onBatchJobFinished_ShouldCallBatchJobTrackingServiceTrackJobFinishedAsFailed_WhenThereWasAPartialExtraction() {
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);
		when(batchJobContextMock.isPartialItemExtraction()).thenReturn(true);

		testObj.onBatchJobFinished(batchJobContextMock);

		verify(batchJobTrackingServiceMock).trackJobFinished(JOB_ID, false);
	}

	@Test
	void onBatchJobFailure_ShouldCallBatchJobTrackingServiceTrackJobFailure() {
		when(batchJobContextMock.getJobUuid()).thenReturn(JOB_ID);
		when(batchJobContextMock.getJobName()).thenReturn(JOB_NAME);
		final Exception exception = new Exception();

		testObj.onBatchJobFailure(batchJobContextMock, exception);

		verify(batchJobTrackingServiceMock).trackJobFailure(JOB_ID, JOB_NAME);
	}

}
package com.paypal.jobsystem.batchjobaudit.services;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjob.model.BatchJobItemStatus;
import com.paypal.jobsystem.batchjob.model.BatchJobStatus;
import com.paypal.jobsystem.batchjobaudit.repositories.entities.BatchJobItemTrackInfoEntity;
import com.paypal.jobsystem.batchjobaudit.repositories.entities.BatchJobItemTrackingInfoId;
import com.paypal.jobsystem.batchjobaudit.repositories.entities.BatchJobTrackInfoEntity;
import com.paypal.jobsystem.batchjobaudit.repositories.BatchJobItemTrackingRepository;
import com.paypal.jobsystem.batchjobaudit.repositories.BatchJobTrackingRepository;
import com.paypal.infrastructure.support.date.TimeMachine;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.data.domain.Pageable;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Optional;
import static com.paypal.jobsystem.batchjobaudit.services.BatchJobTrackingServiceImpl.ITEM_NOT_FINISHED_STATUSES;
import static com.paypal.jobsystem.batchjobaudit.services.BatchJobTrackingServiceImpl.JOB_NOT_FINISHED_STATUSES;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for BatchJobTrackingServiceImpl. Time-dependent behaviour is made deterministic by
 * freezing the clock with TimeMachine.useFixedClockAt(...). Entity builder chains obtained via
 * toBuilder() are mocked, so every fluent setter used by the service must be stubbed explicitly.
 */
@ExtendWith(MockitoExtension.class)
class BatchJobTrackingServiceImplTest {
// Fixture constants shared by all tests.
public static final String JOB_ID = "1234";
public static final String JOB_TYPE = "sellers";
public static final String BATCH_JOB_ITEM_ID = "0001";
@InjectMocks
private BatchJobTrackingServiceImpl testObj;
@Mock
private BatchJobTrackingRepository batchJobTrackingRepositoryMock;
@Mock
private BatchJobItemTrackingRepository batchJobItemTrackingRepositoryMock;
@Captor
private ArgumentCaptor<BatchJobTrackInfoEntity> batchJobTrackInfoEntityArgumentCaptor;
@Captor
private ArgumentCaptor<BatchJobItemTrackingInfoId> batchJobItemTrackingInfoIdArgumentCaptor;
@Captor
private ArgumentCaptor<List<BatchJobItemTrackInfoEntity>> batchJobItemTrackInfoEntitiesArgumentCaptor;
@Mock
private BatchJobTrackInfoEntity batchJobTrackInfoEntityMock;
@Mock
private BatchJobTrackInfoEntity.BatchJobTrackInfoEntityBuilder batchJobTrackInfoEntityBuilderMock;
@Mock
private BatchJobItemTrackInfoEntity.BatchJobItemTrackInfoEntityBuilder batchJobItemTrackInfoEntityBuilderMock;
@Mock
private BatchJobItem<?> batchJobItemMock;
@Mock
private BatchJobItemTrackInfoEntity batchJobItemTrackInfoEntityMock;
@Test
void trackJobStart_ShouldSaveABatchJobTrackInfoEntityWithRunningStatusAndStartTimeAsNow() {
// Freeze the clock so the saved startTime can be compared exactly.
final LocalDateTime now = TimeMachine.now();
TimeMachine.useFixedClockAt(now);
testObj.trackJobStart(JOB_ID, JOB_TYPE);
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityArgumentCaptor.capture());
assertThat(batchJobTrackInfoEntityArgumentCaptor.getValue()).isEqualTo(BatchJobTrackInfoEntity.builder()
.batchJobId(JOB_ID).batchJobType(JOB_TYPE).startTime(now).status(BatchJobStatus.RUNNING).build());
}
@Test
void trackJobFinished_ShouldSetJobAsFinishedAndFinishTimeAsNow_WhenSuccessful() {
final LocalDateTime now = TimeMachine.now();
TimeMachine.useFixedClockAt(now);
when(batchJobTrackingRepositoryMock.getReferenceById(JOB_ID)).thenReturn(batchJobTrackInfoEntityMock);
testObj.trackJobFinished(JOB_ID, true);
verify(batchJobTrackInfoEntityMock).setFinishTime(now);
verify(batchJobTrackInfoEntityMock).setStatus(BatchJobStatus.FINISHED);
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityMock);
}
@Test
void trackJobFinished_ShouldSetJobAsFinishedWithFailuresAndFinishTimeAsNow_WhenNotSuccessful() {
final LocalDateTime now = TimeMachine.now();
TimeMachine.useFixedClockAt(now);
when(batchJobTrackingRepositoryMock.getReferenceById(JOB_ID)).thenReturn(batchJobTrackInfoEntityMock);
testObj.trackJobFinished(JOB_ID, false);
verify(batchJobTrackInfoEntityMock).setFinishTime(now);
verify(batchJobTrackInfoEntityMock).setStatus(BatchJobStatus.FINISHED_WITH_FAILURES);
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityMock);
}
@Test
void trackJobFailure_ShouldMarkJobAsFailed_WhenJobIsFound() {
final LocalDateTime now = TimeMachine.now();
TimeMachine.useFixedClockAt(now);
// The existing entity is rebuilt via its (mocked) builder with FAILED status and finishTime=now.
when(batchJobTrackingRepositoryMock.findById(JOB_ID)).thenReturn(Optional.of(batchJobTrackInfoEntityMock));
when(batchJobTrackInfoEntityMock.toBuilder()).thenReturn(batchJobTrackInfoEntityBuilderMock);
when(batchJobTrackInfoEntityBuilderMock.status(BatchJobStatus.FAILED))
.thenReturn(batchJobTrackInfoEntityBuilderMock);
when(batchJobTrackInfoEntityBuilderMock.finishTime(now)).thenReturn(batchJobTrackInfoEntityBuilderMock);
when(batchJobTrackInfoEntityBuilderMock.build()).thenReturn(batchJobTrackInfoEntityMock);
testObj.trackJobFailure(JOB_ID, JOB_TYPE);
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityArgumentCaptor.capture());
assertThat(batchJobTrackInfoEntityArgumentCaptor.getValue()).isEqualTo(batchJobTrackInfoEntityMock);
}
@Test
void trackJobFailure_ShouldCreateAFailedJob_WhenJobIsNotFound() {
final LocalDateTime now = TimeMachine.now();
TimeMachine.useFixedClockAt(now);
when(batchJobTrackingRepositoryMock.findById(JOB_ID)).thenReturn(Optional.empty());
testObj.trackJobFailure(JOB_ID, JOB_TYPE);
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityArgumentCaptor.capture());
// A brand-new FAILED entity is created with start and finish both set to "now".
assertThat(batchJobTrackInfoEntityArgumentCaptor.getValue())
.isEqualTo(BatchJobTrackInfoEntity.builder().batchJobId(JOB_ID).batchJobType(JOB_TYPE)
.status(BatchJobStatus.FAILED).startTime(now).finishTime(now).build());
}
@Test
void markNonFinishedJobsAsAbortedByBatchJobType_ShouldMarkAsAbortedTheNotFinishedBatchJobTrackInfoEntitiesAndTheBatchJobItemTrackInfoEntities_WhenBatchJobItemTrackInfoEntitiesStatusesAreFinishedStatuses() {
// NOTE(review): here an item already in a finished state (FAILED) is re-saved as ABORTED,
// while in the next test a PENDING item is left untouched (saveAll(List.of())). This looks
// inverted relative to the test names — confirm the intended filtering in the implementation.
final BatchJobTrackInfoEntity batchJobTrackInfoEntity = BatchJobTrackInfoEntity.builder().batchJobId(JOB_ID)
.build();
when(batchJobTrackingRepositoryMock.findByBatchJobTypeAndStatusIn(JOB_TYPE, JOB_NOT_FINISHED_STATUSES))
.thenReturn(List.of(batchJobTrackInfoEntity));
final BatchJobItemTrackInfoEntity batchJobItemTrackInfoEntity = BatchJobItemTrackInfoEntity.builder()
.batchJobId(JOB_ID).status(BatchJobItemStatus.FAILED).build();
when(batchJobItemTrackingRepositoryMock.findByBatchJobId(JOB_ID))
.thenReturn(List.of(batchJobItemTrackInfoEntity));
testObj.markNonFinishedJobsAsAborted(JOB_TYPE);
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityArgumentCaptor.capture());
batchJobTrackInfoEntity.setStatus(BatchJobStatus.ABORTED);
assertThat(batchJobTrackInfoEntityArgumentCaptor.getValue()).isEqualTo(batchJobTrackInfoEntity);
verify(batchJobItemTrackingRepositoryMock).saveAll(batchJobItemTrackInfoEntitiesArgumentCaptor.capture());
batchJobItemTrackInfoEntity.setStatus(BatchJobItemStatus.ABORTED);
assertThat(batchJobItemTrackInfoEntitiesArgumentCaptor.getValue()).containsExactly(batchJobItemTrackInfoEntity);
}
@Test
void markNonFinishedJobsAsAbortedByBatchJobType_ShouldMarkAsAbortedTheNotFinishedBatchJobTrackInfoEntitiesAndNotTheBatchJobItemTrackInfoEntities_WhenBatchJobItemTrackInfoEntitiesStatusesAreNotFinishedStatuses() {
final BatchJobTrackInfoEntity batchJobTrackInfoEntity = BatchJobTrackInfoEntity.builder().batchJobId(JOB_ID)
.build();
when(batchJobTrackingRepositoryMock.findByBatchJobTypeAndStatusIn(JOB_TYPE, JOB_NOT_FINISHED_STATUSES))
.thenReturn(List.of(batchJobTrackInfoEntity));
final BatchJobItemTrackInfoEntity batchJobItemTrackInfoEntity = BatchJobItemTrackInfoEntity.builder()
.batchJobId(JOB_ID).status(BatchJobItemStatus.PENDING).build();
when(batchJobItemTrackingRepositoryMock.findByBatchJobId(JOB_ID))
.thenReturn(List.of(batchJobItemTrackInfoEntity));
testObj.markNonFinishedJobsAsAborted(JOB_TYPE);
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityArgumentCaptor.capture());
batchJobTrackInfoEntity.setStatus(BatchJobStatus.ABORTED);
assertThat(batchJobTrackInfoEntityArgumentCaptor.getValue()).isEqualTo(batchJobTrackInfoEntity);
// No items are updated in this case.
verify(batchJobItemTrackingRepositoryMock).saveAll(List.of());
}
@Test
void markNonFinishedJobsAsAborted_ShouldMarkAsAbortedTheNotFinishedBatchJobTrackInfoEntitiesAndTheBatchJobItemTrackInfoEntities_WhenBatchJobItemTrackInfoEntitiesStatusesAreFinishedStatuses() {
// Same scenario as above, but for the no-argument overload (all job types).
final BatchJobTrackInfoEntity batchJobTrackInfoEntity = BatchJobTrackInfoEntity.builder().batchJobId(JOB_ID)
.build();
when(batchJobTrackingRepositoryMock.findByStatusIn(JOB_NOT_FINISHED_STATUSES))
.thenReturn(List.of(batchJobTrackInfoEntity));
final BatchJobItemTrackInfoEntity batchJobItemTrackInfoEntity = BatchJobItemTrackInfoEntity.builder()
.batchJobId(JOB_ID).status(BatchJobItemStatus.FAILED).build();
when(batchJobItemTrackingRepositoryMock.findByBatchJobId(JOB_ID))
.thenReturn(List.of(batchJobItemTrackInfoEntity));
testObj.markNonFinishedJobsAsAborted();
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityArgumentCaptor.capture());
batchJobTrackInfoEntity.setStatus(BatchJobStatus.ABORTED);
assertThat(batchJobTrackInfoEntityArgumentCaptor.getValue()).isEqualTo(batchJobTrackInfoEntity);
verify(batchJobItemTrackingRepositoryMock).saveAll(batchJobItemTrackInfoEntitiesArgumentCaptor.capture());
batchJobItemTrackInfoEntity.setStatus(BatchJobItemStatus.ABORTED);
assertThat(batchJobItemTrackInfoEntitiesArgumentCaptor.getValue()).containsExactly(batchJobItemTrackInfoEntity);
}
@Test
void markNonFinishedJobsAsAborted_ShouldMarkAsAbortedTheNotFinishedBatchJobTrackInfoEntitiesAndNotTheBatchJobItemTrackInfoEntities_WhenBatchJobItemTrackInfoEntitiesStatusesAreNotFinishedStatuses() {
final BatchJobTrackInfoEntity batchJobTrackInfoEntity = BatchJobTrackInfoEntity.builder().batchJobId(JOB_ID)
.build();
when(batchJobTrackingRepositoryMock.findByStatusIn(JOB_NOT_FINISHED_STATUSES))
.thenReturn(List.of(batchJobTrackInfoEntity));
final BatchJobItemTrackInfoEntity batchJobItemTrackInfoEntity = BatchJobItemTrackInfoEntity.builder()
.batchJobId(JOB_ID).status(BatchJobItemStatus.PENDING).build();
when(batchJobItemTrackingRepositoryMock.findByBatchJobId(JOB_ID))
.thenReturn(List.of(batchJobItemTrackInfoEntity));
testObj.markNonFinishedJobsAsAborted();
verify(batchJobTrackingRepositoryMock).save(batchJobTrackInfoEntityArgumentCaptor.capture());
batchJobTrackInfoEntity.setStatus(BatchJobStatus.ABORTED);
assertThat(batchJobTrackInfoEntityArgumentCaptor.getValue()).isEqualTo(batchJobTrackInfoEntity);
verify(batchJobItemTrackingRepositoryMock).saveAll(List.of());
}
@Test
void trackJobItemsAdded_ShouldCreateBatchJobItemTrackInfoEntitiesAndSaveThem() {
when(batchJobItemMock.getItemId()).thenReturn(BATCH_JOB_ITEM_ID);
when(batchJobItemMock.getItemType()).thenReturn(JOB_TYPE);
testObj.trackJobItemsAdded(JOB_ID, List.of(batchJobItemMock));
verify(batchJobItemTrackingRepositoryMock).saveAll(batchJobItemTrackInfoEntitiesArgumentCaptor.capture());
// New items start life in PENDING state.
assertThat(batchJobItemTrackInfoEntitiesArgumentCaptor.getValue())
.containsExactly(BatchJobItemTrackInfoEntity.builder().batchJobId(JOB_ID).itemId(BATCH_JOB_ITEM_ID)
.itemType(JOB_TYPE).status(BatchJobItemStatus.PENDING).build());
}
@Test
void trackJobItemProcessingStarted_ShouldSetJobStatusAsInProgress() {
when(batchJobItemMock.getItemId()).thenReturn(BATCH_JOB_ITEM_ID);
when(batchJobItemMock.getItemType()).thenReturn(JOB_TYPE);
// The composite id passed to getReferenceById is captured at stubbing time for later assertion.
when(batchJobItemTrackingRepositoryMock.getReferenceById(batchJobItemTrackingInfoIdArgumentCaptor.capture()))
.thenReturn(batchJobItemTrackInfoEntityMock);
when(batchJobItemTrackInfoEntityMock.toBuilder()).thenReturn(batchJobItemTrackInfoEntityBuilderMock);
when(batchJobItemTrackInfoEntityBuilderMock.status(BatchJobItemStatus.IN_PROGRESS))
.thenReturn(batchJobItemTrackInfoEntityBuilderMock);
when(batchJobItemTrackInfoEntityBuilderMock.build()).thenReturn(batchJobItemTrackInfoEntityMock);
testObj.trackJobItemProcessingStarted(JOB_ID, batchJobItemMock);
assertThat(batchJobItemTrackingInfoIdArgumentCaptor.getValue()).isEqualTo(BatchJobItemTrackingInfoId.builder()
.batchJobId(JOB_ID).itemType(JOB_TYPE).itemId(BATCH_JOB_ITEM_ID).build());
verify(batchJobItemTrackingRepositoryMock).save(batchJobItemTrackInfoEntityMock);
}
@Test
void trackJobItemProcessingFinished_ShouldSetJobStatusAsSuccessful_WenIsSuccessful() {
when(batchJobItemMock.getItemId()).thenReturn(BATCH_JOB_ITEM_ID);
when(batchJobItemMock.getItemType()).thenReturn(JOB_TYPE);
when(batchJobItemTrackingRepositoryMock.getReferenceById(batchJobItemTrackingInfoIdArgumentCaptor.capture()))
.thenReturn(batchJobItemTrackInfoEntityMock);
when(batchJobItemTrackInfoEntityMock.toBuilder()).thenReturn(batchJobItemTrackInfoEntityBuilderMock);
when(batchJobItemTrackInfoEntityBuilderMock.status(BatchJobItemStatus.SUCCESSFUL))
.thenReturn(batchJobItemTrackInfoEntityBuilderMock);
when(batchJobItemTrackInfoEntityBuilderMock.build()).thenReturn(batchJobItemTrackInfoEntityMock);
testObj.trackJobItemProcessingFinished(JOB_ID, batchJobItemMock, true);
assertThat(batchJobItemTrackingInfoIdArgumentCaptor.getValue()).isEqualTo(BatchJobItemTrackingInfoId.builder()
.batchJobId(JOB_ID).itemType(JOB_TYPE).itemId(BATCH_JOB_ITEM_ID).build());
verify(batchJobItemTrackingRepositoryMock).save(batchJobItemTrackInfoEntityMock);
}
@Test
void trackJobItemProcessingFinished_ShouldSetJobStatusAsFailed_WenIsNotSuccessful() {
when(batchJobItemMock.getItemId()).thenReturn(BATCH_JOB_ITEM_ID);
when(batchJobItemMock.getItemType()).thenReturn(JOB_TYPE);
when(batchJobItemTrackingRepositoryMock.getReferenceById(batchJobItemTrackingInfoIdArgumentCaptor.capture()))
.thenReturn(batchJobItemTrackInfoEntityMock);
when(batchJobItemTrackInfoEntityMock.toBuilder()).thenReturn(batchJobItemTrackInfoEntityBuilderMock);
when(batchJobItemTrackInfoEntityBuilderMock.status(BatchJobItemStatus.FAILED))
.thenReturn(batchJobItemTrackInfoEntityBuilderMock);
when(batchJobItemTrackInfoEntityBuilderMock.build()).thenReturn(batchJobItemTrackInfoEntityMock);
testObj.trackJobItemProcessingFinished(JOB_ID, batchJobItemMock, false);
assertThat(batchJobItemTrackingInfoIdArgumentCaptor.getValue()).isEqualTo(BatchJobItemTrackingInfoId.builder()
.batchJobId(JOB_ID).itemType(JOB_TYPE).itemId(BATCH_JOB_ITEM_ID).build());
verify(batchJobItemTrackingRepositoryMock).save(batchJobItemTrackInfoEntityMock);
}
@Test
void getItemsBeingProcessedOrEnquedToProcess_ShouldReturnTheBatchJobItemTrackInfoEntityWithStatusNotFinished() {
when(batchJobItemTrackingRepositoryMock.findByItemTypeAndStatusIn(JOB_TYPE, ITEM_NOT_FINISHED_STATUSES))
.thenReturn(List.of(batchJobItemTrackInfoEntityMock));
final List<BatchJobItemTrackInfoEntity> result = testObj.getItemsBeingProcessedOrEnquedToProcess(JOB_TYPE);
assertThat(result).isEqualTo(List.of(batchJobItemTrackInfoEntityMock));
}
@Test
void findLastJobExecutionWithNonEmptyExtraction_ShouldReturnJobWithNonEmptyExtraction() {
TimeMachine.useFixedClockAt(LocalDateTime.now());
// The service queries only the most recent matching execution (page size 1).
when(batchJobTrackingRepositoryMock.findLastJobExecutionsWithItems(JOB_TYPE, TimeMachine.now(),
Pageable.ofSize(1))).thenReturn(List.of(batchJobTrackInfoEntityMock));
final Optional<BatchJobTrackInfoEntity> result = testObj.findLastJobExecutionWithNonEmptyExtraction(JOB_TYPE,
TimeMachine.now());
assertThat(result).contains(batchJobTrackInfoEntityMock);
}
@Test
void findLastJobExecutionWithNonEmptyExtraction_ShouldReturnEmpty_WhenNoJobsFound() {
TimeMachine.useFixedClockAt(LocalDateTime.now());
when(batchJobTrackingRepositoryMock.findLastJobExecutionsWithItems(JOB_TYPE, TimeMachine.now(),
Pageable.ofSize(1))).thenReturn(List.of());
final Optional<BatchJobTrackInfoEntity> result = testObj.findLastJobExecutionWithNonEmptyExtraction(JOB_TYPE,
TimeMachine.now());
assertThat(result).isEmpty();
}
}
package com.paypal.jobsystem.quartzadapter.support;
import com.paypal.jobsystem.batchjob.model.BatchJob;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.quartzadapter.support.AbstractBatchJobSupportQuartzJob;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobAdapterFactory;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.InjectMocks;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link AbstractBatchJobSupportQuartzJob}: executeBatchJob must wrap the batch
 * job into a Quartz {@link Job} via the adapter factory and delegate execution to it.
 */
@ExtendWith(MockitoExtension.class)
class AbstractBatchJobSupportQuartzJobTest {

	@InjectMocks
	@Spy
	private MyAbstractBatchJobSupportQuartzJob testObj;

	// Renamed with the "Mock" suffix for consistency with every other @Mock field in these suites.
	@Mock
	private QuartzBatchJobAdapterFactory quartzBatchJobAdapterFactoryMock;

	@Mock
	private BatchJob<BatchJobContext, BatchJobItem<?>> batchJobMock;

	@Mock
	private Job jobMock;

	@Mock
	private JobExecutionContext jobExecutionContextMock;

	@Test
	void executeBatchJob_shouldWrapWithQuartzJobAndExecute() throws JobExecutionException {
		// Given: the factory adapts the batch job into a Quartz Job.
		when(quartzBatchJobAdapterFactoryMock.getQuartzJob(batchJobMock)).thenReturn(jobMock);

		testObj.executeBatchJob(batchJobMock, jobExecutionContextMock);

		// Then: execution is delegated to the adapted Quartz job.
		verify(jobMock).execute(jobExecutionContextMock);
	}

	/**
	 * Minimal concrete subclass used only to instantiate the abstract class under test.
	 */
	static class MyAbstractBatchJobSupportQuartzJob extends AbstractBatchJobSupportQuartzJob {

		protected MyAbstractBatchJobSupportQuartzJob(final QuartzBatchJobAdapterFactory quartzBatchJobAdapterFactory) {
			super(quartzBatchJobAdapterFactory);
		}

		@Override
		public void execute(final JobExecutionContext context) throws JobExecutionException {
			// No-op: only the inherited executeBatchJob is exercised by this test.
		}

	}

}
| 4,690 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter/job/QuartzBatchJobAdapterTest.java | package com.paypal.jobsystem.quartzadapter.job;
import com.paypal.jobsystem.batchjob.model.BatchJob;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.services.BatchJobExecutor;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobAdapter;
import com.paypal.jobsystem.quartzadapter.jobcontext.QuartzBatchJobContextFactory;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class QuartzBatchJobAdapterTest {

	@InjectMocks
	private QuartzBatchJobAdapter testObj;

	@Mock
	private BatchJobExecutor batchJobExecutorMock;

	@Mock
	private BatchJob<BatchJobContext, BatchJobItem<?>> batchJobMock;

	@Mock
	private QuartzBatchJobContextFactory quartzBatchJobContextFactoryMock;

	@Mock
	private JobExecutionContext jobExecutionContextMock;

	@Mock
	private BatchJobContext batchJobContextMock;

	@Test
	void execute_ShouldExecuteAdaptedJob() throws JobExecutionException {
		// The context factory turns the Quartz execution context into a batch-job context.
		when(quartzBatchJobContextFactoryMock.getBatchJobContext(batchJobMock, jobExecutionContextMock))
				.thenReturn(batchJobContextMock);

		testObj.execute(jobExecutionContextMock);

		// The wrapped batch job must be delegated to the executor with a batch-job context.
		verify(batchJobExecutorMock).execute(eq(batchJobMock), any(BatchJobContext.class));
	}
}
| 4,691 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter/job/QuartzBatchJobBuilderTest.java | package com.paypal.jobsystem.quartzadapter.job;
import com.paypal.jobsystem.batchjob.model.BatchJob;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobBean;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobBuilder;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.JobDetail;
import static org.assertj.core.api.Assertions.assertThat;
@ExtendWith(MockitoExtension.class)
class QuartzBatchJobBuilderTest {

	// FIX: removed the unused `testObj` field — QuartzBatchJobBuilder.newJob is static,
	// so no instance of the builder is ever needed by this test.
	// FIX: the mock is now `private` and parameterized with wildcards instead of the raw
	// BatchJob type (raw types defeat generic type checking and trigger unchecked warnings).
	@Mock
	private BatchJob<?, ?> batchJobMock;

	@Test
	void shouldBuildQuartzBatchJob() {
		final JobDetail result = QuartzBatchJobBuilder.newJob(batchJobMock).build();

		// The built JobDetail must target the Quartz bean class and carry the wrapped
		// batch job in its data map under the well-known key.
		assertThat(result.getJobClass()).isEqualTo(QuartzBatchJobBean.class);
		assertThat(result.getJobDataMap()).containsEntry(QuartzBatchJobBean.KEY_BATCH_JOB_BEAN, batchJobMock);
	}
}
| 4,692 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter/job/QuartzBatchJobBeanTest.java | package com.paypal.jobsystem.quartzadapter.job;
import com.paypal.jobsystem.batchjob.model.BatchJob;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobAdapter;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobAdapterFactory;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobBean;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class QuartzBatchJobBeanTest {

	@InjectMocks
	private QuartzBatchJobBean testObj;

	@Mock
	private JobExecutionContext jobExecutionContextMock;

	@Mock
	private QuartzBatchJobAdapterFactory quartzBatchJobAdapterFactory;

	@Mock
	private BatchJob<BatchJobContext, BatchJobItem<?>> batchJobMock;

	@Mock
	private QuartzBatchJobAdapter quartzBatchJobAdapterMock;

	@Mock
	private JobDetail jobDetailMock;

	@Mock
	private JobDataMap jobDataMapMock;

	@Test
	void executeInternal_shouldExecuteBatchJob() throws JobExecutionException {
		// The bean asks the factory for a Quartz adapter of the injected batch job...
		when(quartzBatchJobAdapterFactory.getQuartzJob(batchJobMock)).thenReturn(quartzBatchJobAdapterMock);

		testObj.executeInternal(jobExecutionContextMock);

		// ...and runs that adapter with the original execution context.
		verify(quartzBatchJobAdapterMock).execute(jobExecutionContextMock);
	}

	@Test
	void getBatchJobClass_shouldReturnJobDataMapBatchJobBeanClass() {
		// The batch-job instance travels inside the job data map under KEY_BATCH_JOB_BEAN;
		// the static accessor must resolve its runtime class from there.
		when(jobExecutionContextMock.getJobDetail()).thenReturn(jobDetailMock);
		when(jobDetailMock.getJobDataMap()).thenReturn(jobDataMapMock);
		when(jobDataMapMock.get(QuartzBatchJobBean.KEY_BATCH_JOB_BEAN)).thenReturn(batchJobMock);

		assertThat(QuartzBatchJobBean.getBatchJobClass(jobExecutionContextMock)).isEqualTo(batchJobMock.getClass());
	}
}
| 4,693 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter/job/QuartzBatchJobAdapterFactoryImplTest.java | package com.paypal.jobsystem.quartzadapter.job;
import com.paypal.jobsystem.batchjob.model.BatchJob;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.services.BatchJobExecutor;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobAdapter;
import com.paypal.jobsystem.quartzadapter.job.QuartzBatchJobAdapterFactoryImpl;
import com.paypal.jobsystem.quartzadapter.jobcontext.QuartzBatchJobContextFactory;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.InjectMocks;
import org.quartz.Job;
import static org.assertj.core.api.Assertions.assertThat;
@ExtendWith(MockitoExtension.class)
class QuartzBatchJobAdapterFactoryImplTest {

	@InjectMocks
	private QuartzBatchJobAdapterFactoryImpl testObj;

	@Mock
	private BatchJobExecutor batchJobExecutor;

	@Mock
	private QuartzBatchJobContextFactory quartzBatchJobContextFactory;

	@Mock
	private BatchJob<BatchJobContext, BatchJobItem<?>> batchJobMock;

	@Test
	void getQuartzJob_ShouldCreateAdaptedBatchJob() {
		final Job adaptedJob = testObj.getQuartzJob(batchJobMock);

		// The factory must wrap the batch job into the Quartz adapter type.
		assertThat(adaptedJob).isNotNull().isInstanceOf(QuartzBatchJobAdapter.class);
	}
}
| 4,694 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter/jobcontext/QuartzBatchJobContextFactoryImplTest.java | package com.paypal.jobsystem.quartzadapter.jobcontext;
import com.paypal.jobsystem.batchjob.model.BatchJob;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.quartzadapter.jobcontext.QuartzBatchJobContextAdapter;
import com.paypal.jobsystem.quartzadapter.jobcontext.QuartzBatchJobContextFactoryImpl;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobKey;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
class QuartzBatchJobContextFactoryImplTest {

	@InjectMocks
	@Spy
	private QuartzBatchJobContextFactoryImpl testObj;

	@Mock
	private JobExecutionContext jobExecutionContextMock;

	@Mock
	private JobDetail jobDetailMock;

	@Mock
	private JobDataMap jobDataMapMock;

	// NOTE(review): raw BatchJob type kept as-is — parameterizing it requires knowing the
	// exact getBatchJobContext signature, which is outside this file; confirm and tighten.
	@Mock
	private BatchJob batchJobMock;

	@Test
	void getBatchJobContext_ShouldCreateBatchJobContext_ForScheduledJobs() {
		when(jobExecutionContextMock.getJobDetail()).thenReturn(jobDetailMock);
		when(jobDetailMock.getJobDataMap()).thenReturn(jobDataMapMock);
		// A scheduled job key carries no "_<id>" suffix.
		when(jobDetailMock.getKey()).thenReturn(new JobKey("JobName"));

		final BatchJobContext result = testObj.getBatchJobContext(batchJobMock, jobExecutionContextMock);

		assertThat(result).isNotNull();
		// The factory must seed the data map with an execution UUID, the job itself and
		// a job name that is NOT flagged as a manual run.
		verify(jobDataMapMock, times(1)).put(eq(QuartzBatchJobContextAdapter.KEY_BATCH_JOB_EXECUTION_UUID), any());
		verify(jobDataMapMock, times(1)).put(QuartzBatchJobContextAdapter.KEY_BATCH_JOB, batchJobMock);
		verify(jobDataMapMock, times(1)).put(eq(QuartzBatchJobContextAdapter.KEY_BATCH_JOB_NAME),
				argThat(x -> x.contains(batchJobMock.getClass().getSimpleName()) && !x.contains("MANUAL")));
	}

	@Test
	void getBatchJobContext_ShouldCreateBatchJobContext_ForManualJobs() {
		when(jobExecutionContextMock.getJobDetail()).thenReturn(jobDetailMock);
		when(jobDetailMock.getJobDataMap()).thenReturn(jobDataMapMock);
		// A manual job key ends in "_<id>"; the id is expected back in the "#MANUAL#" tag.
		when(jobDetailMock.getKey()).thenReturn(new JobKey("JobName_1234"));

		final BatchJobContext result = testObj.getBatchJobContext(batchJobMock, jobExecutionContextMock);

		assertThat(result).isNotNull();
		verify(jobDataMapMock, times(1)).put(eq(QuartzBatchJobContextAdapter.KEY_BATCH_JOB_EXECUTION_UUID), any());
		verify(jobDataMapMock, times(1)).put(QuartzBatchJobContextAdapter.KEY_BATCH_JOB, batchJobMock);
		verify(jobDataMapMock, times(1)).put(eq(QuartzBatchJobContextAdapter.KEY_BATCH_JOB_NAME),
				argThat(x -> x.contains(batchJobMock.getClass().getSimpleName()) && x.contains("#MANUAL#1234")));
	}
}
| 4,695 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/quartzadapter/jobcontext/QuartzBatchJobContextAdapterTest.java | package com.paypal.jobsystem.quartzadapter.jobcontext;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobStatus;
import com.paypal.jobsystem.quartzadapter.jobcontext.QuartzBatchJobContextAdapter;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobKey;
import java.util.UUID;
import static com.paypal.jobsystem.quartzadapter.jobcontext.QuartzBatchJobContextAdapter.KEY_BATCH_JOB_EXECUTION_UUID;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;
// Unit tests for QuartzBatchJobContextAdapter, which stores batch-job progress and
// status inside the Quartz JobDataMap of the current execution context. The KEY_*
// String constants below deliberately duplicate the adapter's internal JobDataMap
// keys, so these tests fail if the production keys ever drift.
@ExtendWith(MockitoExtension.class)
class QuartzBatchJobContextAdapterTest {
private static final String KEY_BATCH_JOB_STATUS = "batchJobStatus";
private static final String KEY_NUMBER_OF_ITEMS_PROCESSED = "numberOfItemsProcessed";
private static final String KEY_NUMBER_OF_ITEMS_FAILED = "numberOfItemsFailed";
private static final String KEY_NUMBER_OF_ITEMS_TO_BE_PROCESSED = "numberOfItemsToBeProcessed";
private static final String KEY_BATCH_JOB_NAME = "batchJobName";
private static final String JOB_NAME = "JobName";
// Fixture counters: 24 items expected, 22 processed, 2 failed — i.e. nothing remaining.
public static final int NUMBER_OF_ITEMS_PROCESSED = 22;
public static final int NUMBER_OF_ITEMS_FAILED = 2;
public static final int NUMBER_OF_ITEMS_TO_BE_PROCESSED = 24;
private QuartzBatchJobContextAdapter testObj;
@Mock
private JobExecutionContext jobExecutionContextMock;
@Mock
private JobDetail jobDetailMock;
@Mock
private JobDataMap jobDataMapMock;
@Mock
private JobKey jobKeyMock;
// Common stubbing: the adapter reaches the JobDataMap through the execution context.
// Stubs are lenient because each test exercises only a subset of them; individual
// tests re-stub the values they need to vary.
@BeforeEach
public void setUp() {
lenient().when(jobExecutionContextMock.getJobDetail()).thenReturn(jobDetailMock);
lenient().doReturn(BatchJobContext.class).when(jobDetailMock).getJobClass();
lenient().when(jobDetailMock.getJobDataMap()).thenReturn(jobDataMapMock);
lenient().when(jobDataMapMock.get(KEY_BATCH_JOB_STATUS)).thenReturn(BatchJobStatus.FINISHED);
lenient().when(jobDataMapMock.get(KEY_NUMBER_OF_ITEMS_PROCESSED)).thenReturn(NUMBER_OF_ITEMS_PROCESSED);
lenient().when(jobDataMapMock.get(KEY_NUMBER_OF_ITEMS_FAILED)).thenReturn(NUMBER_OF_ITEMS_FAILED);
lenient().when(jobDataMapMock.get(KEY_NUMBER_OF_ITEMS_TO_BE_PROCESSED))
.thenReturn(NUMBER_OF_ITEMS_TO_BE_PROCESSED);
lenient().when(jobDataMapMock.get(KEY_BATCH_JOB_NAME)).thenReturn(JOB_NAME);
testObj = new QuartzBatchJobContextAdapter(jobExecutionContextMock);
}
@Test
void getJobName_ShouldReturnJobName() {
final String result = testObj.getJobName();
assertThat(result).isEqualTo(JOB_NAME);
}
// The stored UUID is validated by parsing it back: UUID.fromString throws on malformed
// input, so a successful parse proves a well-formed UUID string was written.
@Test
void initializeBatchJobUuid_ShouldGenerateUUID() {
testObj.initializeBatchJobUuid();
verify(jobDataMapMock).put(eq(KEY_BATCH_JOB_EXECUTION_UUID), argThat(x -> UUID.fromString(x) != null));
}
@Test
void setNumberOfItemsToBeProcessed_ShouldSetNumberOfItemsToBeProcessedInJobDataMap() {
testObj.setNumberOfItemsToBeProcessed(NUMBER_OF_ITEMS_TO_BE_PROCESSED);
verify(jobDataMapMock).put(KEY_NUMBER_OF_ITEMS_TO_BE_PROCESSED,
Integer.valueOf(NUMBER_OF_ITEMS_TO_BE_PROCESSED));
}
@Test
void getNumberOfItemsToBeProcessed_ShouldReturnTheNumberOfItemsToBeProcessed() {
final int result = testObj.getNumberOfItemsToBeProcessed();
assertThat(result).isEqualTo(NUMBER_OF_ITEMS_TO_BE_PROCESSED);
}
@Test
void getNumberOfItemsFailed_ShouldReturnTheNumberOfItemsFailed() {
final int result = testObj.getNumberOfItemsFailed();
assertThat(result).isEqualTo(NUMBER_OF_ITEMS_FAILED);
}
// Increment tests: the adapter reads the current counter and writes back value + 1.
@Test
void incrementFailedItems_ShouldIncrementFailedItems() {
testObj.incrementFailedItems();
verify(jobDataMapMock).put(KEY_NUMBER_OF_ITEMS_FAILED, Integer.valueOf(NUMBER_OF_ITEMS_FAILED + 1));
}
@Test
void getNumberOfItemsProcessed_ShouldReturnTheNumberOfItemsProcessed() {
final int result = testObj.getNumberOfItemsProcessed();
assertThat(result).isEqualTo(NUMBER_OF_ITEMS_PROCESSED);
}
// With the fixture values, 24 to process minus 22 processed minus 2 failed leaves 0
// remaining — presumably remaining = toBeProcessed - processed - failed; TODO confirm
// the formula against the adapter implementation.
@Test
void getNumberOfItemsRemaining_ShouldReturnTheNumberOfItemsRemaining() {
final int result = testObj.getNumberOfItemsRemaining();
assertThat(result).isZero();
}
@Test
void incrementProcessedItems_ShouldIncrementProcessedItems() {
testObj.incrementProcessedItems();
verify(jobDataMapMock).put(KEY_NUMBER_OF_ITEMS_PROCESSED, Integer.valueOf(NUMBER_OF_ITEMS_PROCESSED + 1));
}
// Status setters simply store the corresponding enum value under KEY_BATCH_JOB_STATUS.
@Test
void setRunningStatus_ShouldSetRunningStatusInJobDataMap() {
testObj.setRunningStatus();
verify(jobDataMapMock).put(KEY_BATCH_JOB_STATUS, BatchJobStatus.RUNNING);
}
@Test
void setFinishedStatus_ShouldSetFinishedStatusInJobDataMap() {
testObj.setFinishedStatus();
verify(jobDataMapMock).put(KEY_BATCH_JOB_STATUS, BatchJobStatus.FINISHED);
}
@Test
void setFinishedWithFailureStatus_ShouldSetFinishedStatusInJobDataMap() {
testObj.setFinishedWithFailuresStatus();
verify(jobDataMapMock).put(KEY_BATCH_JOB_STATUS, BatchJobStatus.FINISHED_WITH_FAILURES);
}
@Test
void setFailedStatus_ShouldSetFailedStatusInJobDataMap() {
testObj.setFailedStatus();
verify(jobDataMapMock).put(KEY_BATCH_JOB_STATUS, BatchJobStatus.FAILED);
}
// getStatus upgrades RUNNING/FINISHED to their *_WITH_FAILURES variants when the
// failed-items counter is non-zero; any other stored status passes through unchanged.
@Test
void getStatus_ShouldReturnRunning_WhenCurrentStatusIsRunningAndNumberOfItemsFailedIsZero() {
when(jobDataMapMock.get(KEY_NUMBER_OF_ITEMS_FAILED)).thenReturn(0);
when(jobDataMapMock.get(KEY_BATCH_JOB_STATUS)).thenReturn(BatchJobStatus.RUNNING);
final BatchJobStatus result = testObj.getStatus();
assertThat(result).isEqualTo(BatchJobStatus.RUNNING);
}
@Test
void getStatus_ShouldReturnRunningWithFailures_WhenCurrentStatusIsRunningAndNumberOfItemsFailedIsGreaterThanZero() {
when(jobDataMapMock.get(KEY_NUMBER_OF_ITEMS_FAILED)).thenReturn(NUMBER_OF_ITEMS_FAILED);
when(jobDataMapMock.get(KEY_BATCH_JOB_STATUS)).thenReturn(BatchJobStatus.RUNNING);
final BatchJobStatus result = testObj.getStatus();
assertThat(result).isEqualTo(BatchJobStatus.RUNNING_WITH_FAILURES);
}
@Test
void getStatus_ShouldReturnFinished_WhenCurrentStatusIsFinishedAndNumberOfItemsFailedIsZero() {
when(jobDataMapMock.get(KEY_NUMBER_OF_ITEMS_FAILED)).thenReturn(0);
when(jobDataMapMock.get(KEY_BATCH_JOB_STATUS)).thenReturn(BatchJobStatus.FINISHED);
final BatchJobStatus result = testObj.getStatus();
assertThat(result).isEqualTo(BatchJobStatus.FINISHED);
}
@Test
void getStatus_ShouldReturnFinishedWithFailures_WhenCurrentStatusIsFinishedAndNumberOfItemsFailedIsGreaterThanZero() {
when(jobDataMapMock.get(KEY_NUMBER_OF_ITEMS_FAILED)).thenReturn(NUMBER_OF_ITEMS_FAILED);
when(jobDataMapMock.get(KEY_BATCH_JOB_STATUS)).thenReturn(BatchJobStatus.FINISHED);
final BatchJobStatus result = testObj.getStatus();
assertThat(result).isEqualTo(BatchJobStatus.FINISHED_WITH_FAILURES);
}
@Test
void getStatus_ShouldReturnCurrentStatus_WhenCurrentStatusIsNotFinishedOrRunning() {
when(jobDataMapMock.get(KEY_BATCH_JOB_STATUS)).thenReturn(BatchJobStatus.FAILED);
final BatchJobStatus result = testObj.getStatus();
assertThat(result).isEqualTo(BatchJobStatus.FAILED);
}
@Test
void getJobExecutionContext_ShouldReturnJobExecutionContext() {
final JobExecutionContext result = testObj.getJobExecutionContext();
assertThat(result).isEqualTo(jobExecutionContextMock);
}
// resetCounters must zero all three item counters in the data map.
@Test
void resetCounters_ShouldItemsResetCounters() {
testObj.resetCounters();
verify(jobDataMapMock).put(KEY_NUMBER_OF_ITEMS_PROCESSED, Integer.valueOf(0));
verify(jobDataMapMock).put(KEY_NUMBER_OF_ITEMS_FAILED, Integer.valueOf(0));
verify(jobDataMapMock).put(KEY_NUMBER_OF_ITEMS_TO_BE_PROCESSED, Integer.valueOf(0));
}
}
| 4,696 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobsupport | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobsupport/support/AbstractDynamicWindowDeltaBatchJobItemsExtractorTest.java | package com.paypal.jobsystem.batchjobsupport.support;
import com.paypal.infrastructure.support.date.DateUtil;
import com.paypal.infrastructure.support.date.TimeMachine;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjobaudit.repositories.entities.BatchJobTrackInfoEntity;
import com.paypal.jobsystem.batchjobaudit.services.BatchJobTrackingService;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.*;
import org.mockito.junit.jupiter.MockitoExtension;
import java.time.LocalDateTime;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Optional;
import static java.time.ZoneOffset.UTC;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
class AbstractDynamicWindowDeltaBatchJobItemsExtractorTest {

	public static final int EXTRACTION_MAX_DAYS = 30;

	@Spy
	@InjectMocks
	private MyDynamicWindowDeltaBatchJobItemsExtractor testObj;

	@Mock
	private BatchJobTrackingService batchJobTrackingServiceMock;

	@Mock(answer = Answers.RETURNS_DEEP_STUBS)
	private BatchJobContext batchJobContextMock;

	@Mock
	private BatchJobTrackInfoEntity batchJobTrackInfoEntityMock;

	@Captor
	private ArgumentCaptor<LocalDateTime> searchJobFromArgumentCaptor;

	@Test
	void getCalculatedDelta_ShouldReturnDeltaFromService() {
		// Fixed clock so TimeMachine.now() is stable across stubbing and assertions.
		TimeMachine.useFixedClockAt(LocalDateTime.now());
		testObj.extractionMaxDays = EXTRACTION_MAX_DAYS;
		when(batchJobContextMock.getJobName()).thenReturn("JOBNAME");
		when(batchJobTrackingServiceMock.findLastJobExecutionWithNonEmptyExtraction(eq("JOBNAME"),
				any(LocalDateTime.class))).thenReturn(Optional.of(batchJobTrackInfoEntityMock));
		when(batchJobTrackInfoEntityMock.getStartTime()).thenReturn(TimeMachine.now());

		final Date result = testObj.getCalculatedDelta(batchJobContextMock);

		verify(batchJobTrackingServiceMock).findLastJobExecutionWithNonEmptyExtraction(eq("JOBNAME"),
				searchJobFromArgumentCaptor.capture());
		// Delta equals the last successful extraction's start time.
		assertThat(result).isEqualTo(DateUtil.convertToDate(TimeMachine.now(), UTC));
		// The search window starts extractionMaxDays before "now".
		assertThat(searchJobFromArgumentCaptor.getValue()).isEqualTo(TimeMachine.now().minusDays(EXTRACTION_MAX_DAYS));
	}

	@Test
	void getCalculatedDelta_ShouldReturnExtractionMaxDays_WhenServiceDontFindResults() {
		TimeMachine.useFixedClockAt(LocalDateTime.now());
		testObj.extractionMaxDays = EXTRACTION_MAX_DAYS;
		when(batchJobContextMock.getJobName()).thenReturn("JOBNAME");
		when(batchJobTrackingServiceMock.findLastJobExecutionWithNonEmptyExtraction(eq("JOBNAME"),
				any(LocalDateTime.class))).thenReturn(Optional.empty());

		final Date result = testObj.getCalculatedDelta(batchJobContextMock);

		verify(batchJobTrackingServiceMock).findLastJobExecutionWithNonEmptyExtraction(eq("JOBNAME"),
				searchJobFromArgumentCaptor.capture());
		// FIX: the captured search-window start was previously captured but never
		// asserted (a dead capture); pin it like the other test does.
		assertThat(searchJobFromArgumentCaptor.getValue()).isEqualTo(TimeMachine.now().minusDays(EXTRACTION_MAX_DAYS));
		// With no previous execution, the delta falls back to now - extractionMaxDays.
		final Date expectedDate = DateUtil.convertToDate(TimeMachine.now().minusDays(EXTRACTION_MAX_DAYS), UTC);
		assertThat(result).isEqualTo(expectedDate);
	}

	/**
	 * Concrete subclass used to exercise the abstract delta calculation; item
	 * extraction itself is irrelevant here, so it returns an empty collection.
	 */
	private static class MyDynamicWindowDeltaBatchJobItemsExtractor
			extends AbstractDynamicWindowDeltaBatchJobItemsExtractor<BatchJobContext, BatchJobItem<Object>> {

		private MyDynamicWindowDeltaBatchJobItemsExtractor(final BatchJobTrackingService batchJobTrackingService) {
			super(batchJobTrackingService);
		}

		@Override
		protected Collection<BatchJobItem<Object>> getItems(final BatchJobContext ctx, final Date delta) {
			return List.of();
		}
	}
}
| 4,697 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobsupport | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobsupport/support/AbstractFixedWindowDeltaBatchJobItemsExtractorTest.java | package com.paypal.jobsystem.batchjobsupport.support;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.testsupport.TestDateUtil;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.Collection;
import java.util.Date;
import static org.assertj.core.api.Assertions.assertThat;
@ExtendWith(MockitoExtension.class)
public class AbstractFixedWindowDeltaBatchJobItemsExtractorTest {

	public static final long RESYNC_MAX_DAYS = 90;

	@InjectMocks
	private MyFixedWindowDeltaBatchJobItemsExtractor testObj;

	@Mock
	private BatchJobContext batchJobContextMock;

	@Test
	void getCalculatedDelta_shouldReturnTodayMinusResyncMaxDays_whenThereIsNoError() {
		testObj.resyncMaxDays = RESYNC_MAX_DAYS;

		final Date result = testObj.getCalculatedDelta(batchJobContextMock);

		// The delta is wall-clock based, so allow a small tolerance window around
		// "now minus RESYNC_MAX_DAYS" instead of an exact comparison.
		final Date lowerBound = TestDateUtil.currentDateMinusDaysPlusSeconds(RESYNC_MAX_DAYS, -100);
		final Date upperBound = TestDateUtil.currentDateMinusDaysPlusSeconds(RESYNC_MAX_DAYS, 10);
		assertThat(result).isBetween(lowerBound, upperBound);
	}

	/**
	 * Concrete subclass for exercising the fixed-window delta; item extraction is
	 * never invoked by this test.
	 */
	private static class MyFixedWindowDeltaBatchJobItemsExtractor
			extends AbstractFixedWindowDeltaBatchJobItemsExtractor<BatchJobContext, BatchJobItem<Object>> {

		@Override
		protected Collection<BatchJobItem<Object>> getItems(final BatchJobContext ctx, final Date delta) {
			return null;
		}
	}
}
| 4,698 |
0 | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobsupport | Create_ds/mirakl-hyperwallet-connector/hmc-jobsystem/src/test/java/com/paypal/jobsystem/batchjobsupport/support/AbstractBatchJobTest.java | package com.paypal.jobsystem.batchjobsupport.support;
import com.paypal.jobsystem.batchjob.model.BatchJobContext;
import com.paypal.jobsystem.batchjob.model.BatchJobItem;
import com.paypal.jobsystem.batchjobsupport.model.BatchJobItemProcessor;
import com.paypal.jobsystem.batchjobsupport.model.BatchJobItemsExtractor;
import com.paypal.jobsystem.batchjobsupport.support.AbstractExtractBatchJob;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.Collection;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
class AbstractBatchJobTest {

	@InjectMocks
	private MyAbstractBatchJob testObj;

	@Mock
	private BatchJobItemsExtractor<BatchJobContext, BatchJobItem<Object>> batchJobItemsExtractorMock;

	@Mock
	private BatchJobItemProcessor<BatchJobContext, BatchJobItem<Object>> batchJobItemProcessorMock;

	@Mock
	private BatchJobContext batchJobContextMock;

	@Mock
	private Collection<BatchJobItem<Object>> batchJobItemsMock;

	@Mock
	private BatchJobItem<Object> batchJobItemMock;

	@Test
	void getItems_shouldCallItemsExtractor() {
		when(batchJobItemsExtractorMock.getItems(batchJobContextMock)).thenReturn(batchJobItemsMock);

		final Collection<BatchJobItem<Object>> extractedItems = testObj.getItems(batchJobContextMock);

		// The template must return exactly what the configured extractor produced.
		assertThat(extractedItems).isEqualTo(batchJobItemsMock);
	}

	@Test
	void processItem_shouldCallItemsProcessor() {
		testObj.processItem(batchJobContextMock, batchJobItemMock);

		// Item processing is delegated, once, to the configured processor.
		verify(batchJobItemProcessorMock).processItem(batchJobContextMock, batchJobItemMock);
	}

	// The optional collaborators are not overridden by the subclass, so the base
	// class defaults (empty Optionals) must be exposed.
	@Test
	void getBatchJobItemValidator_shouldReturnEmptyValue() {
		assertThat(testObj.getBatchJobItemValidator()).isEmpty();
	}

	@Test
	void getBatchJobPreProcessor_shouldReturnEmptyValue() {
		assertThat(testObj.getBatchJobPreProcessor()).isEmpty();
	}

	@Test
	void getBatchJobItemEnricher_shouldReturnEmptyValue() {
		assertThat(testObj.getBatchJobItemEnricher()).isEmpty();
	}

	/**
	 * Concrete subclass wiring the extractor and processor mocks into the abstract
	 * template via constructor injection.
	 */
	private static class MyAbstractBatchJob extends AbstractExtractBatchJob<BatchJobContext, BatchJobItem<Object>> {

		private final BatchJobItemProcessor<BatchJobContext, BatchJobItem<Object>> itemBatchJobItemProcessor;

		private final BatchJobItemsExtractor<BatchJobContext, BatchJobItem<Object>> batchJobItemBatchJobItemsExtractor;

		private MyAbstractBatchJob(
				final BatchJobItemProcessor<BatchJobContext, BatchJobItem<Object>> itemBatchJobItemProcessor,
				final BatchJobItemsExtractor<BatchJobContext, BatchJobItem<Object>> batchJobItemBatchJobItemsExtractor) {
			this.itemBatchJobItemProcessor = itemBatchJobItemProcessor;
			this.batchJobItemBatchJobItemsExtractor = batchJobItemBatchJobItemsExtractor;
		}

		@Override
		protected BatchJobItemProcessor<BatchJobContext, BatchJobItem<Object>> getBatchJobItemProcessor() {
			return itemBatchJobItemProcessor;
		}

		@Override
		protected BatchJobItemsExtractor<BatchJobContext, BatchJobItem<Object>> getBatchJobItemsExtractor() {
			return batchJobItemBatchJobItemsExtractor;
		}
	}
}
| 4,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.