index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/FileBasedOffsetCheckpointStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import com.google.common.base.Strings;
import io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataDeserializer;
import io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataSerializer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.runtime.Context;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
/**
* DO NOT USE IN PRODUCTION. This strategy is created only for unit test purposes and demonstrates using an alternative
* storage backend for committing topic partition offsets.
*/
public class FileBasedOffsetCheckpointStrategy implements CheckpointStrategy<OffsetAndMetadata> {
private static final Logger LOGGER = LoggerFactory.getLogger(FileBasedOffsetCheckpointStrategy.class);
private static final ObjectMapper MAPPER = new ObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
static {
MAPPER.registerModule(new Jdk8Module());
SimpleModule offsetAndMetadataModule = new SimpleModule();
offsetAndMetadataModule.addSerializer(OffsetAndMetadata.class, new OffsetAndMetadataSerializer());
offsetAndMetadataModule.addDeserializer(OffsetAndMetadata.class, new OffsetAndMetadataDeserializer());
MAPPER.registerModule(offsetAndMetadataModule);
}
public static final String DEFAULT_CHECKPOINT_DIR = "/tmp/FileBasedOffsetCheckpointStrategy";
public static final String CHECKPOINT_DIR_PROP = "checkpointDirectory";
private final AtomicReference<String> checkpointDir = new AtomicReference<>(null);
private String filePath(final TopicPartition tp) {
return checkpointDir.get() + "/" + tp.topic().concat("-").concat(String.valueOf(tp.partition()));
}
@Override
public void init(final Context context) {
String checkptDir = (String) context.getParameters().get(CHECKPOINT_DIR_PROP);
checkpointDir.compareAndSet(null, checkptDir);
createDirectoryIfDoesNotExist(checkpointDir.get());
}
@Override
public boolean persistCheckpoint(Map<TopicPartition, OffsetAndMetadata> checkpoint) {
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : checkpoint.entrySet()) {
final TopicPartition tp = entry.getKey();
final Path filePath = Paths.get(filePath(tp));
try {
if (Files.notExists(filePath)) {
LOGGER.info("file {} does not exist, creating one", filePath);
Files.createFile(filePath);
}
Files.write(filePath, Collections.singletonList(MAPPER.writeValueAsString(entry.getValue())));
} catch (IOException e) {
LOGGER.error("error writing checkpoint {} to file {}", entry.getValue(), filePath, e);
throw new RuntimeException(e);
}
}
return true;
}
@Override
public Optional<OffsetAndMetadata> loadCheckpoint(TopicPartition tp) {
try {
final List<String> lines = Files.readAllLines(Paths.get(filePath(tp)));
if (!lines.isEmpty()) {
final String checkpointString = lines.get(0);
LOGGER.info("read from file {}", checkpointString);
return Optional.ofNullable(MAPPER.readValue(checkpointString, OffsetAndMetadata.class));
}
} catch (IOException e) {
LOGGER.error("error loading checkpoint from file {}", filePath(tp), e);
}
return Optional.empty();
}
@Override
public void init(Map<String, String> properties) {
if (!properties.containsKey(CHECKPOINT_DIR_PROP) || Strings.isNullOrEmpty(properties.get(CHECKPOINT_DIR_PROP))) {
throw new IllegalArgumentException("missing required property " + CHECKPOINT_DIR_PROP);
}
String checkptDir = properties.get(CHECKPOINT_DIR_PROP);
checkpointDir.compareAndSet(null, checkptDir);
createDirectoryIfDoesNotExist(checkpointDir.get());
}
private void createDirectoryIfDoesNotExist(String dir) {
if (Files.notExists(Paths.get(dir))) {
LOGGER.info("file {} does not exist, creating one", dir);
try {
Files.createDirectory(Paths.get(dir));
} catch (IOException e) {
LOGGER.error("failed to create checkpoint directory {}", dir);
throw new RuntimeException(e);
}
}
}
@Override
public Map<TopicPartition, Optional<OffsetAndMetadata>> loadCheckpoints(
List<TopicPartition> tpList) {
Map<TopicPartition, Optional<OffsetAndMetadata>> tpChkMap = new HashMap<>();
for (TopicPartition tp : tpList) {
tpChkMap.put(tp, loadCheckpoint(tp));
}
return tpChkMap;
}
@Override
public String type() {
return CheckpointStrategyOptions.FILE_BASED_OFFSET_CHECKPOINTING;
}
}
| 4,500 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/NoopCheckpointStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import org.apache.kafka.common.TopicPartition;
import io.mantisrx.runtime.Context;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * A {@link CheckpointStrategy} that persists nothing: offsets are never committed and
 * loading a checkpoint always reports "absent". Selecting this strategy forfeits
 * at-least-once semantics after a worker restart.
 */
public class NoopCheckpointStrategy implements CheckpointStrategy<Void> {

    @Override
    public void init(Context context) {
        // nothing to initialize
    }

    @Override
    public void init(Map<String, String> properties) {
        // nothing to initialize
    }

    @Override
    public boolean persistCheckpoint(Map<TopicPartition, Void> checkpoint) {
        // intentionally a no-op; always report success
        return true;
    }

    @Override
    public Optional<Void> loadCheckpoint(TopicPartition tp) {
        return Optional.empty();
    }

    @Override
    public Map<TopicPartition, Optional<Void>> loadCheckpoints(List<TopicPartition> tpList) {
        return Collections.emptyMap();
    }

    @Override
    public String type() {
        return CheckpointStrategyOptions.NONE;
    }
}
| 4,501 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategyOptions.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
/**
 * String constants naming the supported checkpoint strategy implementations.
 */
public final class CheckpointStrategyOptions {

    /**
     * Leverages Kafka for committing offsets.
     */
    public static final String OFFSETS_ONLY_DEFAULT = "offsetsOnlyDefaultKafka";

    /**
     * Sample strategy for storing Offsets outside Kafka to a File based storage, this is only used for Unit testing.
     */
    public static final String FILE_BASED_OFFSET_CHECKPOINTING = "fileBasedOffsetCheckpointing";

    /**
     * Default CheckpointStrategy to disable committing offsets, note this would disable atleast once semantics as
     * offsets are no longer committed to resume from after a worker/process failure.
     */
    public static final String NONE = "disableCheckpointing";

    private CheckpointStrategyOptions() {
        // constants holder; never instantiated
    }

    /**
     * Comma-separated listing of every supported strategy name, e.g. for error messages.
     */
    public static String values() {
        return String.join(", ", OFFSETS_ONLY_DEFAULT, FILE_BASED_OFFSET_CHECKPOINTING, NONE);
    }
}
| 4,502 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/KafkaOffsetCheckpointStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import org.apache.kafka.clients.consumer.InvalidOffsetException;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.runtime.Context;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
* Leverages the default Kafka facilities to commit offsets to Kafka using {@link KafkaConsumer#commitSync(Map) commitSync(Map)}.
*/
/**
 * Leverages the default Kafka facilities to commit offsets to Kafka using {@link KafkaConsumer#commitSync(Map) commitSync(Map)}.
 */
public class KafkaOffsetCheckpointStrategy implements CheckpointStrategy<OffsetAndMetadata> {

    // was: non-final lowercase "logger"; loggers should be static final constants
    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaOffsetCheckpointStrategy.class);

    private final KafkaConsumer<?, ?> consumer;
    private final ConsumerMetrics consumerMetrics;

    public KafkaOffsetCheckpointStrategy(KafkaConsumer<?, ?> consumer, ConsumerMetrics metrics) {
        this.consumer = consumer;
        this.consumerMetrics = metrics;
    }

    @Override
    public void init(Map<String, String> properties) {
        // no properties required
    }

    /**
     * Synchronously commits the given offsets to Kafka and records them in consumer metrics.
     *
     * @param checkpoint map of topic-partition to offset to commit; an empty map is a no-op
     * @return true when the commit succeeded (or there was nothing to commit); false on an
     *         unrecoverable KafkaException that should not be retried
     */
    @Override
    public boolean persistCheckpoint(final Map<TopicPartition, OffsetAndMetadata> checkpoint) {
        if (!checkpoint.isEmpty()) {
            try {
                LOGGER.debug("committing offsets {}", checkpoint);
                consumer.commitSync(checkpoint);
                consumerMetrics.recordCommittedOffset(checkpoint);
            } catch (InvalidOffsetException ioe) {
                // offsets are out of range for the partition; log positions and rewind
                LOGGER.warn("failed to commit offsets {} will seek to beginning", checkpoint, ioe);
                final Set<TopicPartition> topicPartitionSet = ioe.partitions();
                for (TopicPartition tp : topicPartitionSet) {
                    LOGGER.info("partition {} consumer position {}", tp, consumer.position(tp));
                }
                consumer.seekToBeginning(ioe.partitions());
            } catch (KafkaException cfe) {
                // should not be retried
                LOGGER.warn("unrecoverable exception on commit offsets {}", checkpoint, cfe);
                return false;
            }
        }
        return true;
    }

    @Override
    public Optional<OffsetAndMetadata> loadCheckpoint(TopicPartition tp) {
        LOGGER.trace("rely on default kafka protocol to seek to last committed offset");
        return Optional.empty();
    }

    @Override
    public void init(Context context) {
        // no-op
    }

    /**
     * Bulk load; each entry is empty since Kafka itself resumes from the last committed offset.
     */
    @Override
    public Map<TopicPartition, Optional<OffsetAndMetadata>> loadCheckpoints(List<TopicPartition> tpList) {
        Map<TopicPartition, Optional<OffsetAndMetadata>> mp = new HashMap<>();
        for (TopicPartition tp : tpList) {
            mp.put(tp, loadCheckpoint(tp));
        }
        return mp;
    }

    @Override
    public String type() {
        return CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT;
    }
}
| 4,503 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategyFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.runtime.Context;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Factory for {@link CheckpointStrategy} instances keyed by the strategy names declared
 * in {@link CheckpointStrategyOptions}.
 */
public final class CheckpointStrategyFactory {

    private static final Logger LOGGER = LoggerFactory.getLogger(CheckpointStrategyFactory.class);

    private CheckpointStrategyFactory() { }

    /**
     * Factory method to create instance of {@link CheckpointStrategy}
     *
     * @param context Mantis runtime context
     * @param consumer Kafka consumer
     * @param strategy checkpoint strategy string
     * @param metrics consumer metrics
     * @return instance of {@link CheckpointStrategy}; a no-op strategy for unknown names
     */
    public static CheckpointStrategy<?> getNewInstance(final Context context,
                                                       final KafkaConsumer<?, ?> consumer,
                                                       final String strategy,
                                                       final ConsumerMetrics metrics) {
        switch (strategy) {
            case CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT:
                final KafkaOffsetCheckpointStrategy kafkaStrategy = new KafkaOffsetCheckpointStrategy(consumer, metrics);
                kafkaStrategy.init(context);
                return kafkaStrategy;

            case CheckpointStrategyOptions.FILE_BASED_OFFSET_CHECKPOINTING:
                final FileBasedOffsetCheckpointStrategy fileStrategy = new FileBasedOffsetCheckpointStrategy();
                LOGGER.info("initializing file checkpoint strategy");
                fileStrategy.init(context);
                return fileStrategy;

            case CheckpointStrategyOptions.NONE:
            default:
                // unknown strategies fall through to the no-op implementation
                return new NoopCheckpointStrategy();
        }
    }
}
| 4,504 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import org.apache.kafka.common.TopicPartition;
import io.mantisrx.runtime.Context;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * Strategy for persisting and restoring per topic-partition checkpoint state of type {@code S}.
 * Known implementations are named in {@link CheckpointStrategyOptions}.
 */
public interface CheckpointStrategy<S> {
    /**
     * initialization when creating the strategy.
     *
     * @param context Mantis runtime context, e.g. for reading job parameters
     */
    void init(Context context);
    /**
     * initialization when creating the strategy.
     *
     * @param initParams strategy-specific initialization properties
     */
    void init(Map<String, String> initParams);
    /**
     * persist checkpoint state by TopicPartition.
     *
     * @param checkpoint map of topic-partition to checkpoint state to persist
     * @return true on persist success, false otherwise
     */
    boolean persistCheckpoint(Map<TopicPartition, S> checkpoint);
    /**
     * return the persisted checkpoint state for topic-partition (if exists).
     *
     * @param tp topic-partition
     *
     * @return CheckpointState if persisted, else empty Optional
     */
    Optional<S> loadCheckpoint(TopicPartition tp);
    /**
     * Bulk API to Load checkpoints.
     *
     * @param tpList list of TopicPartitions to load checkpointState
     * @return map of each requested topic-partition to its checkpoint state (empty Optional when none persisted)
     */
    Map<TopicPartition, Optional<S>> loadCheckpoints(List<TopicPartition> tpList);
    /**
     * Get checkpoint strategy type, one of {@link CheckpointStrategyOptions}
     * @return {@link CheckpointStrategyOptions checkpointStrategy} implemented
     */
    String type();
}
| 4,505 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignorImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* Is invoked during initialization of the KafkaSource if Static partitioning ins enabled.
*/
/**
 * Is invoked during initialization of the KafkaSource if Static partitioning is enabled.
 */
public class StaticPartitionAssignorImpl implements StaticPartitionAssignor {

    private static final Logger LOGGER = LoggerFactory.getLogger(StaticPartitionAssignorImpl.class);

    /**
     * Does a simple round robin assignment of each TopicName-PartitionNumber combination to the list of consumers.
     * The round-robin counter carries over across topics, so partitions are spread evenly overall.
     * Returns only the assignments for the current consumer.
     *
     * @param consumerIndex Current workers consumerIndex (0-based, must be less than totalNumConsumers)
     * @param topicPartitionCounts Map of topic -> no of partitions; topics with a non-positive count are skipped
     * @param totalNumConsumers Total number of consumers (must be positive)
     *
     * @return list of topic-partitions owned by this consumer
     * @throws IllegalArgumentException if any argument is out of range
     * @throws NullPointerException if topicPartitionCounts is null
     */
    @Override
    public List<TopicPartition> assignPartitionsToConsumer(int consumerIndex, Map<String, Integer> topicPartitionCounts, int totalNumConsumers) {
        Objects.requireNonNull(topicPartitionCounts, "TopicPartitionCount Map cannot be null");
        if (consumerIndex < 0) {
            throw new IllegalArgumentException("Consumer Index cannot be negative " + consumerIndex);
        }
        // was "< 0": a zero consumer count slipped through to the next check with a misleading message
        if (totalNumConsumers <= 0) {
            throw new IllegalArgumentException("Total Number of consumers must be positive " + totalNumConsumers);
        }
        if (consumerIndex >= totalNumConsumers) {
            throw new IllegalArgumentException("Consumer Index " + consumerIndex + " cannot be greater than or equal to Total Number of consumers " + totalNumConsumers);
        }
        List<TopicPartition> topicPartitions = new ArrayList<>();
        // round-robin cursor over consumers; deliberately NOT reset between topics
        int currConsumer = 0;
        for (Map.Entry<String, Integer> topicPartitionCount : topicPartitionCounts.entrySet()) {
            final String topic = topicPartitionCount.getKey();
            final Integer numPartitions = topicPartitionCount.getValue();
            if (numPartitions <= 0) {
                LOGGER.warn("Number of partitions is {} for Topic {} skipping", numPartitions, topic);
                continue;
            }
            for (int i = 0; i < numPartitions; i++) {
                if (currConsumer == consumerIndex) {
                    topicPartitions.add(new TopicPartition(topic, i));
                }
                currConsumer = (currConsumer + 1) % totalNumConsumers;
            }
        }
        return topicPartitions;
    }
}
| 4,506 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import org.apache.kafka.common.TopicPartition;
import java.util.List;
import java.util.Map;
/**
 * Assigns Kafka topic partitions to consumers statically, i.e. without relying on
 * Kafka's consumer-group rebalancing protocol.
 */
public interface StaticPartitionAssignor {
    /**
     * Computes the list of topic-partitions owned by the consumer at {@code consumerIndex}.
     *
     * @param consumerIndex index of the current consumer (0-based)
     * @param topicPartitionCounts map of topic name to its number of partitions
     * @param totalNumConsumers total number of consumers partitions are distributed over
     * @return topic-partitions assigned to this consumer only
     */
    List<TopicPartition> assignPartitionsToConsumer(int consumerIndex,
                                                    Map<String, Integer> topicPartitionCounts,
                                                    int totalNumConsumers);
}
| 4,507 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/MapDeserializerBase.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import org.apache.kafka.common.serialization.Deserializer;
import java.util.Map;
/**
 * Base Kafka {@link Deserializer} that turns a raw payload into a {@code Map<String, Object>}
 * by delegating to the {@link Parser} contract implemented by subclasses.
 */
public abstract class MapDeserializerBase implements Parser, Deserializer<Map<String, Object>> {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // no configuration required
    }

    @Override
    public Map<String, Object> deserialize(String topic, byte[] data) {
        // Kafka passes null for null message values; propagate as-is
        if (data == null) {
            return null;
        }
        if (!canParse(data)) {
            throw new UnsupportedOperationException("Message cannot be deserialized with parser");
        }
        return parseMessage(data);
    }

    @Override
    public void close() {
        // no resources to release
    }
}
| 4,508 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/Parser.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
/**
 * Contract for parsing raw Kafka message payloads into a field map.
 */
public interface Parser {
    /**
     * Determine if the payload byte array is parsable.
     *
     * @param message raw message payload
     *
     * @return boolean indicate if payload is parsable
     */
    boolean canParse(byte[] message);
    /**
     * parse a payload byte array into a map.
     *
     * @param message raw message payload
     *
     * @return map of field names to parsed values
     *
     * @throws ParseException if the payload cannot be parsed
     */
    Map<String, Object> parseMessage(byte[] message) throws ParseException;
    /**
     * Returns a partial, human-readable rendering of the payload for use in log messages.
     * <p>
     * NOTE(review): the payload is always decoded as UTF-8 — there is no Base64 fallback and no
     * UnsupportedEncodingException despite what earlier documentation claimed. Payloads longer
     * than 128 characters are truncated to the first 127 characters.
     *
     * @param payload raw message payload
     *
     * @return decoded (possibly truncated) string form of the payload
     */
    default String getPartialPayLoadForLogging(byte[] payload) {
        String msg = new String(payload, StandardCharsets.UTF_8);
        return msg.length() <= 128 ? msg : msg.substring(0, 127);
    }
}
| 4,509 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/OffsetAndMetadataDeserializer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import java.io.IOException;
/**
 * Jackson deserializer for Kafka's {@link OffsetAndMetadata}; the inverse of
 * {@link OffsetAndMetadataSerializer}. Expects a JSON object with a numeric "offset"
 * field and an optional "metadata" string field.
 */
public class OffsetAndMetadataDeserializer extends JsonDeserializer<OffsetAndMetadata> {

    @Override
    public OffsetAndMetadata deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
        final JsonNode node = p.getCodec().readTree(p);
        final JsonNode offsetNode = node.get("offset");
        if (offsetNode == null) {
            // fail with a descriptive error instead of the NullPointerException the
            // previous version threw on a missing required field
            throw new IOException("cannot deserialize OffsetAndMetadata, missing required field 'offset' in " + node);
        }
        final long offset = offsetNode.longValue();
        // "metadata" is optional; a missing or JSON-null field maps to null, matching the
        // original behavior for explicit JSON null (textValue() of a NullNode is null)
        final JsonNode metadataNode = node.get("metadata");
        final String metadata = (metadataNode == null || metadataNode.isNull()) ? null : metadataNode.textValue();
        return new OffsetAndMetadata(offset, metadata);
    }
}
| 4,510 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/ParseException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
/**
 * Unchecked exception thrown when a raw Kafka message payload cannot be parsed.
 */
public class ParseException extends RuntimeException {

    // generated serial version id
    private static final long serialVersionUID = 7066656417880807188L;

    /** Creates an exception carrying only a descriptive message. */
    public ParseException(String message) {
        super(message);
    }

    /** Creates an exception wrapping the underlying cause. */
    public ParseException(Throwable cause) {
        super(cause);
    }

    /** Creates an exception with both a descriptive message and the underlying cause. */
    public ParseException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 4,511 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/SimpleJsonDeserializer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
public class SimpleJsonDeserializer extends MapDeserializerBase {
private final static Logger LOGGER = LoggerFactory.getLogger(SimpleJsonDeserializer.class);
private final ObjectMapper jsonMapper = new ObjectMapper();
private final com.fasterxml.jackson.core.type.TypeReference<Map<String, Object>> typeRef =
new com.fasterxml.jackson.core.type.TypeReference<Map<String, Object>>() {};
@Override
public boolean canParse(byte[] message) {
// no easy way of pre-determine if the json is valid without actually parsing it (unlike chaski format message).
// so we'll always assume the message can be parsed and move onto deserialization phase
return true;
}
@Override
public Map<String, Object> parseMessage(byte[] message) throws ParseException {
Map<String, Object> result;
try {
result = jsonMapper.readValue(message, typeRef);
} catch (Exception ex) {
LOGGER.error("Json parser failed to parse message! PAYLOAD:" + getPartialPayLoadForLogging(message), ex);
throw new ParseException("Json not able to parse raw message", ex);
}
return result;
}
}
| 4,512 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/ParserType.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
/**
* Parser types supported for Kafka message payloads.
*/
/**
 * Parser types supported for Kafka message payloads.
 */
public enum ParserType {
    SIMPLE_JSON("simplejson", new SimpleJsonDeserializer());

    // immutable per-constant configuration (was mutable; now final)
    private final String propName;
    private final Parser parser;

    ParserType(String propName, Parser parserInstance) {
        this.propName = propName;
        this.parser = parserInstance;
    }

    /** Property-file name of this parser type. */
    public String getPropName() {
        return propName;
    }

    /** Shared parser instance for this type. */
    public Parser getParser() {
        return parser;
    }

    /** Null-safe comparison against this type's property name. */
    public boolean equalsName(String otherName) {
        return (otherName != null) && propName.equals(otherName);
    }

    @Override
    public String toString() {
        return this.propName;
    }

    /**
     * Looks up a ParserType by its property name.
     *
     * @param parserType property name, e.g. "simplejson"
     * @return matching ParserType
     * @throws IllegalArgumentException for unknown names, naming the offending value
     */
    public static ParserType parser(String parserType) {
        // iterate over the declared constants instead of duplicating the name literals here,
        // so adding a new enum constant automatically extends the lookup
        for (ParserType type : values()) {
            if (type.equalsName(parserType)) {
                return type;
            }
        }
        throw new IllegalArgumentException("Invalid parser type: " + parserType);
    }
}
| 4,513 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/OffsetAndMetadataSerializer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import java.io.IOException;
public class OffsetAndMetadataSerializer extends JsonSerializer<OffsetAndMetadata> {

    /**
     * Writes an {@link OffsetAndMetadata} as a JSON object with an
     * {@code "offset"} number field and a {@code "metadata"} string field.
     */
    @Override
    public void serialize(OffsetAndMetadata oam, JsonGenerator gen, SerializerProvider serializers)
            throws IOException {
        gen.writeStartObject();
        gen.writeFieldName("offset");
        gen.writeNumber(oam.offset());
        gen.writeFieldName("metadata");
        gen.writeString(oam.metadata());
        gen.writeEndObject();
    }
}
| 4,514 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/KafkaSinkJobParameters.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
/**
 * Job parameter names for the Kafka sink. All producer overrides share the
 * {@link #PREFIX} namespace so they can be told apart from other job parameters.
 */
public final class KafkaSinkJobParameters {

    /** Common prefix for every Kafka sink producer job parameter. */
    public static final String PREFIX = "kafka.sink.producer.";

    /** Name of the job parameter holding the Kafka topic to write to. */
    public static final String TOPIC = PREFIX + "topic";

    private KafkaSinkJobParameters() {
        // Constants holder; not instantiable.
    }
}
| 4,515 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/KafkaSink.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.spectator.api.Registry;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.sink.SelfDocumentingSink;
import rx.Observable;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
public class KafkaSink<T> implements SelfDocumentingSink<T> {

    private static final Logger logger = LoggerFactory.getLogger(KafkaSink.class);

    /** Converts a job event into the byte payload written to Kafka. */
    private final Func1<T, byte[]> encoder;
    private final Registry registry;
    /** Producer is created lazily on first {@link #call} and shared thereafter. */
    private final AtomicReference<KafkaProducer<byte[], byte[]>> kafkaProducerAtomicRef = new AtomicReference<>(null);

    KafkaSink(Registry registry, Func1<T, byte[]> encoder) {
        this.encoder = encoder;
        this.registry = registry;
    }

    /**
     * Encodes each event and sends it to the topic named by the
     * {@link KafkaSinkJobParameters#TOPIC} job parameter.
     */
    @Override
    public void call(Context context, PortRequest ignore, Observable<T> dataO) {
        if (kafkaProducerAtomicRef.get() == null) {
            MantisKafkaProducerConfig mantisKafkaProducerConfig = new MantisKafkaProducerConfig(context);
            Map<String, Object> producerProperties = mantisKafkaProducerConfig.getProducerProperties();
            KafkaProducer<byte[], byte[]> newProducer = new KafkaProducer<>(producerProperties);
            if (kafkaProducerAtomicRef.compareAndSet(null, newProducer)) {
                logger.info("Kafka Producer initialized");
            } else {
                // Lost the initialization race: close the redundant producer so its
                // network threads and buffers are not leaked.
                newProducer.close();
            }
        }
        KafkaProducer<byte[], byte[]> kafkaProducer = kafkaProducerAtomicRef.get();
        Parameters parameters = context.getParameters();
        String topic = (String) parameters.get(KafkaSinkJobParameters.TOPIC);
        dataO.map(encoder::call)
            .flatMap((dataBytes) ->
                // send() returns a Future; resolve it on the io scheduler so the
                // stream thread is not blocked waiting on the broker.
                Observable.from(kafkaProducer.send(new ProducerRecord<>(topic, dataBytes)))
                    .subscribeOn(Schedulers.io()))
            .subscribe();
    }

    /** Topic parameter plus every Kafka producer config as an overridable job parameter. */
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        final List<ParameterDefinition<?>> params = new ArrayList<>();
        params.add(new StringParameter()
            .name(KafkaSinkJobParameters.TOPIC)
            .description("Kafka topic to write to")
            .validator(Validators.notNullOrEmpty())
            .required()
            .build());
        params.addAll(MantisKafkaProducerConfig.getJobParameterDefinitions());
        return params;
    }

    @Override
    public Metadata metadata() {
        return new Metadata.Builder()
            .name("Mantis Kafka Sink")
            .description("Writes the output of the job into the configured Kafka topic")
            .build();
    }
}
| 4,516 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/MantisKafkaProducerConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.kafka.clients.producer.ProducerConfig;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MantisKafkaProducerConfig extends ProducerConfig {

    private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaProducerConfig.class);

    /** Fallback broker list used when the job does not override bootstrap.servers. */
    public static final String DEFAULT_BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";
    /** Wait for acknowledgement from all in-sync replicas by default. */
    public static final String DEFAULT_ACKS_CONFIG = "all";
    public static final int DEFAULT_RETRIES_CONFIG = 1;

    /**
     * Creates a producer config from the given base properties with any
     * job-parameter overrides ({@code kafka.sink.producer.*}) applied on top.
     */
    public MantisKafkaProducerConfig(Map<String, Object> props,
                                     Context context) {
        super(applyJobParamOverrides(context, props));
    }

    /** Creates a producer config from {@link #defaultProps()} plus job-parameter overrides. */
    public MantisKafkaProducerConfig(Context context) {
        this(defaultProps(), context);
    }

    @Override
    protected Map<String, Object> postProcessParsedConfig(Map<String, Object> parsedValues) {
        // No extra post-processing beyond what ProducerConfig already does.
        return super.postProcessParsedConfig(parsedValues);
    }

    /**
     * Baseline producer properties: byte-array serializers, local broker,
     * JMX metric reporting, acks=all and a single retry.
     */
    public static Map<String, Object> defaultProps() {
        final Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, DEFAULT_BOOTSTRAP_SERVERS_CONFIG);
        props.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, JmxReporter.class.getName());
        props.put(ProducerConfig.ACKS_CONFIG, DEFAULT_ACKS_CONFIG);
        props.put(ProducerConfig.RETRIES_CONFIG, DEFAULT_RETRIES_CONFIG);
        return props;
    }

    /**
     * Applies job-parameter overrides onto {@code parsedValues}. Any producer config
     * key may be overridden via a job parameter named {@code kafka.sink.producer.<key>};
     * bootstrap.servers and client.id are then set explicitly (client.id defaults to
     * the Mantis job id when not overridden).
     */
    private static Map<String, Object> applyJobParamOverrides(Context context, Map<String, Object> parsedValues) {
        final Parameters parameters = context.getParameters();
        Map<String, Object> defaultProps = defaultProps();
        for (String key : configNames()) {
            Object value = parameters.get(KafkaSinkJobParameters.PREFIX + key, null);
            if (value != null) {
                LOGGER.info("job param override for key {} -> {}", key, value);
                parsedValues.put(key, value);
            }
        }
        // Bootstrap servers: job param wins, else the built-in default.
        final String bootstrapBrokers = (String) parameters.get(KafkaSinkJobParameters.PREFIX + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, defaultProps.get(BOOTSTRAP_SERVERS_CONFIG));
        parsedValues.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
        // Client id: job param wins, else the Mantis job id.
        final String clientId = (String) parameters.get(KafkaSinkJobParameters.PREFIX + ProducerConfig.CLIENT_ID_CONFIG, context.getJobId());
        parsedValues.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
        return parsedValues;
    }

    /** Returns the parsed producer config as a map, dropping null keys and values. */
    public Map<String, Object> getProducerProperties() {
        return values().entrySet().stream()
            .filter(x -> x.getKey() != null && x.getValue() != null)
            .collect(Collectors.toMap(x -> x.getKey(),
                                      x -> (Object) x.getValue()));
    }

    /**
     * Exposes every Kafka producer config as a String job parameter so producer
     * settings can be overridden at job submit time.
     *
     * @return one {@link ParameterDefinition} per Kafka producer config key,
     *         pre-populated with the default from {@link #defaultProps()} when one exists
     */
    public static List<ParameterDefinition<?>> getJobParameterDefinitions() {
        List<ParameterDefinition<?>> params = new ArrayList<>();
        Map<String, Object> defaultProps = defaultProps();
        for (String key : configNames()) {
            ParameterDefinition.Builder<String> builder = new StringParameter()
                .name(KafkaSinkJobParameters.PREFIX + key)
                .validator(Validators.alwaysPass())
                .description(KafkaSinkJobParameters.PREFIX + key);
            if (defaultProps.containsKey(key)) {
                Object value = defaultProps.get(key);
                if (value instanceof Class) {
                    // Class-valued defaults (the serializers) are exposed by canonical name.
                    builder = builder.defaultValue(((Class) value).getCanonicalName());
                } else {
                    // BUGFIX: defaults are not all Strings (e.g. retries is an Integer),
                    // so a blind (String) cast threw ClassCastException. String.valueOf
                    // is the identity for String defaults and stringifies the rest.
                    builder = builder.defaultValue(String.valueOf(value));
                }
            }
            params.add(builder.build());
        }
        return params;
    }
}
| 4,517 |
0 | Create_ds/geode-examples/putall/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/putall/src/main/java/org/apache/geode_examples/putall/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.putall;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.IntStream;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
public class Example {

    /** Proxy to the server-side "example-region". */
    private final Region<Integer, String> region;

    public Example(Region<Integer, String> region) {
        this.region = region;
    }

    /**
     * Connects to the locator, bulk-loads ten entries into the server region
     * with a single putAll, prints them back and disconnects.
     */
    public static void main(String[] args) {
        // connect to the locator using default port 10334
        ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
            .set("log-level", "WARN").create();
        // create a local region that matches the server region
        Region<Integer, String> region =
            cache.<Integer, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
                .create("example-region");
        Example example = new Example(region);
        example.insertValues(10);
        example.printValues(example.getValues());
        cache.close();
    }

    /** Returns the keys currently present on the server. */
    Set<Integer> getValues() {
        return new HashSet<>(region.keySetOnServer());
    }

    /** Inserts keys {@code 1..upperLimit} in one putAll round trip. */
    void insertValues(int upperLimit) {
        // Was a raw Map; parameterizing catches key/value type errors at compile time.
        Map<Integer, String> values = new HashMap<>();
        IntStream.rangeClosed(1, upperLimit).forEach(i -> values.put(i, "value" + i));
        region.putAll(values);
    }

    /** Prints each key with the value fetched from the server. */
    void printValues(Set<Integer> values) {
        values.forEach(key -> System.out.println(String.format("%d:%s", key, region.get(key))));
    }
}
| 4,518 |
0 | Create_ds/geode-examples/transaction/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/transaction/src/main/java/org/apache/geode_examples/transaction/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.transaction;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
public class Example {

    /** Number of increments each child performs; expected total = children * INCREMENTS. */
    public static final int INCREMENTS = 1000;
    public static final String REGION_NAME = "example-region";
    public static final String KEY = "counter";

    final Region<String, Integer> region;
    /** Spawned incrementer processes keyed by their numeric id. */
    final Map<Integer, Process> children = new HashMap<>();

    /**
     * Builds the path to the java executable of the currently running JVM
     * ({@code javaw.exe} on Windows).
     */
    static String constructJVMPath() {
        StringBuilder builder = new StringBuilder();
        builder.append(System.getProperty("java.home"));
        builder.append(File.separator);
        builder.append("bin");
        builder.append(File.separator);
        builder.append("java");
        if (System.getProperty("os.name").toLowerCase().contains("win")) {
            builder.append("w.exe");
        }
        return builder.toString();
    }

    public static void main(String[] args) {
        // connect to the locator using default port 10334
        ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
            .set("log-level", "WARN").create();
        // create a local region that matches the server region
        ClientRegionFactory<String, Integer> clientRegionFactory =
            cache.createClientRegionFactory(ClientRegionShortcut.PROXY);
        Region<String, Integer> region = clientRegionFactory.create(REGION_NAME);
        Example example = new Example(region);
        example.initializeEntry();
        example.executeChildProcesses(5);
        cache.close();
    }

    Example(Region<String, Integer> region) {
        this.region = region;
    }

    /** Spawns one Incrementer child process with the given id. */
    void executeChildProcess(int id) {
        String[] command = new String[5];
        command[0] = constructJVMPath();
        command[1] = "-classpath";
        command[2] = System.getProperty("java.class.path") + ":build/libs/transaction.jar";
        command[3] = "org.apache.geode_examples.transaction.Incrementer";
        command[4] = Integer.toString(id);
        try {
            children.put(id, Runtime.getRuntime().exec(command));
            System.out.println("Executed child " + id);
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
    }

    /**
     * Launches the incrementers, waits for all of them to finish and prints
     * the expected versus actual counter value.
     */
    void executeChildProcesses(int numberOfIncrementers) {
        System.out.println("Expected value of counter: " + (numberOfIncrementers * INCREMENTS));
        for (int i = 0; i < numberOfIncrementers; ++i) {
            executeChildProcess(i + 1);
        }
        for (Map.Entry<Integer, Process> child : children.entrySet()) {
            System.out.println("Waiting for " + child.getKey() + "...");
            try {
                child.getValue().waitFor();
                System.out.println("Reaped child " + child.getKey());
            } catch (InterruptedException ie) {
                // BUGFIX: restore the interrupt flag so callers can observe the
                // interruption instead of it being silently swallowed.
                Thread.currentThread().interrupt();
                ie.printStackTrace();
            }
        }
        System.out.println("Actual value of counter: " + region.get(KEY));
    }

    /** Resets the shared counter to zero before the children start. */
    void initializeEntry() {
        region.put(KEY, 0);
    }
}
| 4,519 |
0 | Create_ds/geode-examples/transaction/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/transaction/src/main/java/org/apache/geode_examples/transaction/Incrementer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.transaction;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.CommitConflictException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
public class Incrementer {
    // Child id passed on the command line; identifies this process to the parent.
    final int id;
    final ClientCache cache;
    final Region<String, Integer> region;

    Incrementer(int id, ClientCache cache, Region<String, Integer> region) {
        this.id = id;
        this.cache = cache;
        this.region = region;
    }

    /**
     * Child-process entry point: connects to the locator, opens a proxy to the
     * shared counter region, runs the transactional increment loop and disconnects.
     * args[0] is this child's numeric id.
     */
    public static void main(String[] args) {
        // connect to the locator using default port 10334
        ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
            .set("log-level", "WARN").create();
        // create a local region that matches the server region
        ClientRegionFactory<String, Integer> clientRegionFactory =
            cache.createClientRegionFactory(ClientRegionShortcut.PROXY);
        Region<String, Integer> region = clientRegionFactory.create(Example.REGION_NAME);
        Incrementer incrementer = new Incrementer(Integer.parseInt(args[0]), cache, region);
        incrementer.incrementEntry();
        cache.close();
    }

    /**
     * Increments the shared counter Example.INCREMENTS times. Each read-modify-write
     * runs in its own cache transaction; a CommitConflictException means another
     * process committed between our read and commit, so the attempt is retried
     * until it succeeds.
     */
    void incrementEntry() {
        CacheTransactionManager cacheTransactionManager = cache.getCacheTransactionManager();
        for (int i = 0; i < Example.INCREMENTS; ++i) {
            boolean incremented = false;
            while (!incremented) {
                try {
                    cacheTransactionManager.begin();
                    final Integer oldValue = region.get(Example.KEY);
                    final Integer newValue = oldValue + 1;
                    region.put(Example.KEY, newValue);
                    cacheTransactionManager.commit();
                    incremented = true;
                } catch (CommitConflictException cce) {
                    // Conflict with a concurrent committer — loop and retry.
                }
            }
        }
    }
}
| 4,520 |
0 | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples/colocation/Order.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.colocation;
import java.io.Serializable;
public class Order implements Serializable {

    private static final long serialVersionUID = 41372560L;

    // Composite key (order id + customer id) identifying this order.
    private OrderKey key;

    public Order() {}

    public Order(int orderId, int customerId) {
        key = new OrderKey(orderId, customerId);
    }

    public OrderKey getKey() {
        return key;
    }

    /** Convenience accessor delegating to the composite key. */
    public int getOrderId() {
        return getKey().getOrderId();
    }

    /** Convenience accessor delegating to the composite key. */
    public int getCustomerId() {
        return getKey().getCustomerId();
    }
}
| 4,521 |
0 | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples/colocation/OrderKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.colocation;
import java.io.Serializable;
public class OrderKey implements Serializable {
private static final long serialVersionUID = 60372860L;
private Integer customerId;
private Integer orderId;
public OrderKey() {}
public OrderKey(Integer orderId, Integer customerId) {
this.orderId = orderId;
this.customerId = customerId;
}
public Integer getCustomerId() {
return customerId;
}
public Integer getOrderId() {
return orderId;
}
@Override
public int hashCode() {
int result = orderId.hashCode();
result = 31 * result + customerId;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OrderKey other = (OrderKey) obj;
if (!orderId.equals(other.getOrderId()))
return false;
if (!customerId.equals(other.getCustomerId()))
return false;
return true;
}
@Override
public String toString() {
return "OrderKey [orderId=" + orderId + ", customerId=" + customerId + "]";
}
}
| 4,522 |
0 | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples/colocation/OrderPartitionResolver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.colocation;
import org.apache.geode.cache.EntryOperation;
import org.apache.geode.cache.PartitionResolver;
public class OrderPartitionResolver implements PartitionResolver {

    /** Routes each entry by the customer id embedded in its {@link OrderKey}. */
    @Override
    public Object getRoutingObject(EntryOperation opDetails) {
        return ((OrderKey) opDetails.getKey()).getCustomerId();
    }

    @Override
    public String getName() {
        return getClass().getName();
    }
}
| 4,523 |
0 | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples/colocation/Customer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.colocation;
import java.io.Serializable;
public class Customer implements Serializable {

    private static final long serialVersionUID = 95541179L;

    private int id;
    private String firstName;
    private String lastName;
    private String email;

    public Customer() {}

    public Customer(int id, String firstName, String lastName, String email) {
        this.id = id;
        this.firstName = firstName;
        this.lastName = lastName;
        this.email = email;
    }

    public int getId() {
        return id;
    }

    public String getFirstName() {
        return firstName;
    }

    public String getLastName() {
        return lastName;
    }

    public String getEmail() {
        return email;
    }

    @Override
    public String toString() {
        // BUGFIX: the original omitted ", " between the id and firstName fields,
        // producing e.g. "Customer [id=1firstName=...".
        return "Customer [id=" + id + ", firstName=" + firstName + ", lastName=" + lastName
            + ", email=" + email + "]";
    }
}
| 4,524 |
0 | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/colocation/src/main/java/org/apache/geode_examples/colocation/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.colocation;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import java.util.HashMap;
import java.util.Map;
public class Example {

    /**
     * Connects to the locator, then stores each generated customer and one order
     * per customer; the order key embeds the customer id so the partition
     * resolver colocates related entries.
     */
    public static void main(String[] args) {
        ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
            .set("log-level", "WARN").create();
        Region<Integer, Customer> customerRegion =
            cache.<Integer, Customer>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
                .create("customer");
        Region<OrderKey, Order> orderRegion =
            cache.<OrderKey, Order>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
                .create("order");
        // Iterate entries directly instead of keySet()+get().
        for (Map.Entry<Integer, Customer> entry : generateCustomers().entrySet()) {
            Customer customer = entry.getValue();
            Order order = new Order(entry.getKey() * 10, customer.getId());
            customerRegion.put(customer.getId(), customer);
            orderRegion.put(order.getKey(), order);
        }
        cache.close();
    }

    /**
     * Builds eight sample customers keyed 1..8, with emails derived from the
     * lowercased first and last names.
     */
    public static Map<Integer, Customer> generateCustomers() {
        String[] firstNames =
            {"Albert", "Bob", "Charles", "Daniel", "Ethan", "Frank", "Gregory", "Henrik"};
        String[] lastNames =
            {"Anthony", "Barkley", "Chen", "Dalembert", "English", "French", "Gobert", "Hakanson"};
        String[] emails = new String[firstNames.length];
        for (int i = 0; i < firstNames.length; i++) {
            emails[i] = firstNames[i].toLowerCase() + "." + lastNames[i].toLowerCase() + "@example.com";
        }
        Map<Integer, Customer> customers = new HashMap<Integer, Customer>();
        for (int i = 0; i < firstNames.length; i++) {
            customers.put(i + 1, new Customer(i + 1, firstNames[i], lastNames[i], emails[i]));
        }
        return customers;
    }
}
| 4,525 |
0 | Create_ds/geode-examples/replicated/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/replicated/src/main/java/org/apache/geode_examples/replicated/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.replicated;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.IntStream;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
public class Example {

    /** Proxy to the server-side "example-region". */
    private final Region<Integer, String> region;

    public Example(Region<Integer, String> region) {
        this.region = region;
    }

    /**
     * Connects to the locator, inserts ten entries into the server region one
     * put at a time, prints them back and disconnects.
     */
    public static void main(String[] args) {
        // Connect to the locator on the default port 10334.
        ClientCache clientCache = new ClientCacheFactory()
            .addPoolLocator("127.0.0.1", 10334)
            .set("log-level", "WARN")
            .create();
        // Proxy region mirroring the server-side region.
        Region<Integer, String> exampleRegion = clientCache
            .<Integer, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("example-region");
        Example app = new Example(exampleRegion);
        app.insertValues(10);
        app.printValues(app.getValues());
        clientCache.close();
    }

    /** Returns the keys currently present on the server. */
    Set<Integer> getValues() {
        Set<Integer> serverKeys = new HashSet<>();
        serverKeys.addAll(region.keySetOnServer());
        return serverKeys;
    }

    /** Inserts keys {@code 1..upperLimit}, one put per key. */
    void insertValues(int upperLimit) {
        for (int key = 1; key <= upperLimit; key++) {
            region.put(key, "value" + key);
        }
    }

    /** Prints each key with the value fetched from the server. */
    void printValues(Set<Integer> values) {
        for (Integer key : values) {
            System.out.println(String.format("%d:%s", key, region.get(key)));
        }
    }
}
| 4,526 |
0 | Create_ds/geode-examples/listener/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/listener/src/test/java/org/apache/geode/examples/listener/ExampleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.listener;
import static org.junit.Assert.assertEquals;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
public class ExampleTest {

    /**
     * Verifies that Example.putEntries inserts exactly Example.ITERATIONS entries
     * into the supplied map.
     */
    @Test
    public void testExample() throws Exception {
        Example example = new Example();
        // A plain HashMap stands in for the real region, so no server is needed.
        Map<Integer, String> region = new HashMap<>();
        example.putEntries(region);
        assertEquals(Example.ITERATIONS, region.size());
    }
}
| 4,527 |
0 | Create_ds/geode-examples/listener/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/listener/src/main/java/org/apache/geode_examples/listener/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.listener;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Queue;
import java.util.Random;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.stream.IntStream;
import org.apache.geode.cache.CacheListener;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
/**
 * Listener example: generates {@link #ITERATIONS} distinct random integers,
 * writes them into "example-region", and lets the attached
 * {@link ExampleCacheListener} report each create event.
 */
public class Example {

  /** Number of distinct entries generated and written by this example. */
  public static final int ITERATIONS = 100;

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();
    Example example = new Example();
    // create a local region that matches the server region
    ClientRegionFactory<Integer, String> clientRegionFactory =
        cache.createClientRegionFactory(ClientRegionShortcut.PROXY);
    clientRegionFactory.addCacheListener(new ExampleCacheListener());
    Region<Integer, String> region = clientRegionFactory.create("example-region");
    example.putEntries(region);
    cache.close();
  }

  /**
   * Produces ITERATIONS distinct random integers drawn from [0, ITERATIONS),
   * in first-drawn order.
   */
  private Collection<Integer> generateIntegers() {
    Collection<Integer> integers = new ArrayList<>(ITERATIONS);
    // Membership is tracked in a boolean array for O(1) lookups. The previous
    // implementation called ArrayList.contains on every draw — an O(n) scan
    // that made the whole loop quadratic. Values are bounded by ITERATIONS,
    // so a flat array suffices and needs no extra imports.
    boolean[] seen = new boolean[ITERATIONS];
    Iterator<Integer> iterator = new Random().ints(0, ITERATIONS).iterator();
    while (iterator.hasNext() && integers.size() < ITERATIONS) {
      int value = iterator.next();
      if (!seen[value]) {
        seen[value] = true;
        integers.add(value);
      }
    }
    return integers;
  }

  /**
   * Writes every generated integer into the given map/region, keyed by the
   * integer and valued by its decimal string, then reports the entry count.
   */
  public void putEntries(Map<Integer, String> region) {
    Collection<Integer> integers = generateIntegers();
    for (Integer integer : integers) {
      region.put(integer, integer.toString());
    }
    System.out.println("Created " + integers.size() + " entries.");
  }
}
| 4,528 |
0 | Create_ds/geode-examples/listener/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/listener/src/main/java/org/apache/geode_examples/listener/ExampleCacheListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.listener;
import java.util.LinkedList;
import java.util.Queue;
import org.apache.geode.cache.CacheListener;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.RegionEvent;
import org.apache.geode.cache.util.CacheListenerAdapter;
/**
 * Cache listener that writes a line to stdout each time an entry is created
 * in the region it is attached to. All other callbacks inherit the no-op
 * behavior of {@link CacheListenerAdapter}.
 */
public class ExampleCacheListener extends CacheListenerAdapter<Integer, String> {

  public ExampleCacheListener() {}

  @Override
  public void afterCreate(EntryEvent<Integer, String> event) {
    Integer createdKey = event.getKey();
    System.out.println("received create for key " + createdKey);
  }
}
| 4,529 |
0 | Create_ds/geode-examples/lucene/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/lucene/src/test/java/org/apache/geode/examples/lucene/ExampleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.lucene;
import static org.junit.Assert.assertEquals;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
/** Unit test for {@link Example#insertValues(java.util.Map)}. */
public class ExampleTest {

  @Test
  public void testInsertEntries() throws Exception {
    Map<Integer, EmployeeData> employees = new HashMap<>();
    Example.insertValues(employees);
    // One employee per name in the example's ten-entry roster.
    assertEquals(10, employees.size());
  }
}
| 4,530 |
0 | Create_ds/geode-examples/lucene/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/lucene/src/main/java/org/apache/geode_examples/lucene/EmployeeData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.lucene;
import java.io.Serializable;
import java.util.Collection;
/**
 * Value object describing one employee; stored as the region value in the
 * Lucene example and indexed by the server-side Lucene indexes. Serializable
 * so it can travel between client and Geode servers.
 *
 * <p>All fields are now {@code final}: the class exposes no setters, so
 * pinning the fields documents and enforces the immutability callers already
 * rely on.
 */
public class EmployeeData implements Serializable {
  private static final long serialVersionUID = 1L;

  private final String firstName;
  private final String lastName;
  private final int emplNumber;
  private final String email;
  private final int salary;
  private final int hoursPerWeek;
  private final Collection<Contact> contacts;

  public EmployeeData(String firstName, String lastName, int emplNumber, String email, int salary,
      int hoursPerWeek, Collection<Contact> contacts) {
    this.firstName = firstName;
    this.lastName = lastName;
    this.emplNumber = emplNumber;
    this.email = email;
    this.salary = salary;
    this.hoursPerWeek = hoursPerWeek;
    this.contacts = contacts;
  }

  public String getFirstName() {
    return firstName;
  }

  public String getLastName() {
    return lastName;
  }

  public int getEmplNumber() {
    return emplNumber;
  }

  public String getEmail() {
    return email;
  }

  public int getSalary() {
    return salary;
  }

  public int getHoursPerWeek() {
    return hoursPerWeek;
  }

  // NOTE(review): returns the internal collection; a caller could mutate it.
  // Consider returning an unmodifiable view if that matters — left as-is to
  // preserve current behavior.
  public Collection<Contact> getContacts() {
    return this.contacts;
  }

  @Override
  public String toString() {
    return "EmployeeData [firstName=" + firstName + ", lastName=" + lastName + ", emplNumber="
        + emplNumber + ", email= " + email + ", salary=" + salary + ", hoursPerWeek=" + hoursPerWeek
        + ", contacts=" + contacts + "]";
  }
}
| 4,531 |
0 | Create_ds/geode-examples/lucene/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/lucene/src/main/java/org/apache/geode_examples/lucene/Contact.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.lucene;
import java.io.Serializable;
import java.util.Arrays;
/**
 * Serializable contact entry nested inside {@code EmployeeData}; the Lucene
 * nested-object index in this example searches the phone numbers held here.
 */
public class Contact implements Serializable {
  // Added: a Serializable class should pin serialVersionUID so its wire
  // format survives recompilation (sibling EmployeeData already declares one).
  private static final long serialVersionUID = 1L;

  // Final: no setters exist, so the fields are effectively immutable.
  private final String name;
  private final String[] phoneNumbers;

  Contact(String name, String[] phoneNumbers) {
    this.name = name;
    this.phoneNumbers = phoneNumbers;
  }

  public String getName() {
    return this.name;
  }

  // NOTE(review): returns the internal array; callers can mutate its
  // elements. Kept as-is to preserve current behavior.
  public String[] getPhones() {
    return this.phoneNumbers;
  }

  @Override
  public String toString() {
    return "(name=" + name + ", phones=" + Arrays.toString(phoneNumbers) + ")";
  }
}
| 4,532 |
0 | Create_ds/geode-examples/lucene/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/lucene/src/main/java/org/apache/geode_examples/lucene/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.lucene;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;
import java.util.Random;
import java.util.function.Consumer;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.lucene.LuceneQuery;
import org.apache.geode.cache.lucene.LuceneQueryException;
import org.apache.geode.cache.lucene.LuceneService;
import org.apache.geode.cache.lucene.LuceneServiceProvider;
/**
 * Lucene example: populates "example-region" with ten employees, then runs a
 * fuzzy first-name query and a nested-object phone-number query against
 * Lucene indexes that the accompanying gfsh scripts created beforehand.
 */
public class Example {
  // These index names are predefined in gfsh scripts
  final static String SIMPLE_INDEX = "simpleIndex";
  final static String ANALYZER_INDEX = "analyzerIndex";
  final static String NESTEDOBJECT_INDEX = "nestedObjectIndex";

  // These region names are predefined in gfsh scripts
  final static String EXAMPLE_REGION = "example-region";

  public static void main(String[] args) throws LuceneQueryException {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();
    // create a local region that matches the server region
    Region<Integer, EmployeeData> region =
        cache.<Integer, EmployeeData>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .create("example-region");
    insertValues(region);
    query(cache);
    queryNestedObject(cache);
    cache.close();
  }

  /** Fuzzy query: first names within edit distance 2 of "Chris". */
  private static void query(ClientCache cache) throws LuceneQueryException {
    LuceneService lucene = LuceneServiceProvider.get(cache);
    LuceneQuery<Integer, EmployeeData> query = lucene.createLuceneQueryFactory()
        .create(SIMPLE_INDEX, EXAMPLE_REGION, "firstName:Chris~2", "firstname");
    System.out.println("Employees with first names like Chris: " + query.findValues());
  }

  /** Nested-object query over the contacts' phone-number arrays. */
  private static void queryNestedObject(ClientCache cache) throws LuceneQueryException {
    LuceneService lucene = LuceneServiceProvider.get(cache);
    LuceneQuery<Integer, EmployeeData> query = lucene.createLuceneQueryFactory().create(
        NESTEDOBJECT_INDEX, EXAMPLE_REGION, "5035330001 AND 5036430001", "contacts.phoneNumbers");
    System.out.println("Employees with phone number 5035330001 and 5036430001 in their contacts: "
        + query.findValues());
  }

  /**
   * Inserts ten employees keyed by employee number. Salaries and weekly hours
   * cycle through fixed five-entry tables (they are not random); each employee
   * carries two contacts whose phone numbers encode the loop index.
   */
  public static void insertValues(Map<Integer, EmployeeData> region) {
    // insert values into the region
    String[] firstNames = "Alex,Bertie,Kris,Dale,Frankie,Jamie,Morgan,Pat,Ricky,Taylor".split(",");
    String[] lastNames = "Able,Bell,Call,Driver,Forth,Jive,Minnow,Puts,Reliable,Tack".split(",");
    String[] contactNames = "Jack,John,Tom,William,Nick,Jason,Daniel,Sue,Mary,Mark".split(",");
    // Fixed: Java-style array declarations (were C-style "int salaries[]").
    int[] salaries = new int[] {60000, 80000, 75000, 90000, 100000};
    int[] hours = new int[] {40, 40, 40, 30, 20};
    int emplNumber = 10000;
    for (int index = 0; index < firstNames.length; index++) {
      emplNumber = emplNumber + index;
      Integer key = emplNumber;
      String email = firstNames[index] + "." + lastNames[index] + "@example.com";
      // Salary and hours cycle through the fixed tables above.
      int salary = salaries[index % 5];
      int hoursPerWeek = hours[index % 5];
      // Fixed: was a raw "new ArrayList()" (unchecked-assignment warning).
      ArrayList<Contact> contacts = new ArrayList<>();
      Contact contact1 = new Contact(contactNames[index] + " Jr",
          new String[] {"50353" + (30000 + index), "50363" + (30000 + index)});
      Contact contact2 = new Contact(contactNames[index],
          new String[] {"50354" + (30000 + index), "50364" + (30000 + index)});
      contacts.add(contact1);
      contacts.add(contact2);
      EmployeeData val = new EmployeeData(firstNames[index], lastNames[index], emplNumber, email,
          salary, hoursPerWeek, contacts);
      region.put(key, val);
    }
  }
}
| 4,533 |
0 | Create_ds/geode-examples/clientSecurity/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/clientSecurity/src/main/java/org/apache/geode_examples/clientSecurity/ExampleAuthInit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.clientSecurity;
import java.util.Properties;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.geode.LogWriter;
import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.security.AuthInitialize;
import org.apache.geode.security.AuthenticationFailedException;
/**
 * Client-side {@link AuthInitialize} that supplies credentials to the
 * cluster's SecurityManager. For demonstration purposes every user shares the
 * same hard-coded password — a real implementation would consult existing
 * infrastructure (e.g. LDAP) instead.
 */
public class ExampleAuthInit implements AuthInitialize {
  private static final Logger logger = LogManager.getLogger();

  private static final String USER_NAME = "security-username";
  private static final String PASSWORD = "security-password";
  private static final String INSECURE_PASSWORD_FOR_EVERY_USER = "123";

  /**
   * The implementer would use their existing infrastructure (e.g., ldap) here to populate these
   * properties with the user credentials. These properties will in turn be handled by the
   * implementer's design of SecurityManager to authenticate users and authorize operations.
   *
   * @throws AuthenticationFailedException if the security-username property is absent
   */
  @Override
  public Properties getCredentials(Properties securityProps) throws AuthenticationFailedException {
    Properties credentials = new Properties();
    String userName = securityProps.getProperty(USER_NAME);
    if (userName == null) {
      throw new AuthenticationFailedException(
          "ExampleAuthInit: user name property [" + USER_NAME + "] not set.");
    }
    credentials.setProperty(USER_NAME, userName);
    credentials.setProperty(PASSWORD, INSECURE_PASSWORD_FOR_EVERY_USER);
    // Fixed: message previously named the wrong class ("SampleAuthInit");
    // also switched to log4j parameterized logging to avoid eager concat.
    logger.info("ExampleAuthInit: successfully obtained credentials for user {}", userName);
    return credentials;
  }

  @Override
  public void close() {}

  // Deprecated AuthInitialize lifecycle hook; nothing to initialize here.
  @Override
  @Deprecated
  public void init(LogWriter systemLogger, LogWriter securityLogger)
      throws AuthenticationFailedException {}

  // Deprecated overload — delegates to the non-deprecated variant above.
  @Override
  @Deprecated
  public Properties getCredentials(Properties securityProps, DistributedMember server,
      boolean isPeer) throws AuthenticationFailedException {
    return getCredentials(securityProps);
  }
}
| 4,534 |
0 | Create_ds/geode-examples/clientSecurity/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/clientSecurity/src/main/java/org/apache/geode_examples/clientSecurity/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.clientSecurity;
import java.util.Properties;
import org.apache.commons.lang3.Validate;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
/**
 * Security example: runs the same region operations as four differently
 * privileged users over an SSL connection, showing which operations the
 * server's SecurityManager permits or rejects. Each scenario opens its own
 * client cache (via the constructor) and closes it through AutoCloseable.
 */
public class Example implements AutoCloseable {
private static final Logger logger = LogManager.getLogger();
private static final String REGION1 = "region1";
private static final String REGION2 = "region2";
// Some example data
private static final String AUTHOR_GROSSMAN = "Grossman";
private static final String BOOK_BY_GROSSMAN = "Soon I Will Be Invincible";
private static final String AUTHOR_ROTHFUSS = "Rothfuss";
private static final String BOOK_BY_ROTHFUSS = "The Name of the Wind";
private static final String AUTHOR_LYNCH = "Lynch";
private static final String BOOK_BY_LYNCH = "The Lies of Locke Lamora";
private static final String AUTHOR_SCALZI = "Scalzi";
private static final String BOOK_BY_SCALZI = "Old Man's War";
private static final String AUTHOR_SANDERSON = "Sanderson";
private static final String BOOK_BY_SANDERSON = "The Way of Kings";
private static final String AUTHOR_ABERCROMBIE = "Abercrombie";
private static final String BOOK_BY_ABERCROMBIE = "The Blade Itself";
// Each example will have its own proxy for the cache and both regions.
private final ClientCache cache;
private final Region<String, String> region1;
private final Region<String, String> region2;
/**
 * Opens an SSL-secured client cache authenticated as the given user (the
 * password is supplied by ExampleAuthInit) and creates caching proxies for
 * both server regions.
 */
private Example(String username) {
Properties props = new Properties();
props.setProperty("security-username", username);
props.setProperty("security-client-auth-init", ExampleAuthInit.class.getName());
props.setProperty("ssl-enabled-components", "all");
props.setProperty("ssl-keystore", "keystore.jks");
props.setProperty("ssl-keystore-password", "password");
props.setProperty("ssl-truststore", "truststore.jks");
props.setProperty("ssl-truststore-password", "password");
// connect to the locator using default port 10334
cache = new ClientCacheFactory(props).setPoolSubscriptionEnabled(true)
.addPoolLocator("localhost", 10334).create();
region1 = cache.<String, String>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
.create(REGION1);
region2 = cache.<String, String>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
.create(REGION2);
}
/** Runs each authorization scenario in turn, one user per scenario. */
public static void main(String[] args) throws Exception {
adminUserCanPutAndGetEverywhere();
writeOnlyUserCannotGet();
readOnlyUserCannotPut();
regionUserIsRestrictedByRegion();
}
// superUser holds DATA:READ and DATA:WRITE on all regions.
private static void adminUserCanPutAndGetEverywhere() throws Exception {
String valueFromRegion;
try (Example example = new Example("superUser")) {
// All puts and gets should pass
example.region1.put(AUTHOR_ABERCROMBIE, BOOK_BY_ABERCROMBIE);
example.region2.put(AUTHOR_GROSSMAN, BOOK_BY_GROSSMAN);
valueFromRegion = example.region1.get(AUTHOR_ABERCROMBIE);
Validate.isTrue(BOOK_BY_ABERCROMBIE.equals(valueFromRegion));
valueFromRegion = example.region2.get(AUTHOR_GROSSMAN);
Validate.isTrue(BOOK_BY_GROSSMAN.equals(valueFromRegion));
}
}
// dataWriter holds DATA:WRITE only; the final get is expected to throw.
private static void writeOnlyUserCannotGet() {
try (Example example = new Example("dataWriter")) {
// Writes to any region should pass
example.region1.put(AUTHOR_LYNCH, BOOK_BY_LYNCH);
example.region2.put(AUTHOR_ROTHFUSS, BOOK_BY_ROTHFUSS);
// This will fail since dataWriter does not have DATA:READ
example.region1.get(AUTHOR_LYNCH);
} catch (Exception e) {
logger.error("This exception should be caused by NotAuthorizedException", e);
}
}
// dataReader holds DATA:READ only; the final put is expected to throw.
private static void readOnlyUserCannotPut() {
try (Example example = new Example("dataReader")) {
// This will pass
example.region1.get(AUTHOR_LYNCH);
example.region2.get(AUTHOR_ROTHFUSS);
// This will fail since dataReader does not have DATA:WRITE
example.region1.put(AUTHOR_SANDERSON, BOOK_BY_SANDERSON);
} catch (Exception e) {
logger.error("This exception should be caused by NotAuthorizedException", e);
}
}
// region1dataAdmin holds DATA:READ and DATA:WRITE, but only on region1.
private static void regionUserIsRestrictedByRegion() {
try (Example example = new Example("region1dataAdmin")) {
// This user can read and write only in region1
example.region1.put(AUTHOR_SANDERSON, BOOK_BY_SANDERSON);
String valueFromRegion = example.region1.get(AUTHOR_SANDERSON);
Validate.isTrue(BOOK_BY_SANDERSON.equals(valueFromRegion));
// This will fail since region1dataAdmin does not have DATA:WRITE:region2
example.region2.put(AUTHOR_SCALZI, BOOK_BY_SCALZI);
} catch (Exception e) {
logger.error("This exception should be caused by NotAuthorizedException", e);
}
}
/**
 * We use AutoCloseable examples to guarantee the cache closes. Failure to close the cache would
 * cause failures when attempting to run with a new user.
 */
@Override
public void close() throws Exception {
cache.close();
}
}
| 4,535 |
0 | Create_ds/geode-examples/serialization/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/serialization/src/main/java/org/apache/geode_examples/serialization/Country.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.serialization;
/**
* <strong>Explicitly</strong> not serializable by java.io.Serializable,
* org.apache.geode.DataSerializable, or org.apache.geode.pdx.PdxSerializable.
*/
/**
 * <strong>Explicitly</strong> not serializable by java.io.Serializable,
 * org.apache.geode.DataSerializable, or org.apache.geode.pdx.PdxSerializable.
 * The serialization example stores it via Geode's reflection-based PDX
 * auto-serializer instead.
 */
public class Country {
  protected String name;
  protected String capitol;
  protected String language;
  protected String currency;
  protected int population;

  public Country() {
    this("", "", "", "", 0);
  }

  protected Country(String name, String capitol, String language, String currency, int population) {
    this.name = name;
    this.capitol = capitol;
    this.language = language;
    this.currency = currency;
    this.population = population;
  }

  public String getName() {
    return name;
  }

  public String getCapitol() {
    return capitol;
  }

  public void setCapitol(String capitol) {
    this.capitol = capitol;
  }

  public String getLanguage() {
    return language;
  }

  public void setLanguage(String language) {
    this.language = language;
  }

  public String getCurrency() {
    return currency;
  }

  public void setCurrency(String currency) {
    this.currency = currency;
  }

  public int getPopulation() {
    return population;
  }

  public void setPopulation(int population) {
    this.population = population;
  }

  /**
   * Appends ", " before the next field, unless the builder is empty or sits
   * directly after the opening "(" (i.e. this is the first printed field).
   * Extracted: this exact guard was duplicated four times in toString.
   */
  private static void appendSeparatorIfNeeded(StringBuilder builder) {
    if (0 < builder.length() && '(' != builder.charAt(builder.length() - 1)) {
      builder.append(", ");
    }
  }

  /**
   * Renders e.g. {@code "Florin (Capitol: X, Language: Y, Currency: Z, Population: N)"}.
   * Blank fields and a non-positive population are omitted; a blank or null
   * name yields the empty string.
   */
  public String toString() {
    StringBuilder builder = new StringBuilder();
    if (name != null && !name.isEmpty()) {
      builder.append(name);
      builder.append(" (");
      if (capitol != null && !capitol.isEmpty()) {
        appendSeparatorIfNeeded(builder);
        builder.append("Capitol: ");
        builder.append(capitol);
      }
      if (language != null && !language.isEmpty()) {
        appendSeparatorIfNeeded(builder);
        builder.append("Language: ");
        builder.append(language);
      }
      if (currency != null && !currency.isEmpty()) {
        appendSeparatorIfNeeded(builder);
        builder.append("Currency: ");
        builder.append(currency);
      }
      if (0 < population) {
        appendSeparatorIfNeeded(builder);
        builder.append("Population: ");
        builder.append(population);
      }
      builder.append(")");
    }
    return builder.toString();
  }
}
| 4,536 |
0 | Create_ds/geode-examples/serialization/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/serialization/src/main/java/org/apache/geode_examples/serialization/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.serialization;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.pdx.ReflectionBasedAutoSerializer;
import java.util.HashSet;
import java.util.Set;
/**
 * PDX serialization example: {@code Country} implements no serialization
 * interface, yet it can live in a Geode region because the client registers a
 * {@link ReflectionBasedAutoSerializer} for it.
 */
public class Example {
  public static final String ARENDELLE = "Arendelle";
  public static final String BORDURIA = "Borduria";
  public static final String CASCADIA = "Cascadia";
  public static final String ELBONIA = "Elbonia";
  public static final String FLORIN = "Florin";
  public static final String GRAUSTARK = "Graustark";
  public static final String LATVERIA = "Latveria";
  public static final String MARKOVIA = "Markovia";
  public static final String PARADOR = "Parador";
  public static final String SIERRA_GORDO = "Sierra Gordo";

  final Region<String, Country> region;

  public Example(Region<String, Country> region) {
    this.region = region;
  }

  public static void main(String[] args) {
    // connect to the locator using default port 10334; register the
    // reflection-based PDX serializer for the Country class
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN")
        .setPdxSerializer(
            new ReflectionBasedAutoSerializer("org.apache.geode_examples.serialization.Country"))
        .create();
    // create a local region that matches the server region
    Region<String, Country> region =
        cache.<String, Country>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("example-region");
    Example example = new Example(region);
    example.insertValues();
    example.printValues(example.getKeys());
    cache.close();
  }

  // Convenience factories that fill in progressively fewer defaults.
  Country create(String name) {
    return create(name, name + " City");
  }

  Country create(String name, String capitol) {
    return create(name, capitol, "");
  }

  Country create(String name, String capitol, String language) {
    return create(name, capitol, language, "", 0);
  }

  Country create(String name, String capitol, String language, String currency, int population) {
    return new Country(name, capitol, language, currency, population);
  }

  /** Snapshot of every key currently stored on the server. */
  Set<String> getKeys() {
    Set<String> serverKeys = region.keySetOnServer();
    return new HashSet<>(serverKeys);
  }

  /** Stores one Country per fictional nation, keyed by its name. */
  void insertValues() {
    insertValue(create(ARENDELLE, "Arendelle City", "Arendellii", "Arendelle Krona", 76573));
    insertValue(create(BORDURIA, "Szohôd", "Bordurian", "Bordurian Dinar", 1000000));
    insertValue(create(CASCADIA, "Portland", "Pacific Northwest English", "United States Dollar",
        16029520));
    insertValue(create(ELBONIA));
    insertValue(create(FLORIN));
    insertValue(create(GRAUSTARK, "Edelweiss"));
    insertValue(create(LATVERIA, "Doomstadt", "Latverian", "Latverian Franc", 500000));
    insertValue(create(MARKOVIA, "Markovburg", "German"));
    insertValue(create(PARADOR));
    insertValue(create(SIERRA_GORDO, "Rio Lindo", "Spanish"));
  }

  void insertValue(Country country) {
    region.put(country.getName(), country);
  }

  /** Fetches and prints each country as "key: country". */
  void printValues(Set<String> keys) {
    keys.forEach(key -> System.out.println(key + ": " + region.get(key)));
  }
}
| 4,537 |
0 | Create_ds/geode-examples/async/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/async/src/test/java/org/apache/geode/examples/async/ExampleAsyncEventListenerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.async;
import static org.junit.Assert.assertEquals;
import java.util.LinkedList;
import java.util.List;
import org.junit.Test;
import org.apache.geode.cache.Operation;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.asyncqueue.AsyncEvent;
import org.apache.geode.cache.wan.EventSequenceID;
/**
 * Unit tests for ExampleAsyncEventListener: verifies that a batch of create
 * events is processed successfully and that the spell-check substitutions
 * behave as expected, using a minimal in-memory AsyncEvent stub.
 */
public class ExampleAsyncEventListenerTest {
@Test
public void testAfterCreate() {
ExampleAsyncEventListener listener = new ExampleAsyncEventListener();
// Build a batch of create events carrying deliberately misspelled words.
List<AsyncEvent> events = new LinkedList<AsyncEvent>();
events.add(new TestAsyncEvent<Integer, String>(null, Operation.CREATE, 1, "teh"));
events.add(new TestAsyncEvent<Integer, String>(null, Operation.CREATE, 2, "wil"));
events.add(new TestAsyncEvent<Integer, String>(null, Operation.CREATE, 3, "i"));
// processEvents returns true when the whole batch was handled.
assertEquals(true, listener.processEvents(events));
}
@Test
public void testSpellCheck() {
ExampleAsyncEventListener listener = new ExampleAsyncEventListener();
// Correctly spelled words pass through unchanged...
assertEquals("that", listener.spellCheck("that"));
// ...while known misspellings are substituted.
assertEquals("the", listener.spellCheck("teh"));
assertEquals("will", listener.spellCheck("wil"));
assertEquals("I", listener.spellCheck("i"));
}
/**
 * Minimal AsyncEvent stub holding just region, operation, key, and value;
 * the remaining interface methods return fixed placeholder values.
 */
public class TestAsyncEvent<K, V> implements AsyncEvent<K, V> {
private final Region region;
private final Operation operation;
private final K key;
private final V value;
public TestAsyncEvent(Region region, Operation operation, K key, V value) {
this.region = region;
this.operation = operation;
this.key = key;
this.value = value;
}
@Override
public boolean getPossibleDuplicate() {
return false;
}
@Override
public EventSequenceID getEventSequenceID() {
return null;
}
@Override
public Region<K, V> getRegion() {
return region;
}
@Override
public Operation getOperation() {
return operation;
}
@Override
public Object getCallbackArgument() {
return null;
}
@Override
public K getKey() {
return key;
}
@Override
public V getDeserializedValue() {
return value;
}
@Override
public byte[] getSerializedValue() {
return new byte[0];
}
}
}
| 4,538 |
0 | Create_ds/geode-examples/async/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/async/src/test/java/org/apache/geode/examples/async/LevenshteinDistanceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.async;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
/**
 * Exercises {@link LevenshteinDistance#calculate} across null, empty, and typical inputs.
 */
public class LevenshteinDistanceTest {
  @Test
  public void testCalculate() throws Exception {
    LevenshteinDistance subject = new LevenshteinDistance();
    // Each row is {expected distance, first string, second string}.
    Object[][] cases = {
        {0, null, null}, {0, null, ""}, {0, "", null}, {0, "", ""},
        {3, null, "foo"}, {3, "foo", null}, {3, "", "foo"}, {3, "foo", ""},
        {3, "foo", "bar"}, {2, "foo", "ofo"}, {2, "foo", "oof"},
        {1, "the", "th"}, {1, "the", "he"}, {2, "the", "teh"},
        {2, "project", "porject"}};
    for (Object[] c : cases) {
      assertEquals(((Integer) c[0]).intValue(), subject.calculate((String) c[1], (String) c[2]));
    }
  }
}
| 4,539 |
0 | Create_ds/geode-examples/async/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/async/src/test/java/org/apache/geode/examples/async/ExampleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.async;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.List;
import org.apache.geode.cache.Region;
import org.junit.Test;
/**
 * Verifies that {@link Example#checkWords} stores every word into the incoming
 * region under sequential integer keys starting at 0.
 */
public class ExampleTest {
  @Test
  public void testExample() throws Exception {
    Region<String, String> outgoingRegion = mock(Region.class);
    Region<Integer, String> incomingRegion = mock(Region.class);
    final List<String> words = Arrays.asList("that", "teh");
    // Pretend the server already holds all corrected words so checkWords never polls.
    when(outgoingRegion.sizeOnServer()).thenReturn(words.size());

    new Example().checkWords(incomingRegion, outgoingRegion, words);

    // Each word must have been written under its sequential key.
    int key = 0;
    for (String word : words) {
      verify(incomingRegion).put(eq(key++), eq(word));
    }
  }
}
| 4,540 |
0 | Create_ds/geode-examples/async/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/async/src/main/java/org/apache/geode_examples/async/LevenshteinDistance.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.async;
/**
* The Levenshtein distance is a measure of the difference between two strings of characters. It can
* be useful in determining when two strings are very much alike, e.g., a transposed character.
* While not as powerful as other techniques, one use is simple spell-checking.
*/
public class LevenshteinDistance {
  /**
   * Computes the Levenshtein edit distance between {@code first} and {@code second}:
   * the minimum number of single-character insertions, deletions, and substitutions
   * needed to turn one string into the other. A null argument is treated as empty.
   *
   * <p>Uses the iterative two-row Wagner–Fischer dynamic program, which runs in
   * O(|first| * |second|) time and O(|second|) space. The previous implementation
   * recursed over all three edit choices and was exponential in the input length.
   *
   * @param first one string (may be null or empty)
   * @param second the other string (may be null or empty)
   * @return the edit distance; 0 when both inputs are null/empty
   */
  public int calculate(String first, String second) {
    if (first == null || first.isEmpty())
      return (second == null ? 0 : second.length());
    if (second == null || second.isEmpty())
      return first.length();
    // previous[j] holds the distance between first[0..i-1] and second[0..j].
    int[] previous = new int[second.length() + 1];
    int[] current = new int[second.length() + 1];
    for (int j = 0; j <= second.length(); j++) {
      previous[j] = j; // transforming "" into second[0..j] takes j insertions
    }
    for (int i = 1; i <= first.length(); i++) {
      current[0] = i; // transforming first[0..i] into "" takes i deletions
      for (int j = 1; j <= second.length(); j++) {
        final int cost = (first.charAt(i - 1) == second.charAt(j - 1)) ? 0 : 1;
        current[j] = Math.min(Math.min(previous[j] + 1, // delete from first
            current[j - 1] + 1), // insert into first
            previous[j - 1] + cost); // substitute (or match)
      }
      int[] swap = previous;
      previous = current;
      current = swap;
    }
    return previous[second.length()];
  }
}
| 4,541 |
0 | Create_ds/geode-examples/async/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/async/src/main/java/org/apache/geode_examples/async/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.async;
import java.util.Arrays;
import java.util.List;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.client.PoolManager;
/**
 * Client driver for the async-event-listener spell-check example. Writes misspelled
 * words into an incoming region, waits for the server-side listener to publish
 * corrections into an outgoing region, then prints the corrections.
 */
public class Example {
  public static final String INCOMING_REGION_NAME = "incoming-region";
  public static final String OUTGOING_REGION_NAME = "outgoing-region";

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().set("log-level", "WARN").create();
    final String poolName = "subscriptionPool";
    PoolManager.createFactory().addLocator("127.0.0.1", 10334).setSubscriptionEnabled(true)
        .create(poolName);

    // create a local region that matches the server region
    final ClientRegionFactory<Integer, String> incomingRegionFactory =
        cache.<Integer, String>createClientRegionFactory(ClientRegionShortcut.PROXY);
    Region<Integer, String> incomingRegion =
        incomingRegionFactory.setPoolName(poolName).create(INCOMING_REGION_NAME);

    // create another local region that matches the server region
    final ClientRegionFactory<String, String> outgoingRegionFactory =
        cache.<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY);
    Region<String, String> outgoingRegion =
        outgoingRegionFactory.setPoolName(poolName).create(OUTGOING_REGION_NAME);

    new Example().checkWords(incomingRegion, outgoingRegion,
        Arrays.asList("that", "teh", "wil", "i'"));
    cache.close();
  }

  /**
   * Puts each word into the incoming region under sequential keys, polls until the
   * outgoing region holds a corrected entry for every word, then prints each
   * correction.
   *
   * @param incomingRegion region the words are written into
   * @param outgoingRegion region the server-side listener fills with corrections
   * @param words the candidate words to spell-check
   */
  public void checkWords(Region<Integer, String> incomingRegion,
      Region<String, String> outgoingRegion, List<String> words) {
    int key = 0;
    for (String word : words) {
      incomingRegion.put(key++, word);
    }

    // Poll until the listener has published a corrected entry for every word.
    while (outgoingRegion.sizeOnServer() < words.size()) {
      try {
        Thread.sleep(500);
      } catch (InterruptedException ie) {
        // Restore the interrupt status and stop waiting rather than swallowing the
        // interrupt (the original ignored it, which could spin or hang forever).
        Thread.currentThread().interrupt();
        break;
      }
    }

    for (String candidate : outgoingRegion.keySetOnServer()) {
      System.out.println(candidate + " -> " + outgoingRegion.get(candidate));
    }
  }
}
| 4,542 |
0 | Create_ds/geode-examples/async/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/async/src/main/java/org/apache/geode_examples/async/ExampleAsyncEventListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.async;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.asyncqueue.AsyncEvent;
import org.apache.geode.cache.asyncqueue.AsyncEventListener;
/**
 * Server-side listener that spell-checks each incoming word against a small
 * dictionary of common English words and writes the correction into the
 * outgoing region.
 */
public class ExampleAsyncEventListener implements AsyncEventListener {
  // Dictionary of candidate corrections (a short list of the most common English words).
  private final String[] words =
      {"the", "be", "to", "of", "and", "I", "a", "in", "that", "have", "it", "for", "not", "on",
          "with", "he", "as", "you", "do", "at", "this", "but", "his", "by", "from", "they", "we",
          "say", "her", "she", "or", "an", "will", "my", "one", "all", "would", "there", "their",
          "what", "so", "up", "out", "if", "about", "who", "get", "which", "go", "me"};

  private final LevenshteinDistance distance = new LevenshteinDistance();

  /**
   * Returns the dictionary word with the smallest Levenshtein distance to
   * {@code candidate}. Ties resolve to the earliest word in the dictionary; an
   * exact dictionary match returns itself (distance 0).
   *
   * @param candidate the word to spell-check
   * @return the closest dictionary word, or {@code candidate} if the dictionary is empty
   */
  public String spellCheck(String candidate) {
    int index = -1;
    int shortest = Integer.MAX_VALUE;
    for (int i = 0; i < words.length; ++i) {
      final String word = words[i];
      final int score = distance.calculate(word, candidate);
      if (score < shortest) {
        index = i;
        shortest = score;
      }
    }
    if (0 <= index) {
      return words[index];
    }
    return candidate;
  }

  /**
   * Spell-checks each event's value and asynchronously writes the correction into
   * the outgoing region, keyed by the original (misspelled) value.
   *
   * @param events the batch of incoming-region events
   * @return {@code true} so the queue considers the batch processed
   */
  @Override
  public boolean processEvents(List<AsyncEvent> events) {
    final ExecutorService exService = Executors.newSingleThreadExecutor();
    for (AsyncEvent<Integer, String> event : events) {
      final String oldValue = event.getDeserializedValue();
      final String newValue = spellCheck(oldValue);
      exService.submit(() -> {
        Cache cache = (Cache) event.getRegion().getRegionService();
        Region<String, String> region = cache.getRegion(Example.OUTGOING_REGION_NAME);
        region.put(oldValue, newValue);
      });
    }
    // Let the already-submitted tasks finish, then release the worker thread.
    // Without this, every batch leaked a live single-thread executor.
    exService.shutdown();
    return true;
  }

  @Override
  public void close() {
    // NOP
  }
}
| 4,543 |
0 | Create_ds/geode-examples/queries/src/test/java/org/apache/geode_examples | Create_ds/geode-examples/queries/src/test/java/org/apache/geode_examples/queries/ExampleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.queries;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import java.util.Map;
/** Verifies the sample data set built for the querying example. */
public class ExampleTest {
  @Test
  public void testCreateEmployeeData() {
    final Map<Integer, EmployeeData> employees = Example.createEmployeeData();
    // One entry is created per first/last name pair, and there are 14 pairs.
    assertEquals(14, employees.size());
  }
}
| 4,544 |
0 | Create_ds/geode-examples/queries/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/queries/src/main/java/org/apache/geode_examples/queries/EmployeeData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.queries;
import java.io.Serializable;
/**
 * Immutable value object describing one employee; used as the region value type in
 * the querying example. Serializable so instances can be stored on Geode servers.
 */
public class EmployeeData implements Serializable {
  private static final long serialVersionUID = 1L;

  // All fields are final: an instance never changes after construction.
  private final String firstName;
  private final String lastName;
  private final int emplNumber;
  private final String email;
  private final int salary;
  private final int hoursPerWeek;

  /**
   * @param firstName employee's first name
   * @param lastName employee's last name
   * @param emplNumber employee number (used as the region key by the example)
   * @param email contact email address
   * @param salary annual salary
   * @param hoursPerWeek scheduled hours per week
   */
  public EmployeeData(String firstName, String lastName, int emplNumber, String email, int salary,
      int hoursPerWeek) {
    this.firstName = firstName;
    this.lastName = lastName;
    this.emplNumber = emplNumber;
    this.email = email;
    this.salary = salary;
    this.hoursPerWeek = hoursPerWeek;
  }

  public String getFirstName() {
    return firstName;
  }

  public String getLastName() {
    return lastName;
  }

  public int getEmplNumber() {
    return emplNumber;
  }

  public String getEmail() {
    return email;
  }

  public int getSalary() {
    return salary;
  }

  public int getHoursPerWeek() {
    return hoursPerWeek;
  }

  @Override
  public String toString() {
    // Note: the stray space after "email=" is kept for output compatibility.
    return "EmployeeData [firstName=" + firstName + ", lastName=" + lastName + ", emplNumber="
        + emplNumber + ", email= " + email + ", salary=" + salary + ", hoursPerWeek=" + hoursPerWeek
        + "]";
  }
}
| 4,545 |
0 | Create_ds/geode-examples/queries/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/queries/src/main/java/org/apache/geode_examples/queries/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.queries;
import java.util.HashMap;
import java.util.Map;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.query.FunctionDomainException;
import org.apache.geode.cache.query.NameResolutionException;
import org.apache.geode.cache.query.QueryInvocationTargetException;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.TypeMismatchException;
/**
 * Demonstrates OQL querying over a client/server region of {@link EmployeeData}
 * values: loads sample data, then runs three queries (all entries, part-time
 * employees, and lookup by last name) and prints the results.
 */
public class Example {
  // Region name and the three demonstration queries; final because they are constants.
  static final String REGIONNAME = "example-region";
  static final String QUERY1 = "SELECT DISTINCT * FROM /" + REGIONNAME;
  static final String QUERY2 =
      "SELECT DISTINCT * FROM /" + REGIONNAME + " h WHERE h.hoursPerWeek < 40";
  static final String QUERY3 = "SELECT DISTINCT * FROM /" + REGIONNAME + " x WHERE x.lastName=$1";

  public static void main(String[] args) throws NameResolutionException, TypeMismatchException,
      QueryInvocationTargetException, FunctionDomainException {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();

    // create a region on the server
    Region<Integer, EmployeeData> region =
        cache.<Integer, EmployeeData>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create(REGIONNAME);

    // create a set of employee data and put it into the region
    Map<Integer, EmployeeData> employees = createEmployeeData();
    region.putAll(employees);

    // count the values in the region
    int inserted = region.keySetOnServer().size();
    System.out.println(String.format("Counted %d keys in region %s", inserted, region.getName()));

    // fetch and print all values in the region (without using a query)
    region.keySetOnServer().forEach(key -> System.out.println(region.get(key)));

    // do a set of queries, printing the results of each query
    doQueries(cache);

    cache.close();
  }

  /**
   * Builds the sample data: 14 employees with round-robin salaries and hours.
   *
   * @return map of employee number to employee record
   */
  public static Map<Integer, EmployeeData> createEmployeeData() {
    String[] firstNames =
        "Alex,Bertie,Kris,Dale,Frankie,Jamie,Morgan,Pat,Ricky,Taylor,Casey,Jessie,Ryan,Skyler"
            .split(",");
    String[] lastNames =
        "Able,Bell,Call,Driver,Forth,Jive,Minnow,Puts,Reliable,Tack,Catch,Jam,Redo,Skip".split(",");
    int[] salaries = new int[] {60000, 80000, 75000, 90000, 100000};
    int[] hours = new int[] {40, 40, 40, 40, 30, 20};
    int emplNumber = 10000;

    // put data into the hashmap
    Map<Integer, EmployeeData> employees = new HashMap<Integer, EmployeeData>();
    for (int index = 0; index < firstNames.length; index++) {
      // NOTE(review): increments by the loop index, so numbers are unique but not
      // consecutive (10000, 10001, 10003, 10006, ...) — presumably intentional demo data.
      emplNumber = emplNumber + index;
      String email = firstNames[index] + "." + lastNames[index] + "@example.com";
      int salary = salaries[index % 5];
      int hoursPerWeek = hours[index % 6];
      EmployeeData value = new EmployeeData(firstNames[index], lastNames[index], emplNumber, email,
          salary, hoursPerWeek);
      employees.put(emplNumber, value);
    }
    return employees;
  }

  /**
   * Demonstrates querying using the API by doing 3 queries and printing the results.
   *
   * @param cache the connected client cache whose query service is used
   */
  @SuppressWarnings("unchecked") // Query.execute() returns Object; OQL results need a cast
  public static void doQueries(ClientCache cache) throws NameResolutionException,
      TypeMismatchException, QueryInvocationTargetException, FunctionDomainException {
    QueryService queryService = cache.getQueryService();

    // Query for every entry in the region, and print query results.
    System.out.println("\nExecuting query: " + QUERY1);
    SelectResults<EmployeeData> results =
        (SelectResults<EmployeeData>) queryService.newQuery(QUERY1).execute();
    printSetOfEmployees(results);

    // Query for all part time employees, and print query results.
    System.out.println("\nExecuting query: " + QUERY2);
    results = (SelectResults<EmployeeData>) queryService.newQuery(QUERY2).execute();
    printSetOfEmployees(results);

    // Query for last name of Jive, and print the full name and employee number.
    System.out.println("\nExecuting query: " + QUERY3);
    results =
        (SelectResults<EmployeeData>) queryService.newQuery(QUERY3).execute(new String[] {"Jive"});
    for (EmployeeData eachEmployee : results) {
      System.out.println(String.format("Employee %s %s has employee number %d",
          eachEmployee.getFirstName(), eachEmployee.getLastName(), eachEmployee.getEmplNumber()));
    }
  }

  /** Prints the size of a result set followed by each employee in it. */
  private static void printSetOfEmployees(SelectResults<EmployeeData> results) {
    System.out.println("Query returned " + results.size() + " results.");
    for (EmployeeData eachEmployee : results) {
      System.out.println(String.format("Employee: %s", eachEmployee.toString()));
    }
  }
}
| 4,546 |
0 | Create_ds/geode-examples/micrometerMetrics/src/test/java/org/apache/geode_examples | Create_ds/geode-examples/micrometerMetrics/src/test/java/org/apache/geode_examples/micrometerMetrics/SimpleMetricsPublishingServiceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.micrometerMetrics;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.catchThrowable;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.verify;
import static org.mockito.quality.Strictness.STRICT_STUBS;
import java.io.IOException;
import io.micrometer.prometheus.PrometheusMeterRegistry;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.conn.HttpHostConnectException;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnit;
import org.mockito.junit.MockitoRule;
import org.apache.geode.metrics.MetricsPublishingService;
import org.apache.geode.metrics.MetricsSession;
/**
 * Tests that {@link SimpleMetricsPublishingService} wires a Prometheus registry into
 * the metrics session and serves (then tears down) a scrape endpoint on the
 * configured port.
 */
public class SimpleMetricsPublishingServiceTest {
  private static final String ENDPOINT_URL = "http://localhost:9000/";

  @Rule
  public MockitoRule mockitoRule = MockitoJUnit.rule().strictness(STRICT_STUBS);

  @Mock
  public MetricsSession metricsSession;

  private MetricsPublishingService service;

  @Before
  public void setUp() {
    service = new SimpleMetricsPublishingService(9000);
  }

  @Test
  public void start_addsRegistryToMetricsSession() {
    service.start(metricsSession);

    verify(metricsSession).addSubregistry(any(PrometheusMeterRegistry.class));

    service.stop(metricsSession);
  }

  @Test
  public void start_addsAnHttpEndpointThatReturnsStatusOK() throws IOException {
    service.start(metricsSession);

    HttpResponse response = scrapeEndpoint();

    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(HttpStatus.SC_OK);

    service.stop(metricsSession);
  }

  @Test
  public void start_addsAnHttpEndpointThatContainsRegistryData() throws IOException {
    service.start(metricsSession);

    String responseBody = EntityUtils.toString(scrapeEndpoint().getEntity());

    // A freshly created registry has no meters, so the scrape payload is empty.
    assertThat(responseBody).isEmpty();

    service.stop(metricsSession);
  }

  @Test
  public void stop_removesRegistryFromMetricsSession() {
    service.start(metricsSession);

    service.stop(metricsSession);

    verify(metricsSession).removeSubregistry(any(PrometheusMeterRegistry.class));
  }

  @Test
  public void stop_hasNoHttpEndpointRunning() {
    service.start(metricsSession);
    service.stop(metricsSession);

    Throwable thrown = catchThrowable(() -> scrapeEndpoint());

    assertThat(thrown).isInstanceOf(HttpHostConnectException.class);
  }

  /** Issues an HTTP GET against the service's scrape endpoint. */
  private HttpResponse scrapeEndpoint() throws IOException {
    return HttpClientBuilder.create().build().execute(new HttpGet(ENDPOINT_URL));
  }
}
| 4,547 |
0 | Create_ds/geode-examples/micrometerMetrics/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/micrometerMetrics/src/main/java/org/apache/geode_examples/micrometerMetrics/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.micrometerMetrics;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.stream.IntStream;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
/**
 * Client driver for the micrometer-metrics example: writes sample entries to the
 * server region (generating metric activity), then verifies the servers'
 * Prometheus scrape endpoints are reachable.
 */
public class Example {
  public static void main(String[] args) {
    addCacheEntries();
    verifyPrometheusEndpointsAreRunning();
  }

  /** Puts ten entries into the example region and prints the server-side entry count. */
  private static void addCacheEntries() {
    // connect to the locator using default port
    ClientCache cache = new ClientCacheFactory().addPoolLocator("localhost", 10334)
        .set("log-level", "WARN").create();

    // create a local region that matches the server region
    Region<Integer, String> region =
        cache.<Integer, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("example-region");

    // add entries to the region
    IntStream.rangeClosed(1, 10).forEach(i -> region.put(i, "value" + i));

    System.out.println(String.format("The entry count for region %s on the server is %d.",
        region.getName(), region.sizeOnServer()));

    cache.close();
  }

  /**
   * Connects to each expected Prometheus endpoint and fails fast if one is missing
   * or does not answer 200 OK.
   *
   * @throws IllegalStateException if an endpoint is unreachable or returns non-OK
   */
  private static void verifyPrometheusEndpointsAreRunning() {
    String[] endpoints = {"http://localhost:9914", "http://localhost:9915"};

    for (String endpoint : endpoints) {
      HttpURLConnection connection = null;
      try {
        URL url = new URL(endpoint);
        connection = (HttpURLConnection) url.openConnection();
        connection.connect();
        if (HttpURLConnection.HTTP_OK != connection.getResponseCode()) {
          throw new IllegalStateException(
              "Prometheus endpoint returned status code " + connection.getResponseCode());
        }
      } catch (IOException e) {
        throw new IllegalStateException("Failed to connect to Prometheus endpoint", e);
      } finally {
        // Release the connection; the original leaked it on every iteration.
        if (connection != null) {
          connection.disconnect();
        }
      }
      System.out.println("A Prometheus endpoint is running at " + endpoint + ".");
    }
  }
}
| 4,548 |
0 | Create_ds/geode-examples/micrometerMetrics/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/micrometerMetrics/src/main/java/org/apache/geode_examples/micrometerMetrics/SimpleMetricsPublishingService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.micrometerMetrics;
import static io.micrometer.prometheus.PrometheusConfig.DEFAULT;
import static java.lang.Integer.getInteger;
import static org.slf4j.LoggerFactory.getLogger;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import com.sun.net.httpserver.HttpContext;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpServer;
import io.micrometer.prometheus.PrometheusMeterRegistry;
import org.slf4j.Logger;
import org.apache.geode.metrics.MetricsPublishingService;
import org.apache.geode.metrics.MetricsSession;
/**
 * A MetricsPublishingService that exposes the Geode metrics session's meters as a
 * Prometheus scrape endpoint served over plain HTTP on localhost. The port comes
 * from the {@code prometheus.metrics.port} system property (0 = any free port)
 * unless given explicitly.
 */
public class SimpleMetricsPublishingService implements MetricsPublishingService {
  private static final String PORT_PROPERTY = "prometheus.metrics.port";
  private static final int DEFAULT_PORT = 0; // If no port specified, use any port
  private static final String HOSTNAME = "localhost";
  private static final int PORT = getInteger(PORT_PROPERTY, DEFAULT_PORT);

  // final: the logger is a shared constant (was mutable before).
  private static final Logger LOG = getLogger(SimpleMetricsPublishingService.class);

  private final int port;
  private PrometheusMeterRegistry registry;
  private HttpServer server;

  public SimpleMetricsPublishingService() {
    this(PORT);
  }

  /** @param port TCP port to serve the scrape endpoint on (0 = any free port) */
  public SimpleMetricsPublishingService(int port) {
    this.port = port;
  }

  @Override
  public void start(MetricsSession session) {
    registry = new PrometheusMeterRegistry(DEFAULT);
    session.addSubregistry(registry);

    InetSocketAddress address = new InetSocketAddress(HOSTNAME, port);
    server = null;
    try {
      server = HttpServer.create(address, 0);
      HttpContext context = server.createContext("/");
      context.setHandler(this::requestHandler);
      server.start();

      int boundPort = server.getAddress().getPort();
      LOG.info("Started {} http://{}:{}/", getClass().getSimpleName(), HOSTNAME, boundPort);
    } catch (IOException thrown) {
      // server remains null on failure; stop() tolerates that.
      LOG.error("Exception while starting " + getClass().getSimpleName(), thrown);
    }
  }

  /** Writes the current Prometheus scrape text as the HTTP response body. */
  private void requestHandler(HttpExchange httpExchange) throws IOException {
    final byte[] scrapeBytes = registry.scrape().getBytes();
    httpExchange.sendResponseHeaders(200, scrapeBytes.length);
    final OutputStream responseBody = httpExchange.getResponseBody();
    responseBody.write(scrapeBytes);
    responseBody.close();
  }

  @Override
  public void stop(MetricsSession session) {
    session.removeSubregistry(registry);
    registry = null;
    // Guard against start() having failed before the server was created; the
    // original unconditionally called server.stop(0) and could throw NPE here.
    if (server != null) {
      server.stop(0);
      server = null;
    }
  }
}
| 4,549 |
0 | Create_ds/geode-examples/jdbc/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/jdbc/src/main/java/org/apache/geode_examples/jdbc/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.jdbc;
import java.util.HashSet;
import java.util.Set;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.pdx.ReflectionBasedAutoSerializer;
/**
 * Demonstrates the Geode JDBC Connector: a put on the {@code Parent} region is
 * written through to the external RDBMS table, and a get for a key not in the
 * cache is loaded back from that table.
 */
public class Example {
  private final Region<Long, Parent> region;

  public Example(Region<Long, Parent> region) {
    this.region = region;
  }

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache =
        new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
            .setPdxSerializer(
                new ReflectionBasedAutoSerializer("org.apache.geode_examples.jdbc.Parent"))
            .create();

    // create a local region that connects to the server region
    Region<Long, Parent> region =
        cache.<Long, Parent>createClientRegionFactory(ClientRegionShortcut.PROXY).create("Parent");
    System.out.println("Region=" + region.getFullPath());
    Example example = new Example(region);

    // Put entry in Parent region to verify it propagates to the external RDBMS table
    Long key = Long.valueOf(1);
    Parent value = new Parent(key, "Parent_1", Double.valueOf(123456789.0));
    region.put(key, value);
    System.out.println("Region.put() added an entry into Parent region. The key is " + key
        + ", and the value is " + value + ".");
    System.out.println(
        "If JDBC Connector is configured, the value will be persisted to external data source.");

    // Get an entry from Parent region that will trigger the cache loader to
    // retrieve the entry from the external table
    System.out.println(
        "Calling Region.get(). If JDBC Connector is configured, it will retrieve data from external data source and return a non-null value.");
    key = Long.valueOf(2);
    // The region is already typed Region<Long, Parent>; the original cast was redundant.
    Parent parent = region.get(key);
    System.out.println("The returned value of Region.get(" + key + ") is " + parent + ".");

    // Print the current entries in the region
    System.out.println("All entries currently in Parent region");
    example.printValues(example.getKeys());
    cache.close();
  }

  /** Returns a snapshot of the keys currently stored on the server. */
  Set<Long> getKeys() {
    return new HashSet<>(region.keySetOnServer());
  }

  /** Prints each key with its server-side value as {@code key:value}. */
  void printValues(Set<Long> values) {
    values.forEach(key -> System.out.println(String.format("%d:%s", key, region.get(key))));
  }
}
| 4,550 |
0 | Create_ds/geode-examples/jdbc/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/jdbc/src/main/java/org/apache/geode_examples/jdbc/Parent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.jdbc;
/**
 * Simple mutable value object mapped to the external {@code Parent} table.
 * The no-arg constructor plus getters/setters support reflection-based
 * (de)serialization.
 */
public class Parent {
  private Long id;
  private String name;
  private Double income;

  /** Required for reflection-based instantiation. */
  public Parent() {}

  public Parent(Long id, String name, Double income) {
    this.id = id;
    this.name = name;
    this.income = income;
  }

  public Long getId() {
    return id;
  }

  public String getName() {
    return name;
  }

  public Double getIncome() {
    return income;
  }

  public void setId(Long id) {
    this.id = id;
  }

  public void setName(String name) {
    this.name = name;
  }

  public void setIncome(Double income) {
    this.income = income;
  }

  @Override
  public String toString() {
    return String.format("Parent [Id=%s, name=%s, income=%s]", id, name, income);
  }
}
| 4,551 |
0 | Create_ds/geode-examples/writer/src/test/java/org/apache/geode_examples | Create_ds/geode-examples/writer/src/test/java/org/apache/geode_examples/writer/SSNVetterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.writer;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/** Unit tests for {@link SSNVetter#isValid(String)}. */
public class SSNVetterTest {
  @Test
  public void testIsValid() throws Exception {
    SSNVetter vetter = new SSNVetter();
    // Well-formed SSNs with an allowed area number.
    for (String valid : new String[] {"123-45-6789", "777-77-7777"}) {
      assertTrue(vetter.isValid(valid));
    }
    // Disallowed area numbers (666, 9xx) and a malformed value.
    for (String invalid : new String[] {"666-66-6666", "8675309", "999-000-0000"}) {
      assertFalse(vetter.isValid(invalid));
    }
  }
}
| 4,552 |
0 | Create_ds/geode-examples/writer/src/test/java/org/apache/geode_examples | Create_ds/geode-examples/writer/src/test/java/org/apache/geode_examples/writer/ExampleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.writer;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.Region;
import org.junit.Test;
/** Unit tests for {@link Example#getValidNames(Region)} using a mocked region. */
public class ExampleTest {
  @Test
  public void testExample() throws Exception {
    Example example = new Example();
    Region<String, String> region = mock(Region.class);
    // The mocked region rejects every invalid SSN the example tries to insert.
    for (String badSsn : new String[] {"666-66-6666", "8675309", "999-000-0000"}) {
      when(region.put(eq(badSsn), any())).thenThrow(new CacheWriterException());
    }
    // Only the names stored under valid SSNs survive.
    assertEquals(Arrays.asList("Bart Simpson", "Raymond Babbitt"),
        example.getValidNames(region));
  }
}
| 4,553 |
0 | Create_ds/geode-examples/writer/src/test/java/org/apache/geode_examples | Create_ds/geode-examples/writer/src/test/java/org/apache/geode_examples/writer/ExampleCacheWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.writer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.junit.Test;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
/** Unit tests for {@link ExampleCacheWriter#beforeCreate(EntryEvent)}. */
public class ExampleCacheWriterTest {
  /** Builds a mock entry event whose key is the given SSN. */
  private EntryEvent<String, String> eventWithKey(String ssn) {
    EntryEvent<String, String> event = mock(EntryEvent.class);
    when(event.getKey()).thenReturn(ssn);
    return event;
  }

  @Test(expected = CacheWriterException.class)
  public void testBeforeCreateFailsForBadSSN() throws Exception {
    new ExampleCacheWriter().beforeCreate(eventWithKey("666-66-6666"));
  }

  @Test
  public void testBeforeCreatePassesWithGoodSSN() throws Exception {
    new ExampleCacheWriter().beforeCreate(eventWithKey("555-66-6666"));
  }
}
| 4,554 |
0 | Create_ds/geode-examples/writer/src/main/java/org/apache/geode/examples | Create_ds/geode-examples/writer/src/main/java/org/apache/geode/examples/writer/ExampleCacheWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.writer;
import org.apache.geode.cache.CacheWriter;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.RegionEvent;
/**
 * CacheWriter that vets region keys as US Social Security numbers before any
 * create or update, rejecting writes with an invalid key.
 */
public class ExampleCacheWriter implements CacheWriter<String, String> {
  final SSNVetter vetter = new SSNVetter();

  /**
   * Shared validation for create and update: rejects the event when its key is
   * not a well-formed SSN.
   *
   * @throws CacheWriterException if the event's key is not a valid SSN
   */
  private void validateKey(EntryEvent<String, String> event) throws CacheWriterException {
    if (!vetter.isValid(event.getKey())) {
      throw new CacheWriterException("Invalid SSN");
    }
  }

  @Override
  public void beforeUpdate(EntryEvent<String, String> event) throws CacheWriterException {
    validateKey(event);
  }

  @Override
  public void beforeCreate(EntryEvent<String, String> event) throws CacheWriterException {
    validateKey(event);
  }

  @Override
  public void beforeDestroy(EntryEvent<String, String> event) throws CacheWriterException {
    // N/A
  }

  @Override
  public void beforeRegionDestroy(RegionEvent<String, String> event) throws CacheWriterException {
    // N/A
  }

  @Override
  public void beforeRegionClear(RegionEvent<String, String> event) throws CacheWriterException {
    // N/A
  }

  @Override
  public void close() {
    // N/A
  }
}
| 4,555 |
0 | Create_ds/geode-examples/writer/src/main/java/org/apache/geode/examples | Create_ds/geode-examples/writer/src/main/java/org/apache/geode/examples/writer/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.writer;
import java.util.ArrayList;
import java.util.List;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.client.ServerOperationException;
/**
 * Client demo for the cache-writer example: attempts to store several
 * name-by-SSN entries; entries with invalid SSNs are rejected by the
 * server-side cache writer.
 */
public class Example {
  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();

    // create a local region that matches the server region
    Region<String, String> region =
        cache.<String, String>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .create("example-region");

    new Example().getValidNames(region);
    cache.close();
  }

  /**
   * Tries to store each candidate name under its SSN and returns the names
   * that the region accepted, in attempt order.
   */
  public List<String> getValidNames(Region<String, String> region) {
    String[][] candidates = {
        {"123-45-6789", "Bart Simpson"},
        {"666-66-6666", "Bill Gates"},
        {"777-77-7777", "Raymond Babbitt"},
        {"8675309", "Jenny"},
        {"999-000-0000", "Blackberry"}};
    List<String> names = new ArrayList<>();
    for (String[] candidate : candidates) {
      storeIfValid(region, candidate[0], candidate[1], names);
    }
    return names;
  }

  /** Stores one entry, recording the name on success and reporting bad SSNs. */
  private void storeIfValid(Region<String, String> region, String ssn, String name,
      List<String> names) {
    try {
      region.put(ssn, name);
      names.add(name);
    } catch (CacheWriterException | ServerOperationException e) {
      System.out.println("Invalid SSN: " + ssn);
    }
  }
}
| 4,556 |
0 | Create_ds/geode-examples/writer/src/main/java/org/apache/geode/examples | Create_ds/geode-examples/writer/src/main/java/org/apache/geode/examples/writer/SSNVetter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.writer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/** Validates US Social Security numbers by format and area number. */
public class SSNVetter {
  // Area number must be 0xx-8xx (9xx is never issued); the group/serial
  // separators are optional; surrounding whitespace is tolerated.
  private final Pattern ssnPattern = Pattern.compile("^\\s*([0-8]\\d{2})-?\\d{2}-?\\d{4}\\s*$");

  /**
   * Returns whether {@code text} is a well-formed SSN with a permitted area number.
   *
   * @param text the candidate SSN, with or without dashes
   * @return {@code true} if the format matches and the area number is not 666
   */
  public boolean isValid(String text) {
    final Matcher m = ssnPattern.matcher(text);
    // Return the condition directly instead of if-return-true/return-false;
    // area number 666 is explicitly never assigned.
    return m.matches() && !"666".equals(m.group(1));
  }
}
| 4,557 |
0 | Create_ds/geode-examples/loader/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/loader/src/test/java/org/apache/geode/examples/loader/ExampleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.loader;
import static org.apache.geode_examples.loader.Example.printQuotes;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.HashMap;
import java.util.Map;
import org.apache.geode.cache.LoaderHelper;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.SystemOutRule;
import org.mockito.Mockito;
/**
 * Verifies that {@link Example#printQuotes(java.util.Map)} both loads quotes
 * through {@link QuoteLoader} and reports the load/fetch timing summaries on
 * standard output.
 */
public class ExampleTest {
// Captures everything written to System.out so it can be asserted on below.
@Rule
public SystemOutRule systemOutRule = new SystemOutRule().enableLog();
@Test
public void testExample() throws Exception {
QuoteLoader loader = new QuoteLoader();
// Spy a real HashMap so puts/containsKey behave normally, but route get()
// through the loader to mimic a region with a cache loader attached.
// NOTE(review): stubbing a spy with when(...) invokes the real get() once
// during stubbing (with a null key); harmless for HashMap but order-sensitive.
Map<String, String> region = Mockito.spy(new HashMap<>());
when(region.get(any())).then(inv -> {
String key = inv.getArgument(0);
// Wrap the requested key in a minimal LoaderHelper, as Geode would.
LoaderHelper<String, String> helper = mock(LoaderHelper.class);
when(helper.getKey()).thenReturn(key);
return loader.load(helper);
});
printQuotes(region);
// One known author appears, and both timing summaries mention all 20 quotes.
assertThat(systemOutRule.getLog()).contains("Anton Chekhov");
assertThat(systemOutRule.getLog()).contains("Loaded 20 definitions");
assertThat(systemOutRule.getLog()).contains("Fetched 20 cached definitions");
}
}
| 4,558 |
0 | Create_ds/geode-examples/loader/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/loader/src/main/java/org/apache/geode_examples/loader/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.loader;
import java.util.Arrays;
import java.util.Map;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
/**
 * Cache-loader demo client: fetches a quote for each author twice, showing the
 * slow first pass (loader invoked per miss) versus the fast cached second pass.
 */
public class Example {
  private static final String[] AUTHORS =
      ("Anton Chekhov,C. J. Cherryh,Dorothy Parker,Douglas Adams,Emily Dickinson,"
          + "Ernest Hemingway,F. Scott Fitzgerald,Henry David Thoreau,Henry Wadsworth Longfellow,"
          + "Herman Melville,Jean-Paul Sartre,Mark Twain,Orson Scott Card,Ray Bradbury,Robert Benchley,"
          + "Somerset Maugham,Stephen King,Terry Pratchett,Ursula K. Le Guin,William Faulkner")
              .split(",");

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();

    // create a local region that matches the server region
    Region<String, String> region =
        cache.<String, String>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .create("example-region");

    printQuotes(region);
    cache.close();
  }

  /** Prints every author's quote twice, reporting the elapsed time of each pass. */
  public static void printQuotes(Map<String, String> region) {
    // initial fetch invokes the cache loader
    {
      long elapsed = printQuotesAndMeasureTime(region);
      System.out.println(
          String.format("\n\nLoaded %d definitions in %d ms\n\n", AUTHORS.length, elapsed));
    }

    // fetch from cache, really fast!
    {
      long elapsed = printQuotesAndMeasureTime(region);
      System.out.println(
          String.format("\n\nFetched %d cached definitions in %d ms\n\n", AUTHORS.length, elapsed));
    }
  }

  /** Prints each author's quote and returns the elapsed wall time in milliseconds. */
  private static long printQuotesAndMeasureTime(Map<String, String> region) {
    // nanoTime() is monotonic; currentTimeMillis() can jump with clock adjustments.
    long start = System.nanoTime();
    Arrays.stream(AUTHORS)
        .forEach(author -> System.out.println(author + ": " + region.get(author)));
    return (System.nanoTime() - start) / 1_000_000;
  }
}
| 4,559 |
0 | Create_ds/geode-examples/loader/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/loader/src/main/java/org/apache/geode_examples/loader/QuoteLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.loader;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.CacheLoaderException;
import org.apache.geode.cache.Declarable;
import org.apache.geode.cache.LoaderHelper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Geode CacheLoader that resolves author names to writing quotes from a fixed
 * in-memory table, adding an artificial delay to simulate a slow backing store.
 */
public class QuoteLoader implements CacheLoader<String, String> {
private static final Logger log = LogManager.getLogger(QuoteLoader.class);
// Author-name -> quote lookup table, populated once at construction.
private final Map<String, String> quotes;
public QuoteLoader() {
quotes = getQuotes();
}
// No configuration is needed for this loader.
@Override
public void init(Properties props) {}
// Nothing to release.
@Override
public void close() {}
/**
 * Returns the quote for the requested key, or null when the author is unknown.
 * Sleeps briefly to imitate a remote lookup; an interrupt is surfaced as a
 * CacheLoaderException after re-setting the thread's interrupt flag.
 */
@Override
public String load(LoaderHelper<String, String> helper) throws CacheLoaderException {
log.info("Loading quote for {}", helper.getKey());
String quote = quotes.get(helper.getKey());
try {
// simulate network delay for a REST call or a database query
Thread.sleep(100);
return quote;
} catch (InterruptedException e) {
// Restore the interrupt status before propagating the failure to Geode.
Thread.currentThread().interrupt();
throw new CacheLoaderException(e);
}
}
// Builds the author -> quote table (20 entries).
Map<String, String> getQuotes() {
Map<String, String> quotes = new ConcurrentHashMap<String, String>();
// sourced from http://www.writersdigest.com/writing-quotes
quotes.put("Anton Chekhov",
"My own experience is that once a story has been written, one has to cross out the beginning and the end. It is there that we authors do most of our lying.");
quotes.put("C. J. Cherryh",
"It is perfectly okay to write garbage—as long as you edit brilliantly.");
quotes.put("Dorothy Parker", "I can’t write five words but that I change seven.");
quotes.put("Douglas Adams",
"I love deadlines. I like the whooshing sound they make as they fly by.");
quotes.put("Emily Dickinson", "A wounded deer leaps the highest.");
quotes.put("Ernest Hemingway", "Prose is architecture, not interior decoration.");
quotes.put("F. Scott Fitzgerald",
"Begin with an individual, and before you know it you have created a type; begin with a type, and you find you have created – nothing.");
quotes.put("Henry David Thoreau",
"Not that the story need be long, but it will take a long while to make it short.");
quotes.put("Henry Wadsworth Longfellow",
"Great is the art of beginning, but greater is the art of ending.");
quotes.put("Herman Melville", "To produce a mighty book, you must choose a mighty theme.");
quotes.put("Jean-Paul Sartre", "Poetry creates the myth, the prose writer draws its portrait.");
quotes.put("Mark Twain",
"Most writers regard the truth as their most valuable possession, and therefore are most economical in its use.");
quotes.put("Orson Scott Card",
"Everybody walks past a thousand story ideas every day. The good writers are the ones who see five or six of them. Most people don’t see any.");
quotes.put("Ray Bradbury",
"Any man who keeps working is not a failure. He may not be a great writer, but if he applies the old-fashioned virtues of hard, constant labor, he’ll eventually make some kind of career for himself as a writer.");
quotes.put("Robert Benchley",
"It took me fifteen years to discover I had no talent for writing, but I couldn’t give it up because by that time I was too famous.");
quotes.put("Somerset Maugham",
"If you can tell stories, create characters, devise incidents, and have sincerity and passion, it doesn’t matter a damn how you write.");
quotes.put("Stephen King",
"I try to create sympathy for my characters, then turn the monsters loose.");
quotes.put("Terry Pratchett",
"There’s no such thing as writer’s block. That was invented by people in California who couldn’t write.");
quotes.put("Ursula K. Le Guin",
"The unread story is not a story; it is little black marks on wood pulp. The reader, reading it, makes it live: a live thing, a story.");
quotes.put("William Faulkner",
"Get it down. Take chances. It may be bad, but it’s the only way you can do anything really good.");
return quotes;
}
}
| 4,560 |
0 | Create_ds/geode-examples/sessionState/webapp/src/main | Create_ds/geode-examples/sessionState/webapp/src/main/java/GeodeSessionStateServlet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
@WebServlet(name = "GeodeSessionStateServlet", urlPatterns = {"/index"})
public class GeodeSessionStateServlet extends HttpServlet {
protected void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
HttpSession session = request.getSession();
if (session.isNew()) {
request.setAttribute("isNew", "Session is new.");
} else {
request.setAttribute("isNew", "Session already existing");
session.setMaxInactiveInterval(90);
}
if (request.getParameter("action") != null) {
if (request.getParameter("action").equals("Set Attribute")
&& request.getParameter("key") != null && !request.getParameter("value").equals("null")) {
session.setAttribute(request.getParameter("key"), request.getParameter("value"));
}
if (request.getParameter("action").equals("Get Attribute")
&& request.getParameter("key") != null) {
request.setAttribute("getKey", session.getAttribute(request.getParameter("key")));
}
if (request.getParameter("action").equals("Delete Attribute")
&& request.getParameter("key") != null) {
session.removeAttribute(request.getParameter("key"));
}
}
request.getRequestDispatcher("/index.jsp").forward(request, response);
}
}
| 4,561 |
0 | Create_ds/geode-examples/sessionState/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/sessionState/src/main/java/org/apache/geode_examples/sessionState/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.sessionState;
/** Placeholder entry point that directs the user to the demo's README. */
public class Example {
  private static final String USAGE =
      "Refer to the README.md located in geode_examples/sessionState for info on how to run the session state demo.";

  public static void main(String[] args) {
    System.out.println(USAGE);
  }
}
| 4,562 |
0 | Create_ds/geode-examples/wan/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/wan/src/main/java/org/apache/geode_examples/wan/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.wan;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.IntStream;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
/**
 * WAN demo client: inserts entries into the London cluster's region and prints
 * back everything stored on the server.
 */
public class Example {
  private final Region<Integer, String> region;

  public Example(Region<Integer, String> region) {
    this.region = region;
  }

  public static void main(String[] args) {
    // connect to the locator in London cluster using port 10332
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10332)
        .set("log-level", "WARN").create();

    // create a local region that matches the server region
    Region<Integer, String> region =
        cache.<Integer, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("example-region");

    Example example = new Example(region);
    example.insertValues(10);
    example.printValues(example.getValues());
    cache.close();
  }

  /** Returns a snapshot of the keys currently stored on the server. */
  Set<Integer> getValues() {
    return new HashSet<>(region.keySetOnServer());
  }

  /** Stores entries keyed 1..upperLimit with values "value1".."value{upperLimit}". */
  void insertValues(int upperLimit) {
    for (int key = 1; key <= upperLimit; key++) {
      region.put(key, "value" + key);
    }
  }

  /** Prints each key with its server-side value as {@code key:value}. */
  void printValues(Set<Integer> values) {
    for (Integer key : values) {
      System.out.println(String.format("%d:%s", key, region.get(key)));
    }
  }
}
| 4,563 |
0 | Create_ds/geode-examples/persistence/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/persistence/src/main/java/org/apache/geode_examples/persistence/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.persistence;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
/**
 * Demonstrates a persistent server region: the client increments a single counter entry, so
 * each run of this program prints the previously persisted value followed by its successor.
 */
public class Example {
  /** Key of the single counter entry kept in the server region. */
  private static final String KEY = "counter";

  private final Region<String, Integer> region;

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();

    // create a local region that matches the server region
    Region<String, Integer> region =
        cache.<String, Integer>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("example-region");

    Example example = new Example(region);
    final int before = example.getCounter();
    example.increment();
    final int after = example.getCounter();
    System.out.println(before + " -> " + after);
    cache.close();
  }

  /** Wraps the region and seeds the counter entry with 0 the first time the region is used. */
  public Example(Region<String, Integer> region) {
    this.region = region;
    if (!region.containsKeyOnServer(KEY)) {
      region.put(KEY, 0);
    }
  }

  /** Reads the current counter value from the server. */
  public int getCounter() {
    return region.get(KEY);
  }

  /** Reads the counter, adds one, and writes it back (read-modify-write, not atomic). */
  public void increment() {
    int next = region.get(KEY) + 1;
    region.put(KEY, next);
  }
}
| 4,564 |
0 | Create_ds/geode-examples/compression/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/compression/src/main/java/org/apache/geode_examples/compression/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.compression;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.IntStream;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.*;
/**
 * Client driver for the compression example: connects through an explicitly created pool,
 * writes ten entries into the (server-side compressed) region, and prints them back.
 */
public class Example {
  /** Name of the client pool created for the locator connection. */
  private static final String POOL_NAME = "client-pool";

  private final Region<Integer, String> region;

  public Example(Region<Integer, String> region) {
    this.region = region;
  }

  public static void main(String[] args) {
    ClientCache cache = new ClientCacheFactory().set("log-level", "WARN").create();

    // connect to the locator using default port 10334
    PoolFactory poolFactory = PoolManager.createFactory();
    poolFactory.addLocator("127.0.0.1", 10334);
    poolFactory.create(POOL_NAME);

    // create a local region that matches the server region
    Region<Integer, String> region =
        cache.<Integer, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .setPoolName(POOL_NAME).create("example-region");

    Example example = new Example(region);
    example.putValues(10);
    example.printValues(example.getValues());
    cache.close();
  }

  /** Returns a snapshot of every key currently stored on the server. */
  Set<Integer> getValues() {
    return new HashSet<>(region.keySetOnServer());
  }

  /** Puts the entries {@code 1 -> "value1"} through {@code upperLimit -> "value<upperLimit>"}. */
  void putValues(int upperLimit) {
    for (int value = 1; value <= upperLimit; value++) {
      region.put(value, "value" + value);
    }
  }

  /** Prints each key with its server-side value as {@code key:value}, one pair per line. */
  void printValues(Set<Integer> values) {
    for (Integer key : values) {
      System.out.println(String.format("%d:%s", key, region.get(key)));
    }
  }
}
| 4,565 |
0 | Create_ds/geode-examples/durableMessaging/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/durableMessaging/src/main/java/org/apache/geode_examples/durableMessaging/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.durableMessaging;
import static org.apache.geode.distributed.ConfigurationProperties.DURABLE_CLIENT_ID;
import static org.apache.geode.distributed.ConfigurationProperties.DURABLE_CLIENT_TIMEOUT;
import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
import java.util.concurrent.CountDownLatch;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.InterestResultPolicy;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.util.CacheListenerAdapter;
/**
 * Demonstrates durable client messaging: a durable client registers interest, disconnects with
 * keepalive, misses {@link #numEvents} puts made by a second client, reconnects, and receives
 * every queued event from its durable message queue on the server.
 */
public class Example {
  /** Number of puts performed while the durable client is disconnected. */
  private static final int numEvents = 10;
  /** Counted down once per redelivered event; {@code main} blocks until it reaches zero. */
  private static final CountDownLatch waitForEventsLatch = new CountDownLatch(numEvents);

  public static void main(String[] args) throws Exception {
    ClientCache clientCacheOne = createDurableClient();
    final String regionName = "example-region";

    // Create a local caching proxy region that matches the server region
    ClientRegionFactory<Integer, String> clientOneRegionFactory =
        clientCacheOne.createClientRegionFactory(ClientRegionShortcut.PROXY);
    Region<Integer, String> exampleClientRegionOne = clientOneRegionFactory.create(regionName);

    // Register interest to create the durable client message queue
    exampleClientRegionOne.registerInterestForAllKeys(InterestResultPolicy.DEFAULT, true);

    // Close the client cache with keepalive set to true so
    // the durable client messages are preserved
    // for the duration of the configured timeout. In practice,
    // it is more likely the client would disconnect
    // due to a temporary network issue, but for this example the cache is explicitly closed.
    clientCacheOne.close(true);

    // Create a second client to do puts with while the first client is disconnected
    ClientCache clientCacheTwo = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();
    ClientRegionFactory<Integer, String> clientTwoRegionFactory =
        clientCacheTwo.createClientRegionFactory(ClientRegionShortcut.PROXY);
    Region<Integer, String> exampleClientRegionTwo = clientTwoRegionFactory.create(regionName);
    for (int i = 0; i < numEvents; ++i) {
      exampleClientRegionTwo.put(i, "testValue" + i);
    }

    // Close the second client and restart the durable client
    clientCacheTwo.close(false);
    clientCacheOne = createDurableClient();

    // Add an example cache listener so this client can react
    // when the server sends this client's events from the
    // durable message queue. This isn't required but helps
    // illustrate that the events are delivered successfully.
    clientOneRegionFactory = clientCacheOne.createClientRegionFactory(ClientRegionShortcut.PROXY);
    exampleClientRegionOne = clientOneRegionFactory
        .addCacheListener(new ExampleCacheListener<Integer, String>()).create(regionName);

    // Signal to the server that this client is ready to receive events.
    // Events in this client's durable message queue
    // will then be delivered and trigger our example cache listener.
    clientCacheOne.readyForEvents();

    // Use a count down latch to ensure that this client receives all queued events from the server
    waitForEventsLatch.await();
  }

  /** Builds a client cache configured as a durable client with subscriptions enabled. */
  private static ClientCache createDurableClient() {
    return new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        // Provide a unique identifier for this client's durable subscription message queue
        .set(DURABLE_CLIENT_ID, "1")
        // Provide a timeout in seconds for how long the server will wait for the client to
        // reconnect.
        // If this property isn't set explicitly, it defaults to 300 seconds.
        .set(DURABLE_CLIENT_TIMEOUT, "200")
        // This is required so the client can register interest for all keys on this durable client
        .setPoolSubscriptionEnabled(true).set(LOG_LEVEL, "WARN").create();
  }

  /**
   * Listener that counts the latch down for each create event redelivered after reconnection.
   *
   * <p>The type parameters are named {@code K}/{@code V}; the previous names ({@code Integer},
   * {@code String}) were type variables that shadowed the {@code java.lang} classes of the same
   * names, which was misleading.
   */
  public static class ExampleCacheListener<K, V> extends CacheListenerAdapter<K, V> {
    public ExampleCacheListener() {}

    @Override
    public void afterCreate(EntryEvent<K, V> event) {
      System.out.println(
          "Received create for key " + event.getKey() + " after durable client reconnection");
      waitForEventsLatch.countDown();
    }
  }
}
| 4,566 |
0 | Create_ds/geode-examples/functions/src/test/java/org/apache/geode_examples | Create_ds/geode-examples/functions/src/test/java/org/apache/geode_examples/functions/ExampleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.functions;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import org.junit.Test;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.ResultCollector;
/** Unit test for {@link Example#getPrimes} using mocked Geode execution objects. */
public class ExampleTest {
  /**
   * Verifies that getPrimes returns exactly the values produced by the (mocked) PrimeNumber
   * function execution as a set.
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testExample() throws Exception {
    Example example = new Example(10);
    Region<Integer, String> region = mock(Region.class);
    // 1 is not prime, so the server-side function is expected to report only 2, 3, 5 and 7.
    List<Integer> primes = Arrays.asList(2, 3, 5, 7);
    ResultCollector resultCollector = mock(ResultCollector.class);
    when(resultCollector.getResult()).thenReturn(primes);
    Execution execution = mock(Execution.class);
    when(execution.execute(PrimeNumber.ID)).thenReturn(resultCollector);
    assertEquals(new HashSet<>(primes), example.getPrimes(region, execution));
  }
}
| 4,567 |
0 | Create_ds/geode-examples/functions/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/functions/src/main/java/org/apache/geode_examples/functions/PrimeNumber.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.functions;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.RegionFunctionContext;
/**
 * Geode function that runs on the servers hosting the region and returns the sorted list of
 * region keys that are prime numbers.
 */
public class PrimeNumber implements Function {
  /** Identifier used by clients to invoke this function via {@code Execution.execute}. */
  public static final String ID = PrimeNumber.class.getSimpleName();

  /**
   * Returns whether {@code number} is prime by trial division up to {@code sqrt(number)}.
   *
   * <p>Bug fix: the previous version returned {@code true} for 1, 0 and negative numbers;
   * by definition only integers >= 2 can be prime.
   */
  private boolean isPrime(int number) {
    if (number < 2) {
      return false;
    }
    int limit = (int) Math.floor(Math.sqrt(number));
    for (int divisor = 2; divisor <= limit; ++divisor) {
      if (number % divisor == 0) {
        return false;
      }
    }
    return true;
  }

  @Override
  public String getId() {
    return ID;
  }

  /** Scans all keys of the target region, collects the primes, and sends them sorted. */
  @Override
  public void execute(FunctionContext context) {
    RegionFunctionContext regionContext = (RegionFunctionContext) context;
    Region<Integer, String> region = regionContext.getDataSet();
    List<Integer> primes = new ArrayList<>();
    Set<Integer> keys = region.keySet();
    for (Integer key : keys) {
      if (isPrime(key)) {
        primes.add(key);
      }
    }
    Collections.sort(primes);
    context.getResultSender().lastResult(primes);
  }
}
| 4,568 |
0 | Create_ds/geode-examples/functions/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/functions/src/main/java/org/apache/geode_examples/functions/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.functions;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.IntStream;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;
/**
 * Client driver for the function-execution example: seeds the region with the integers
 * 1..maximum and asks the server-side {@link PrimeNumber} function for the primes among them.
 */
public class Example {
  /** Upper bound (inclusive) of the integer range searched for primes. */
  private final int maximum;

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();

    // create a local region that matches the server region
    Region<Integer, String> region =
        cache.<Integer, String>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .create("example-region");

    Execution execution = FunctionService.onRegion(region);
    new Example().getPrimes(region, execution);
    cache.close();
  }

  public Example() {
    this(100);
  }

  public Example(int maximum) {
    this.maximum = maximum;
  }

  /**
   * Seeds the region with keys 1..maximum, executes the PrimeNumber function on the region,
   * and returns (and prints) the primes it reported.
   */
  public Set<Integer> getPrimes(Region<Integer, String> region, Execution execution) {
    // Straightforward IntStream iteration replaces the previous cast of a lambda to Iterable.
    IntStream.rangeClosed(1, maximum).forEach(key -> region.put(key, Integer.toString(key)));

    ResultCollector<Integer, List> results = execution.execute(PrimeNumber.ID);
    Set<Integer> primes = new HashSet<>(results.getResult());
    System.out.println("The primes in the range from 1 to " + maximum + " are:\n" + primes);
    return primes;
  }
}
| 4,569 |
0 | Create_ds/geode-examples/cq/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/cq/src/main/java/org/apache/geode_examples/cq/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.cq;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.query.*;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
/**
 * Demonstrates a continuous query (CQ): registers a CQ that matches values greater than 70,
 * then puts random values for 20 seconds so the {@link RandomEventListener} fires.
 */
public class Example {
  private ClientCache cache;
  private Region<Integer, Integer> region;
  private CqQuery randomTracker;

  /** Creates the cache and proxy region, then registers and starts the CQ. */
  private void init() throws CqException, RegionNotFoundException, CqExistsException {
    // connect to the locator using default port 10334
    this.cache = connectToLocallyRunningGeode();
    // create a local region that matches the server region
    this.region = cache.<Integer, Integer>createClientRegionFactory(ClientRegionShortcut.PROXY)
        .create("example-region");
    this.randomTracker = this.startCQ(this.cache, this.region);
  }

  /** Feeds random data into the region for the duration of the example. */
  private void run() throws InterruptedException {
    this.startPuttingData(this.region);
  }

  /** Closes the CQ and the cache. */
  private void close() throws CqException {
    this.randomTracker.close();
    this.cache.close();
  }

  public static void main(String[] args) throws Exception {
    Example mExample = new Example();
    mExample.init();
    mExample.run();
    mExample.close();
    System.out.println("\n---- So that is CQ's----\n");
  }

  /**
   * Registers a CQ named "randomTracker" that fires for region values greater than 70.
   * The previously raw {@code Region} parameter is now parameterized.
   */
  private CqQuery startCQ(ClientCache cache, Region<Integer, Integer> region)
      throws CqException, RegionNotFoundException, CqExistsException {
    // Get cache and queryService - refs to local cache and QueryService
    CqAttributesFactory cqf = new CqAttributesFactory();
    cqf.addCqListener(new RandomEventListener());
    CqAttributes cqa = cqf.create();
    String cqName = "randomTracker";
    String queryStr = "SELECT * FROM /example-region i where i > 70";
    QueryService queryService = region.getRegionService().getQueryService();
    CqQuery randomTracker = queryService.newCq(cqName, queryStr, cqa);
    randomTracker.execute();
    System.out.println("------- CQ is running\n");
    return randomTracker;
  }

  /** Puts a random key/value pair every 500ms for 20 seconds. */
  private void startPuttingData(Region<Integer, Integer> region) throws InterruptedException {
    // Example will run for 20 second
    Stopwatch stopWatch = Stopwatch.createStarted();
    while (stopWatch.elapsed(TimeUnit.SECONDS) < 20) {
      // 500ms delay to make this easier to follow
      Thread.sleep(500);
      int randomKey = ThreadLocalRandom.current().nextInt(0, 99 + 1);
      int randomValue = ThreadLocalRandom.current().nextInt(0, 100 + 1);
      region.put(randomKey, randomValue);
      System.out.println("Key: " + randomKey + " Value: " + randomValue);
    }
    stopWatch.stop();
  }

  /** Connects to the locally running locator with subscriptions enabled (required for CQs). */
  private ClientCache connectToLocallyRunningGeode() {
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .setPoolSubscriptionEnabled(true).set("log-level", "WARN").create();
    return cache;
  }
}
| 4,570 |
0 | Create_ds/geode-examples/cq/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/cq/src/main/java/org/apache/geode_examples/cq/RandomEventListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.cq;
import org.apache.geode.cache.Operation;
import org.apache.geode.cache.query.CqEvent;
import org.apache.geode.cache.query.CqListener;
/** CQ listener that prints a short message when the tracked query matches an update or create. */
public class RandomEventListener implements CqListener {
  /** Reports whether the matching event was an update or a newly created entry. */
  @Override
  public void onEvent(CqEvent cqEvent) {
    Operation op = cqEvent.getQueryOperation();
    if (op.isUpdate()) {
      System.out.print("-------Updated Value\n");
      return;
    }
    if (op.isCreate()) {
      System.out.print("-------Value Created\n");
    }
  }

  /** Called when CQ processing fails on the server side. */
  @Override
  public void onError(CqEvent cqEvent) {
    System.out.print("**Something bad happened**");
  }

  @Override
  public void close() {
    // nothing to release
  }
}
| 4,571 |
0 | Create_ds/geode-examples/partitioned/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/partitioned/src/main/java/org/apache/geode_examples/partitioned/EmployeeData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.partitioned;
import java.io.Serializable;
/** Immutable value object holding an employee's pay details, used as the region value. */
public class EmployeeData implements Serializable {
  private static final long serialVersionUID = 2095541179L;

  private final EmployeeKey nameAndNumber;
  private final int salary;
  private final int hoursPerWeek;

  public EmployeeData(EmployeeKey nameAndNumber, int salary, int hoursPerWeek) {
    this.nameAndNumber = nameAndNumber;
    this.salary = salary;
    this.hoursPerWeek = hoursPerWeek;
  }

  public EmployeeKey getNameAndNumber() {
    return nameAndNumber;
  }

  public int getSalary() {
    return salary;
  }

  public int getHoursPerWeek() {
    return hoursPerWeek;
  }

  @Override
  public String toString() {
    return String.format("EmployeeData [nameAndNumber=%s, salary=%d, hoursPerWeek=%d]",
        nameAndNumber, salary, hoursPerWeek);
  }
}
| 4,572 |
0 | Create_ds/geode-examples/partitioned/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/partitioned/src/main/java/org/apache/geode_examples/partitioned/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.partitioned;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
/**
 * Client driver for the partitioned-region example: inserts employee records with random
 * employee numbers and salaries, then prints every entry back from the server.
 */
public class Example {
  private final Region<EmployeeKey, EmployeeData> region;

  public Example(Region<EmployeeKey, EmployeeData> region) {
    this.region = region;
  }

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();

    // create a local region that matches the server region
    Region<EmployeeKey, EmployeeData> region =
        cache.<EmployeeKey, EmployeeData>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("example-region");

    Example example = new Example(region);
    example.insertValues(new String[] {"Alex Able", "Bertie Bell", "Chris Call", "Dale Driver",
        "Frankie Forth", "Jamie Jive", "Morgan Minnow", "Pat Pearson", "Ricky Reliable",
        "Taylor Tack", "Zelda Zankowski"});
    example.printValues(example.getValues());
    cache.close();
  }

  /** Returns a snapshot of every key currently stored on the server. */
  Set<EmployeeKey> getValues() {
    return new HashSet<>(region.keySetOnServer());
  }

  /** Creates one record per name with a random employee number and salary. */
  void insertValues(String[] names) {
    Random random = new Random();
    for (String name : names) {
      EmployeeKey key = new EmployeeKey(name, 1 + random.nextInt(1000000));
      EmployeeData val = new EmployeeData(key, 50000 + random.nextInt(100000), 40);
      region.put(key, val);
    }
  }

  /** Prints each employee's name with the full record fetched from the server. */
  void printValues(Set<EmployeeKey> values) {
    for (EmployeeKey key : values) {
      System.out.println(key.getName() + " -> " + region.get(key));
    }
  }
}
| 4,573 |
0 | Create_ds/geode-examples/partitioned/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/partitioned/src/main/java/org/apache/geode_examples/partitioned/EmployeeKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.partitioned;
import java.io.Serializable;
public class EmployeeKey implements Serializable {
private static final long serialVersionUID = 160372860L;
private final String name;
private final int emplNumber;
public EmployeeKey(String name, int emplNumber) {
this.name = name;
this.emplNumber = emplNumber;
}
public String getName() {
return name;
}
public int getEmplNumber() {
return emplNumber;
}
@Override
public int hashCode() {
int result = name.hashCode();
result = 31 * result + emplNumber;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EmployeeKey other = (EmployeeKey) obj;
if (emplNumber != other.emplNumber)
return false;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
return true;
}
@Override
public String toString() {
return "EmployeeKey [name=" + name + ", emplNumber=" + emplNumber + "]";
}
}
| 4,574 |
0 | Create_ds/geode-examples/expiration/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/expiration/src/main/java/org/apache/geode_examples/expiration/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.expiration;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.stream.IntStream;
/**
 * Demonstrates entry expiration: inserts ten random-keyed entries and then polls the server
 * until expiration has removed them all, logging the shrinking region size.
 */
public class Example {
  // NOTE: SimpleDateFormat is not thread-safe; this example only formats from the main thread.
  private static final DateFormat ISO_8601_TIMESTAMP_FORMAT =
      new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();

    Example example = new Example();

    // create a local region that matches the server region
    ClientRegionFactory<Integer, String> clientRegionFactory =
        cache.createClientRegionFactory(ClientRegionShortcut.PROXY);
    Region<Integer, String> region = clientRegionFactory.create("example-region");

    example.insertValues(region, example.generateIntegers(10));
    example.monitorEntries(region);
    cache.close();
  }

  /**
   * Returns {@code upperLimit} distinct integers drawn from [0, upperLimit), in the order
   * first produced by the random stream. (All values in the range eventually appear.)
   */
  private Collection<Integer> generateIntegers(int upperLimit) {
    IntStream stream = new Random().ints(0, upperLimit);
    Iterator<Integer> iterator = stream.iterator();
    Set<Integer> integers = new LinkedHashSet<>();
    while (iterator.hasNext() && integers.size() < upperLimit) {
      integers.add(iterator.next());
    }
    return integers;
  }

  /** Bulk-inserts each integer as a key with its string form as the value. */
  void insertValues(Region<Integer, String> region, Collection<Integer> integers) {
    // Previously declared as a raw Map; now properly parameterized.
    Map<Integer, String> values = new HashMap<>();
    for (Integer i : integers) {
      values.put(i, i.toString());
    }
    region.putAll(values);
    System.out.println(
        ISO_8601_TIMESTAMP_FORMAT.format(new Date()) + "\tInserted " + values.size() + " values.");
  }

  /** Polls once per second, printing the region size, until expiration empties the region. */
  public void monitorEntries(Region<Integer, String> region) {
    while (0 < region.sizeOnServer()) {
      try {
        Thread.sleep(1000);
        System.out.println(ISO_8601_TIMESTAMP_FORMAT.format(new Date()) + "\tThe region now has "
            + region.sizeOnServer() + " entries.");
      } catch (InterruptedException ie) {
        // Restore the interrupt status and stop polling instead of silently swallowing it.
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
| 4,575 |
0 | Create_ds/geode-examples/luceneSpatial/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/luceneSpatial/src/test/java/org/apache/geode/examples/luceneSpatial/TrainStopSerializerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.luceneSpatial;
import static org.junit.Assert.assertEquals;
import java.util.Collection;
import org.apache.lucene.document.Document;
import org.junit.Test;
/** Unit test for {@link TrainStopSerializer}. */
public class TrainStopSerializerTest {
  /** A single TrainStop must serialize to exactly one Lucene document. */
  @Test
  public void serializerReturnsSingleDocument() {
    TrainStopSerializer serializer = new TrainStopSerializer();
    TrainStop stop = new TrainStop("here", -122.8515139, 45.5099231);
    Collection<Document> docs = serializer.toDocuments(null, stop);
    assertEquals(1, docs.size());
  }
}
| 4,576 |
0 | Create_ds/geode-examples/luceneSpatial/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/luceneSpatial/src/test/java/org/apache/geode/examples/luceneSpatial/ExampleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.luceneSpatial;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import org.apache.geode.cache.lucene.LuceneService;
public class ExampleTest {
  /** putEntries must insert the three known train stops into the given map. */
  @Test
  public void testPutEntries() throws InterruptedException {
    Map<String, TrainStop> region = new HashMap<>();
    Example.putEntries(mock(LuceneService.class), region);
    assertEquals(3, region.size());
  }
}
| 4,577 |
0 | Create_ds/geode-examples/luceneSpatial/src/test/java/org/apache/geode/examples | Create_ds/geode-examples/luceneSpatial/src/test/java/org/apache/geode/examples/luceneSpatial/SpatialHelperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.luceneSpatial;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
import org.junit.Test;
public class SpatialHelperTest {
  /**
   * Indexes a single point and verifies that a findWithin query around nearby coordinates
   * locates it. All lucene resources are closed/released, which the original test leaked.
   */
  @Test
  public void queryFindsADocumentThatWasAdded() throws IOException {
    // Create an in-memory lucene index; directory and writer are Closeable, so manage them
    // with try-with-resources instead of leaking them.
    try (RAMDirectory directory = new RAMDirectory();
        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig())) {
      // Add a document with an indexed location to the lucene index.
      Document document = new Document();
      document.add(new TextField("name", "name", Field.Store.YES));
      for (Field field : SpatialHelper.getIndexableFields(-122.8515139, 45.5099231)) {
        document.add(field);
      }
      writer.addDocument(document);
      writer.commit();
      // Make sure a findWithin query locates the document.
      Query query = SpatialHelper.findWithin(-122.8515239, 45.5099331, 1);
      SearcherManager searcherManager = new SearcherManager(writer, null);
      try {
        IndexSearcher searcher = searcherManager.acquire();
        try {
          TopDocs results = searcher.search(query, 100);
          assertEquals(1, results.totalHits);
        } finally {
          // Acquired searchers must be released back to the manager.
          searcherManager.release(searcher);
        }
      } finally {
        searcherManager.close();
      }
    }
  }
}
| 4,578 |
0 | Create_ds/geode-examples/luceneSpatial/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/luceneSpatial/src/main/java/org/apache/geode_examples/luceneSpatial/TrainStopSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.luceneSpatial;
import java.util.Collection;
import java.util.Collections;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.spatial.vector.PointVectorStrategy;
import org.apache.geode.cache.lucene.LuceneIndex;
import org.apache.geode.cache.lucene.LuceneSerializer;
/**
* LuceneSerializer that converts train stops into lucene documents with the gps coordinates indexed
* using lucene's {@link PointVectorStrategy}
*/
/**
 * LuceneSerializer that converts train stops into lucene documents with the gps coordinates indexed
 * using lucene's {@link PointVectorStrategy}
 */
public class TrainStopSerializer implements LuceneSerializer<TrainStop> {
  @Override
  public Collection<Document> toDocuments(LuceneIndex index, TrainStop value) {
    Document document = new Document();
    // Index the stop's name as searchable text (not stored).
    document.add(new TextField("name", value.getName(), Field.Store.NO));
    // Attach the spatial fields that index the stop's coordinates.
    for (Field locationField : SpatialHelper.getIndexableFields(value.getLongitude(),
        value.getLatitude())) {
      document.add(locationField);
    }
    return Collections.singleton(document);
  }
}
| 4,579 |
0 | Create_ds/geode-examples/luceneSpatial/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/luceneSpatial/src/main/java/org/apache/geode_examples/luceneSpatial/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.luceneSpatial;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.lucene.LuceneQuery;
import org.apache.geode.cache.lucene.LuceneQueryException;
import org.apache.geode.cache.lucene.LuceneService;
import org.apache.geode.cache.lucene.LuceneServiceProvider;
/**
 * Client that populates the example region with a few train stops and then runs a lucene
 * spatial query to find stops near a given point.
 */
public class Example {
  public static void main(String[] args) throws InterruptedException, LuceneQueryException {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();
    // create a local region that matches the server region
    Region<String, TrainStop> stops =
        cache.<String, TrainStop>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("example-region");
    LuceneService luceneService = LuceneServiceProvider.get(cache);
    // Add some entries into the region, then query for nearby stops.
    putEntries(luceneService, stops);
    findNearbyTrainStops(luceneService);
    cache.close();
  }

  /** Runs a spatial query for stops within a quarter mile of a fixed point and prints them. */
  public static void findNearbyTrainStops(LuceneService luceneService)
      throws InterruptedException, LuceneQueryException {
    LuceneQuery<Integer, TrainStop> query = luceneService.createLuceneQueryFactory()
        .create("simpleIndex", "example-region",
            index -> SpatialHelper.findWithin(-122.8515139, 45.5099231, 0.25));
    Collection<TrainStop> results = query.findValues();
    System.out.println("Found stops: " + results);
  }

  /**
   * Inserts three known train stops, then blocks until the async lucene index has caught up.
   *
   * @param luceneService used to wait for the index flush
   * @param region destination map for the stops, keyed by stop name
   */
  public static void putEntries(LuceneService luceneService, Map<String, TrainStop> region)
      throws InterruptedException {
    region.put("Elmonica/SW 170th Ave",
        new TrainStop("Elmonica/SW 170th Ave", -122.85146341202486, 45.509962691078009));
    region.put("Willow Creek/SW 185th Ave TC",
        new TrainStop("Willow Creek/SW 185th Ave TC", -122.87021024485213, 45.517251954169652));
    region.put("Merlo Rd/SW 158th Ave",
        new TrainStop("Merlo Rd/SW 158th Ave", -122.84216239020598, 45.505240564251949));
    // Lucene indexing happens asynchronously, so wait for
    // the entries to be in the lucene index.
    luceneService.waitUntilFlushed("simpleIndex", "example-region", 1, TimeUnit.MINUTES);
  }
}
| 4,580 |
0 | Create_ds/geode-examples/luceneSpatial/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/luceneSpatial/src/main/java/org/apache/geode_examples/luceneSpatial/TrainStop.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.luceneSpatial;
import java.io.Serializable;
/**
 * An immutable, serializable value describing a named train stop and its GPS coordinates.
 */
public class TrainStop implements Serializable {
  private static final long serialVersionUID = 1L;

  // Final fields: instances never change after construction, so they are safe to share.
  private final String name;
  private final double latitude;
  private final double longitude;

  /**
   * @param name human-readable stop name (used as the region key in this example)
   * @param longitude longitude in decimal degrees
   * @param latitude latitude in decimal degrees
   */
  public TrainStop(String name, double longitude, double latitude) {
    this.name = name;
    this.longitude = longitude;
    this.latitude = latitude;
  }

  /** Returns the stop's name. */
  public String getName() {
    return name;
  }

  /** Returns the latitude in decimal degrees. */
  public double getLatitude() {
    return latitude;
  }

  /** Returns the longitude in decimal degrees. */
  public double getLongitude() {
    return longitude;
  }

  @Override
  public String toString() {
    return "TrainStop [name=" + name + ", location=" + longitude + ", " + latitude + "]";
  }
}
| 4,581 |
0 | Create_ds/geode-examples/luceneSpatial/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/luceneSpatial/src/main/java/org/apache/geode_examples/luceneSpatial/SpatialHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.luceneSpatial;
import static org.locationtech.spatial4j.distance.DistanceUtils.EARTH_MEAN_RADIUS_MI;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.query.SpatialOperation;
import org.apache.lucene.spatial.vector.PointVectorStrategy;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.distance.DistanceUtils;
import org.locationtech.spatial4j.shape.Point;
import org.locationtech.spatial4j.shape.impl.GeoCircle;
import org.locationtech.spatial4j.shape.impl.PointImpl;
/** Utility methods for indexing and querying GPS points with lucene's PointVectorStrategy. */
public class SpatialHelper {
  private static final SpatialContext CONTEXT = SpatialContext.GEO;
  private static final PointVectorStrategy STRATEGY =
      new PointVectorStrategy(CONTEXT, "location", PointVectorStrategy.DEFAULT_FIELDTYPE);

  /**
   * Return a lucene query that finds all points within the given radius from the given point
   */
  public static Query findWithin(double longitude, double latitude, double radiusMiles) {
    // spatial4j circles are expressed in degrees, so convert the radius from miles first.
    double radiusInDegrees = DistanceUtils.dist2Degrees(radiusMiles, EARTH_MEAN_RADIUS_MI);
    // Match every indexed point inside a circle centered on the given coordinates.
    GeoCircle searchArea =
        new GeoCircle(createPoint(longitude, latitude), radiusInDegrees, CONTEXT);
    return STRATEGY.makeQuery(new SpatialArgs(SpatialOperation.IsWithin, searchArea));
  }

  /**
   * Return a list of fields that should be added to lucene document to index the given point
   */
  public static Field[] getIndexableFields(double longitude, double latitude) {
    return STRATEGY.createIndexableFields(createPoint(longitude, latitude));
  }

  private static Point createPoint(double longitude, double latitude) {
    return new PointImpl(longitude, latitude, CONTEXT);
  }
}
| 4,582 |
0 | Create_ds/geode-examples/rest/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/rest/src/main/java/org/apache/geode_examples/rest/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.rest;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
/**
 * Demonstrates Geode's REST API by POSTing one JSON document into the example region and then
 * GETting all entries back. Assumes a server with the REST endpoint enabled on localhost:8080.
 */
public class Example {
  private static final String GEODE_REST_END_POINT = "http://localhost:8080/gemfire-api/v1/";
  private static final String GET_REQUEST_PARAMETER = "?limit=ALL";
  private static final String POST_REQUEST_PARAMETER = "?key=1";
  private static final String DATA = "{\"name\": \"Dan Smith\", \"technology\": \"Java\"}";

  public static void main(String[] args) throws IOException {
    HttpURLConnection httpPostRequestConnection = createHttpPostRequest();
    writeData(httpPostRequestConnection);
    HttpURLConnection httpGetRequestConnection = createHttpGetRequest();
    printValues(httpGetRequestConnection);
  }

  /** Builds a GET request for all entries of the example region. */
  private static HttpURLConnection createHttpGetRequest() throws IOException {
    URL url = new URL(GEODE_REST_END_POINT + "example-region" + GET_REQUEST_PARAMETER);
    HttpURLConnection httpURLConnection = getHttpURLConnection(url);
    httpURLConnection.setRequestMethod("GET");
    httpURLConnection.setRequestProperty("Accept", "application/json");
    return httpURLConnection;
  }

  /** Reads the response body of the given connection line by line and prints it. */
  private static void printValues(HttpURLConnection conn) throws IOException {
    // Decode the JSON response explicitly as UTF-8 instead of relying on the platform charset.
    try (BufferedReader br = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String response;
      while ((response = br.readLine()) != null) {
        System.out.println(response);
      }
    } finally {
      conn.disconnect();
    }
  }

  /** Builds a POST request that will store {@link #DATA} under key 1. */
  private static HttpURLConnection createHttpPostRequest() throws IOException {
    URL url = new URL(GEODE_REST_END_POINT + "example-region" + POST_REQUEST_PARAMETER);
    HttpURLConnection httpURLConnection = getHttpURLConnection(url);
    httpURLConnection.setRequestMethod("POST");
    httpURLConnection.setRequestProperty("Content-Type", "application/json; utf-8");
    httpURLConnection.setRequestProperty("Accept", "application/json");
    httpURLConnection.setDoOutput(true);
    return httpURLConnection;
  }

  private static HttpURLConnection getHttpURLConnection(URL url) throws IOException {
    return (HttpURLConnection) url.openConnection();
  }

  /** Sends {@link #DATA} as the POST body and completes the request. */
  private static void writeData(HttpURLConnection conn) throws IOException {
    try (OutputStream outputStream = conn.getOutputStream()) {
      outputStream.write(DATA.getBytes(StandardCharsets.UTF_8));
    }
    // Opening the input stream forces the request to complete; close it (and disconnect) so the
    // underlying connection is not leaked — the original left both open.
    conn.getInputStream().close();
    conn.disconnect();
  }
}
| 4,583 |
0 | Create_ds/geode-examples/eviction/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/eviction/src/main/java/org/apache/geode_examples/eviction/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.eviction;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.Random;
import java.util.Set;
import java.util.stream.IntStream;
/**
 * Populates the example region with random integer keys so that the server-side eviction policy
 * can be observed as entries are added.
 */
public class Example {
  /** Number of distinct keys to generate and insert. */
  public static final int ITERATIONS = 20;

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();
    Example example = new Example();
    // create a local region that matches the server region
    ClientRegionFactory<Integer, String> clientRegionFactory =
        cache.createClientRegionFactory(ClientRegionShortcut.PROXY);
    Region<Integer, String> region = clientRegionFactory.create("example-region");
    example.putEntries(region);
    cache.close();
  }

  /**
   * Generates {@link #ITERATIONS} distinct random integers in [0, ITERATIONS), preserving the
   * order in which they were first drawn.
   */
  private Collection<Integer> generateIntegers() {
    Iterator<Integer> iterator = new Random().ints(0, ITERATIONS).iterator();
    Set<Integer> integers = new LinkedHashSet<>();
    while (iterator.hasNext() && integers.size() < ITERATIONS) {
      integers.add(iterator.next());
    }
    return integers;
  }

  /**
   * Puts each generated integer into the region (key and its string value), printing the
   * server-side region size after every put so eviction can be observed.
   *
   * @param region the region to populate
   */
  public void putEntries(Region<Integer, String> region) {
    // Enhanced for loop replaces the explicit Iterator; the unused 'created' counter is removed.
    for (Integer integer : generateIntegers()) {
      region.put(integer, integer.toString());
      System.out.println("Added value for " + integer + "; the region now has "
          + region.sizeOnServer() + " entries.");
    }
  }
}
| 4,584 |
0 | Create_ds/geode-examples/indexes/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/indexes/src/main/java/org/apache/geode_examples/indexes/RegionPopulator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.indexes;
import org.apache.geode.cache.Region;
import java.util.Date;
import java.util.LinkedList;
import java.util.PrimitiveIterator;
import java.util.Random;
/**
 * Populates the example region with randomly generated {@link Passenger} entries, each assigned
 * one of a randomly generated pool of {@link FlightCode}s.
 */
public class RegionPopulator {
  // Candidate airline codes, cities, and passenger name parts used to randomize generated data.
  static final String[] airlines = "FFT,NKS,ASQ,AAL,UAL,SKW,SWA,HAL,JBU,VRD,DAL,ASA,EIA".split(",");
  static final String[] cities =
      "Arendelle City,Szohôd,Portland,Elbonia City,Florin City,Edelweiss,Doomstadt,Markovburg,Parador City,Rio Lindo"
          .split(",");
  static final String[] firstNames =
      "Ava,Brooklyn,Charlotte,Delilah,Emma,Faith,Grace,Harper,Isabella,Julia,Kaylee,Lillian,Mia,Natalie,Olivia,Peyton,Quinn,Riley,Sophia,Taylor,Unique,Victoria,Willow,Ximena,Yaretzi,Zoey"
          .split(",");
  static final String[] lastNames =
      "Smith,Johnson,Williams,Brown,Jones,Miller,Davis,Garcia,Rodriguez,Wilson,Martinez,Anderson,Taylor,Thomas,Hernandez,Moore,Martin,Jackson,Thompson,White,Lopez,Lee,Gonzalez,Harris,Clark,Lewis"
          .split(",");
  final Random random = new Random();
  // Flight pool built by populateFlights(); insertPassengers() picks flights from it by index.
  final LinkedList<FlightCode> flights = new LinkedList<>();

  /** Builds a pool of 50 random flights, then inserts 250 passengers into the region. */
  void populateRegion(Region<String, Passenger> region) {
    populateFlights(50);
    insertPassengers(250, region);
  }

  /**
   * Fills {@link #flights} with the requested number of random flights. Departure and arrival
   * cities are re-drawn until they differ, so no flight departs and arrives in the same city.
   *
   * @param numberOfFlights how many flights to generate
   */
  void populateFlights(int numberOfFlights) {
    PrimitiveIterator.OfInt flightNumbers = random.ints(1, 1000).iterator();
    PrimitiveIterator.OfInt airlineIndexes = random.ints(0, airlines.length).iterator();
    PrimitiveIterator.OfInt cityIndexes = random.ints(0, cities.length).iterator();
    while (flights.size() < numberOfFlights) {
      String departure = cities[cityIndexes.next()];
      String arrival = cities[cityIndexes.next()];
      while (departure.equals(arrival)) {
        arrival = cities[cityIndexes.next()];
      }
      FlightCode flight =
          new FlightCode(airlines[airlineIndexes.next()], flightNumbers.next(), departure, arrival);
      flights.add(flight);
    }
  }

  /**
   * Inserts randomly generated passengers (keyed by unique full name) until the region holds the
   * requested number of entries.
   *
   * NOTE(review): termination relies on the region starting (near) empty and on the name pool
   * (26 x 26 = 676 combinations) exceeding numberOfPassengers — confirm callers respect this.
   *
   * @param numberOfPassengers target server-side region size
   * @param region the region to populate
   */
  void insertPassengers(int numberOfPassengers, Region<String, Passenger> region) {
    PrimitiveIterator.OfInt firstNameIndexes = random.ints(0, firstNames.length).iterator();
    PrimitiveIterator.OfInt lastNameIndexes = random.ints(0, lastNames.length).iterator();
    PrimitiveIterator.OfInt ages = random.ints(20, 100).iterator();
    PrimitiveIterator.OfInt flightIndexes = random.ints(0, flights.size()).iterator();
    // Random offsets of up to one week, in milliseconds, used for departure/arrival times.
    PrimitiveIterator.OfInt milliSeconds = random.ints(0, 7 * 24 * 60 * 60 * 1000).iterator();
    while (region.sizeOnServer() < numberOfPassengers) {
      String name = firstNames[firstNameIndexes.next()] + " " + lastNames[lastNameIndexes.next()];
      if (!region.containsKey(name)) {
        // Arrival is always after departure: it adds a second random offset on top.
        final long departure = System.currentTimeMillis() + milliSeconds.next();
        final long arrival = departure + milliSeconds.next();
        Passenger passenger = new Passenger(name, ages.next(), flights.get(flightIndexes.next()),
            new Date(departure), new Date(arrival));
        region.put(passenger.getName(), passenger);
      }
    }
  }
}
| 4,585 |
0 | Create_ds/geode-examples/indexes/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/indexes/src/main/java/org/apache/geode_examples/indexes/Example.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.indexes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.query.FunctionDomainException;
import org.apache.geode.cache.query.NameResolutionException;
import org.apache.geode.cache.query.QueryInvocationTargetException;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.TypeMismatchException;
/**
 * Populates the example region with passengers, then runs OQL queries that exercise a full
 * region scan, a top-level index on passenger names, and a nested index on airline codes.
 */
public class Example {
  static String REGIONNAME = "example-region";
  // One full-scan query, one that can use an index on p.name, and one that can use an index
  // on the nested p.flight.airlineCode field.
  static String NON_INDEXED_QUERY = "SELECT DISTINCT * FROM /" + REGIONNAME;
  static String TOP_LEVEL_INDEX_QUERY =
      "SELECT DISTINCT * FROM /" + REGIONNAME + " p WHERE p.name LIKE $1";
  static String NESTED_INDEX_QUERY =
      "SELECT DISTINCT * FROM /" + REGIONNAME + " p WHERE p.flight.airlineCode=$1";

  public static void main(String[] args) {
    // connect to the locator using default port 10334
    ClientCache cache = new ClientCacheFactory().addPoolLocator("127.0.0.1", 10334)
        .set("log-level", "WARN").create();
    Example example = new Example();
    // create a local region that matches the server region
    ClientRegionFactory<String, Passenger> clientRegionFactory =
        cache.createClientRegionFactory(ClientRegionShortcut.PROXY);
    Region<String, Passenger> region = clientRegionFactory.create("example-region");
    QueryService queryService = cache.getQueryService();
    RegionPopulator populator = new RegionPopulator();
    populator.populateRegion(region);
    System.out.println("Total number of passengers: "
        + example.countResults(queryService, NON_INDEXED_QUERY, new Object[] {}));
    for (String lastName : populator.lastNames) {
      System.out.println("Flights for " + lastName + ": " + example.countResults(queryService,
          TOP_LEVEL_INDEX_QUERY, new Object[] {"%" + lastName}));
    }
    for (String airline : populator.airlines) {
      System.out.println("Flights for " + airline + ": "
          + example.countResults(queryService, NESTED_INDEX_QUERY, new Object[] {airline}));
    }
    cache.close();
  }

  /**
   * Executes the given OQL query with the given positional parameters.
   *
   * @param queryService the query service used to create and run the query
   * @param queryString the OQL query to execute
   * @param params positional query parameters ($1, $2, ...)
   * @return the number of matching passengers, or -1 if the query failed
   */
  int countResults(QueryService queryService, String queryString, Object[] params) {
    try {
      // The query API is untyped; the cast is safe because all queries here select Passengers.
      @SuppressWarnings("unchecked")
      SelectResults<Passenger> results =
          (SelectResults<Passenger>) queryService.newQuery(queryString).execute(params);
      // SelectResults already knows its size — no need for the original hand-rolled count loop.
      return results.size();
    } catch (FunctionDomainException | TypeMismatchException | NameResolutionException
        | QueryInvocationTargetException e) {
      e.printStackTrace();
      return -1;
    }
  }
}
| 4,586 |
0 | Create_ds/geode-examples/indexes/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/indexes/src/main/java/org/apache/geode_examples/indexes/FlightCode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.indexes;
import java.io.Serializable;
/**
 * An immutable, serializable flight identifier: airline code, flight number, and the departure
 * and arrival cities.
 */
public class FlightCode implements Serializable {
  private static final long serialVersionUID = -4252046061229265115L;

  // Final: a flight's identity never changes after construction.
  // Package-private visibility is preserved for same-package access and OQL field resolution.
  final String airlineCode;
  final int flightNumber;
  final String departure;
  final String arrival;

  /**
   * @param airlineCode three-letter airline code, e.g. "AAL"
   * @param flightNumber flight number (rendered zero-padded to three digits)
   * @param departure departure city
   * @param arrival arrival city
   */
  public FlightCode(String airlineCode, int flightNumber, String departure, String arrival) {
    this.airlineCode = airlineCode;
    this.flightNumber = flightNumber;
    this.departure = departure;
    this.arrival = arrival;
  }

  public String getAirlineCode() {
    return airlineCode;
  }

  public int getFlightNumber() {
    return flightNumber;
  }

  public String getDeparture() {
    return departure;
  }

  public String getArrival() {
    return arrival;
  }

  @Override
  public String toString() {
    return getAirlineCode() + String.format("%03d", getFlightNumber()) + " from " + getDeparture()
        + " to " + getArrival();
  }
}
| 4,587 |
0 | Create_ds/geode-examples/indexes/src/main/java/org/apache/geode_examples | Create_ds/geode-examples/indexes/src/main/java/org/apache/geode_examples/indexes/Passenger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode_examples.indexes;
import java.io.Serializable;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
 * An immutable, serializable passenger record: name, age, assigned flight, and the departure
 * and arrival times.
 */
public class Passenger implements Serializable {
  private static final long serialVersionUID = -991115968572408216L;

  // NOTE(review): SimpleDateFormat is not thread-safe; this shared static instance is only safe
  // while toString() is called from one thread at a time — confirm before concurrent use.
  static final DateFormat ISO_8601_TIMESTAMP_FORMAT =
      new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");

  // Final fields; Dates are defensively copied because java.util.Date is mutable.
  final String name;
  final int age;
  final FlightCode flight;
  final Date departure;
  final Date arrival;

  /**
   * @param name passenger's full name (used as the region key in this example)
   * @param age passenger's age in years
   * @param flight the flight this passenger is booked on
   * @param departure departure time (copied defensively; may be null)
   * @param arrival arrival time (copied defensively; may be null)
   */
  public Passenger(String name, int age, FlightCode flight, Date departure, Date arrival) {
    this.name = name;
    this.age = age;
    this.flight = flight;
    // Copy the mutable Dates so later mutation by the caller cannot alter this passenger.
    this.departure = copy(departure);
    this.arrival = copy(arrival);
  }

  /** Null-safe defensive copy of a mutable Date. */
  private static Date copy(Date date) {
    return date == null ? null : new Date(date.getTime());
  }

  public String getName() {
    return name;
  }

  public int getAge() {
    return age;
  }

  public FlightCode getFlight() {
    return flight;
  }

  /** Returns a defensive copy so callers cannot mutate this passenger's departure time. */
  public Date getDeparture() {
    return copy(departure);
  }

  /** Returns a defensive copy so callers cannot mutate this passenger's arrival time. */
  public Date getArrival() {
    return copy(arrival);
  }

  @Override
  public String toString() {
    return getName() + ", age " + getAge() + ", flight " + getFlight() + ", departing at "
        + ISO_8601_TIMESTAMP_FORMAT.format(getDeparture()) + ", arriving at "
        + ISO_8601_TIMESTAMP_FORMAT.format(getArrival());
  }
}
| 4,588 |
0 | Create_ds/flink-benchmarks/src/test/java/org/apache/flink | Create_ds/flink-benchmarks/src/test/java/org/apache/flink/benchmark/StreamNetworkBroadcastThroughputBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.streaming.runtime.io.benchmark.StreamNetworkBroadcastThroughputBenchmark;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import static org.openjdk.jmh.annotations.Scope.Thread;
/** JMH harness that drives {@code StreamNetworkBroadcastThroughputBenchmark}. */
@OperationsPerInvocation(
        value = StreamNetworkBroadcastThroughputBenchmarkExecutor.RECORDS_PER_INVOCATION)
public class StreamNetworkBroadcastThroughputBenchmarkExecutor extends BenchmarkBase {

    static final int RECORDS_PER_INVOCATION = 500_000;

    public static void main(String[] args) throws RunnerException {
        String includePattern =
                ".*"
                        + StreamNetworkBroadcastThroughputBenchmarkExecutor.class.getCanonicalName()
                        + ".*";
        Options benchmarkOptions =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(includePattern)
                        .build();
        new Runner(benchmarkOptions).run();
    }

    @Benchmark
    public void networkBroadcastThroughput(MultiEnvironment context) throws Exception {
        context.executeBenchmark(RECORDS_PER_INVOCATION);
    }

    /** Per-thread environment, configured via {@code super.setUp(4, 100, 100)}. */
    @State(Thread)
    public static class MultiEnvironment extends StreamNetworkBroadcastThroughputBenchmark {

        @Setup
        public void setUp() throws Exception {
            super.setUp(4, 100, 100);
        }

        @TearDown
        public void tearDown() throws Exception {
            super.tearDown();
        }
    }
}
| 4,589 |
0 | Create_ds/flink-benchmarks/src/test/java/org/apache/flink | Create_ds/flink-benchmarks/src/test/java/org/apache/flink/benchmark/DataSkewStreamNetworkThroughputBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.runtime.io.benchmark.DataSkewStreamNetworkThroughputBenchmark;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import static org.openjdk.jmh.annotations.Scope.Thread;
/** JMH throughput benchmark runner for data skew scenario. */
@OperationsPerInvocation(
        value = DataSkewStreamNetworkThroughputBenchmarkExecutor.RECORDS_PER_INVOCATION)
public class DataSkewStreamNetworkThroughputBenchmarkExecutor extends BenchmarkBase {

    static final int RECORDS_PER_INVOCATION = 5_000_000;

    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(
                                ".*"
                                        + DataSkewStreamNetworkThroughputBenchmarkExecutor.class
                                                .getCanonicalName()
                                        + ".*")
                        .build();
        new Runner(options).run();
    }

    @Benchmark
    public void networkSkewedThroughput(MultiEnvironment context) throws Exception {
        context.executeBenchmark(RECORDS_PER_INVOCATION);
    }

    /** Setup for the benchmark(s). */
    @State(Thread)
    public static class MultiEnvironment extends DataSkewStreamNetworkThroughputBenchmark {
        // 1ms buffer timeout
        private final int flushTimeout = 1;
        // 1000 num of channels (subpartitions)
        private final int channels = 1000;
        // 10 writer threads, to increase the load on the machine
        private final int writers = 10;

        @Setup
        public void setUp() throws Exception {
            setUp(writers, channels, flushTimeout, false, false, -1, -1, new Configuration());
        }

        // Fix: every sibling executor (e.g. StreamNetworkThroughputBenchmarkExecutor)
        // releases the network environment after each trial; this one previously
        // leaked it between JMH trials because no @TearDown hook was registered.
        @TearDown
        public void tearDown() throws Exception {
            super.tearDown();
        }
    }
}
| 4,590 |
0 | Create_ds/flink-benchmarks/src/test/java/org/apache/flink | Create_ds/flink-benchmarks/src/test/java/org/apache/flink/benchmark/StreamNetworkThroughputBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.net.SSLUtilsTest;
import org.apache.flink.streaming.runtime.io.benchmark.StreamNetworkThroughputBenchmark;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.Arrays;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.openjdk.jmh.annotations.Scope.Thread;
/** JMH throughput benchmark runner. */
@OperationsPerInvocation(value = StreamNetworkThroughputBenchmarkExecutor.RECORDS_PER_INVOCATION)
public class StreamNetworkThroughputBenchmarkExecutor extends BenchmarkBase {

    static final int RECORDS_PER_INVOCATION = 5_000_000;

    public static void main(String[] args) throws RunnerException {
        String includePattern =
                ".*" + StreamNetworkThroughputBenchmarkExecutor.class.getCanonicalName() + ".*";
        Options options =
                new OptionsBuilder().verbosity(VerboseMode.NORMAL).include(includePattern).build();
        new Runner(options).run();
    }

    @Benchmark
    public void networkThroughput(MultiEnvironment context) throws Exception {
        context.executeBenchmark(RECORDS_PER_INVOCATION);
    }

    /**
     * Per-thread environment. The {@code channelsFlushTimeout} parameter encodes
     * "channels,flushTimeout[,SSL|OpenSSL]" and is decoded by the parse helpers below.
     */
    @State(Thread)
    public static class MultiEnvironment extends StreamNetworkThroughputBenchmark {

        @Param({
            "100,100ms",
            "100,100ms,SSL",
            "1000,1ms",
            "1000,100ms",
            "1000,100ms,SSL",
            "1000,100ms,OpenSSL"
        })
        public String channelsFlushTimeout = "100,100ms";

        // Do not spam continuous benchmarking with number of writers parameter.
        // @Param({"1", "4"})
        public int writers = 4;

        /** Returns the SSL provider name ("JDK"/"OPENSSL") or null when SSL is disabled. */
        private static String parseEnableSSL(String channelsFlushTimeout) {
            boolean jdkSsl = false;
            boolean openSsl = false;
            for (String token : channelsFlushTimeout.split(",")) {
                jdkSsl |= "SSL".equals(token);
                openSsl |= "OpenSSL".equals(token);
            }
            if (jdkSsl) {
                return "JDK";
            }
            if (openSsl) {
                return "OPENSSL";
            }
            return null;
        }

        /** Extracts the flush timeout (second token, "<n>ms") in milliseconds. */
        private static int parseFlushTimeout(String channelsFlushTimeout) {
            String[] tokens = channelsFlushTimeout.split(",");
            checkArgument(tokens.length >= 2);
            String timeoutToken = tokens[1];
            checkArgument(timeoutToken.endsWith("ms"));
            return Integer.parseInt(timeoutToken.substring(0, timeoutToken.length() - 2));
        }

        /** Extracts the channel (subpartition) count from the first token. */
        private static int parseChannels(String channelsFlushTimeout) {
            String[] tokens = channelsFlushTimeout.split(",");
            checkArgument(tokens.length >= 1);
            return Integer.parseInt(tokens[0]);
        }

        @Setup
        public void setUp() throws Exception {
            final String sslProvider = parseEnableSSL(channelsFlushTimeout);
            final Configuration config =
                    sslProvider == null
                            ? new Configuration()
                            : SSLUtilsTest.createInternalSslConfigWithKeyAndTrustStores(
                                    sslProvider);
            setUp(
                    writers,
                    parseChannels(channelsFlushTimeout),
                    parseFlushTimeout(channelsFlushTimeout),
                    false,
                    false,
                    -1,
                    -1,
                    config);
        }

        @TearDown
        public void tearDown() throws Exception {
            super.tearDown();
        }
    }
}
| 4,591 |
0 | Create_ds/flink-benchmarks/src/test/java/org/apache/flink | Create_ds/flink-benchmarks/src/test/java/org/apache/flink/benchmark/StreamNetworkLatencyBenchmarkExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.streaming.runtime.io.benchmark.StreamNetworkPointToPointBenchmark;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.openjdk.jmh.annotations.Mode.AverageTime;
import static org.openjdk.jmh.annotations.Scope.Thread;
/** JMH latency benchmark runner (average time per batch of records). */
@OutputTimeUnit(MILLISECONDS)
@BenchmarkMode(AverageTime)
public class StreamNetworkLatencyBenchmarkExecutor extends BenchmarkBase {

    private static final int RECORDS_PER_INVOCATION = 100;

    public static void main(String[] args) throws RunnerException {
        String includePattern =
                ".*" + StreamNetworkLatencyBenchmarkExecutor.class.getCanonicalName() + ".*";
        Options options =
                new OptionsBuilder().verbosity(VerboseMode.NORMAL).include(includePattern).build();
        new Runner(options).run();
    }

    @Benchmark
    public void networkLatency1to1(Environment context) throws Exception {
        context.executeBenchmark(RECORDS_PER_INVOCATION, false);
    }

    /** Point-to-point environment, configured via {@code super.setUp(10)}. */
    @State(Thread)
    public static class Environment extends StreamNetworkPointToPointBenchmark {

        @Setup
        public void setUp() throws Exception {
            super.setUp(10);
        }

        @TearDown
        public void tearDown() {
            super.tearDown();
        }
    }
}
| 4,592 |
0 | Create_ds/flink-benchmarks/src/test/java/org/apache/flink | Create_ds/flink-benchmarks/src/test/java/org/apache/flink/config/ConfigUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.config;
import org.apache.flink.configuration.Configuration;
import org.junit.Assert;
import org.junit.Test;
public class ConfigUtilTest {

    /** Loading the benchmark configuration exposes the expected state data directory. */
    @Test
    public void testLoadConf() {
        Configuration benchmarkConf = ConfigUtil.loadBenchMarkConf();
        String stateDataDir = benchmarkConf.getString(StateBenchmarkOptions.STATE_DATA_DIR);
        Assert.assertEquals("/tmp/data", stateDataDir);
    }
}
| 4,593 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/FlinkEnvironmentContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.DeploymentOptions;
import org.apache.flink.configuration.NettyShuffleEnvironmentOptions;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.runtime.minicluster.MiniCluster;
import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.test.util.MiniClusterPipelineExecutorServiceLoader;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import java.time.Duration;
import static org.apache.flink.configuration.ResourceManagerOptions.REQUIREMENTS_CHECK_DELAY;
import static org.openjdk.jmh.annotations.Scope.Thread;
/**
 * Shared JMH {@code @State} that starts an embedded Flink {@link MiniCluster} per benchmark
 * thread and exposes a {@link StreamExecutionEnvironment} wired to it.
 *
 * <p>Lifecycle: {@link #setUp()} once per trial, {@link #tearDown()} releases the cluster.
 */
@State(Thread)
public class FlinkEnvironmentContext {

    public static final int NUM_NETWORK_BUFFERS = 1000;

    // Defaults shared by all benchmarks; subclasses may read but not reassign these.
    protected final int parallelism = 1;
    protected final boolean objectReuse = true;

    public StreamExecutionEnvironment env;
    public MiniCluster miniCluster;

    /**
     * Starts the mini cluster and builds the execution environment.
     *
     * @throws RuntimeException if called twice without an intervening {@link #tearDown()},
     *     or if the cluster fails to start
     */
    @Setup
    public void setUp() throws Exception {
        if (miniCluster != null) {
            throw new RuntimeException("setUp was called multiple times!");
        }
        final Configuration clusterConfig = createConfiguration();
        miniCluster =
                new MiniCluster(
                        new MiniClusterConfiguration.Builder()
                                .setNumSlotsPerTaskManager(getNumberOfSlotsPerTaskManager())
                                .setNumTaskManagers(getNumberOfTaskManagers())
                                .setConfiguration(clusterConfig)
                                .build());
        try {
            miniCluster.start();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        // set up the execution environment
        env =
                new StreamExecutionEnvironment(
                        new MiniClusterPipelineExecutorServiceLoader(miniCluster),
                        clusterConfig,
                        null);
        env.setParallelism(parallelism);
        if (objectReuse) {
            env.getConfig().enableObjectReuse();
        }
        // Deterministic runs: no restarts, in-memory state only.
        env.setRestartStrategy(RestartStrategies.noRestart());
        env.setStateBackend(new MemoryStateBackend());
    }

    /** Stops the cluster and clears the reference so {@link #setUp()} can run again. */
    @TearDown
    public void tearDown() throws Exception {
        miniCluster.close();
        miniCluster = null;
    }

    protected int getNumberOfTaskManagers() {
        return 1;
    }

    protected int getNumberOfSlotsPerTaskManager() {
        return 4;
    }

    /** Submits the job built on {@link #env} and blocks until it finishes. */
    public void execute() throws Exception {
        env.execute();
    }

    /** Builds the cluster configuration shared by all benchmarks; subclasses may extend it. */
    protected Configuration createConfiguration() {
        final Configuration configuration = new Configuration();
        // Random REST port so parallel forks do not collide.
        configuration.setString(RestOptions.BIND_PORT, "0");
        configuration.setInteger(
                NettyShuffleEnvironmentOptions.NETWORK_NUM_BUFFERS, NUM_NETWORK_BUFFERS);
        configuration.set(DeploymentOptions.TARGET, MiniClusterPipelineExecutorServiceLoader.NAME);
        configuration.set(DeploymentOptions.ATTACHED, true);
        // It doesn't make sense to wait for the final checkpoint in benchmarks since it only
        // prolongs the test but doesn't give any advantages.
        configuration.set(ExecutionCheckpointingOptions.ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH, false);
        // TODO: remove this line after FLINK-28243 will be done
        configuration.set(REQUIREMENTS_CHECK_DELAY, Duration.ZERO);
        return configuration;
    }
}
| 4,594 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/CollectSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import java.util.ArrayList;
import java.util.List;
/**
 * Test sink that collects every received element into an in-memory list.
 *
 * <p>The {@link #result} list lives in the sink instance and is not synchronized, so this is
 * only meaningful for local, single-instance execution. (Originally created by pnowojski,
 * 7/5/17.)
 */
public class CollectSink<T> implements SinkFunction<T> {

    // Collected elements, in arrival order.
    public final List<T> result = new ArrayList<>();

    @Override
    public void invoke(T value) throws Exception {
        result.add(value);
    }
}
| 4,595 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/BenchmarkBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.openjdk.jmh.annotations.Mode.Throughput;
import static org.openjdk.jmh.annotations.Scope.Thread;
/**
 * Common JMH configuration shared by all benchmarks: throughput mode, millisecond output,
 * 3 forks with JMX-related JVM flags, and 10 warmup / 10 measurement iterations.
 */
@SuppressWarnings("MethodMayBeStatic")
@State(Thread)
@OutputTimeUnit(MILLISECONDS)
@BenchmarkMode(Throughput)
@Fork(
        value = 3,
        jvmArgsAppend = {
            // Pin RMI to localhost so forked JVMs are reachable for local monitoring.
            "-Djava.rmi.server.hostname=127.0.0.1",
            "-Dcom.sun.management.jmxremote.authenticate=false",
            "-Dcom.sun.management.jmxremote.ssl=false",
            // NOTE(review): bare property with no "=value" — looks like a typo, possibly
            // intended as "-Dcom.sun.management.jmxremote" to enable the JMX agent.
            // Harmless as written (defines an empty property); confirm before changing.
            "-Dcom.sun.management.jmxremote.ssl"
        })
@Warmup(iterations = 10)
@Measurement(iterations = 10)
public class BenchmarkBase {}
| 4,596 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/ProcessingTimerBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.testutils.OneShotLatch;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
import org.apache.flink.util.Collector;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.Random;
/**
 * Benchmarks firing a large number of already-expired processing-time timers registered by a
 * single record.
 */
@OperationsPerInvocation(value = ProcessingTimerBenchmark.PROCESSING_TIMERS_PER_INVOCATION)
public class ProcessingTimerBenchmark extends BenchmarkBase {

    public static final int PROCESSING_TIMERS_PER_INVOCATION = 150_000;

    private static final int PARALLELISM = 1;

    // Shared between the source (which blocks on it to keep the job alive) and the process
    // function (which triggers it once all timers fired); reset at the start of each
    // invocation. Made final: the latch instance is never reassigned, only reset/triggered.
    private static final OneShotLatch LATCH = new OneShotLatch();

    public static void main(String[] args) throws RunnerException {
        Options options =
                new OptionsBuilder()
                        .verbosity(VerboseMode.NORMAL)
                        .include(".*" + ProcessingTimerBenchmark.class.getCanonicalName() + ".*")
                        .build();
        new Runner(options).run();
    }

    /**
     * Emits one record, registers {@link #PROCESSING_TIMERS_PER_INVOCATION} expired timers for
     * it, and waits (via {@link #LATCH}) until all of them have fired.
     */
    @Benchmark
    public void fireProcessingTimers(FlinkEnvironmentContext context) throws Exception {
        LATCH.reset();
        StreamExecutionEnvironment env = context.env;
        env.setParallelism(PARALLELISM);
        env.addSource(new SingleRecordSource())
                .keyBy(String::hashCode)
                .process(new ProcessingTimerKeyedProcessFunction(PROCESSING_TIMERS_PER_INVOCATION))
                .addSink(new DiscardingSink<>());
        env.execute();
    }

    /** Emits exactly one random record, then blocks until the benchmark's latch is triggered. */
    private static class SingleRecordSource extends RichParallelSourceFunction<String> {

        private Random random;

        public SingleRecordSource() {}

        @Override
        public void open(Configuration parameters) throws Exception {
            this.random = new Random();
        }

        @Override
        public void run(SourceContext<String> sourceContext) throws Exception {
            synchronized (sourceContext.getCheckpointLock()) {
                sourceContext.collect(String.valueOf(random.nextLong()));
            }
            // Keep the source alive until all timers have fired, so the job doesn't
            // finish before the measured work completes.
            LATCH.await();
        }

        @Override
        public void cancel() {}
    }

    /** Registers {@code timersPerRecord} expired timers per element and counts their firings. */
    private static class ProcessingTimerKeyedProcessFunction
            extends KeyedProcessFunction<Integer, String, String> {

        private final long timersPerRecord;
        private long firedTimesCount;

        public ProcessingTimerKeyedProcessFunction(long timersPerRecord) {
            this.timersPerRecord = timersPerRecord;
        }

        @Override
        public void open(Configuration parameters) throws Exception {
            this.firedTimesCount = 0;
        }

        @Override
        public void processElement(String s, Context context, Collector<String> collector)
                throws Exception {
            final long currTimestamp = System.currentTimeMillis();
            // Timestamps are strictly in the past, so each timer fires as soon as the
            // timer service processes it.
            for (int i = 0; i < timersPerRecord; i++) {
                context.timerService().registerProcessingTimeTimer(currTimestamp - i - 1);
            }
        }

        @Override
        public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out)
                throws Exception {
            if (++firedTimesCount == timersPerRecord) {
                LATCH.trigger();
            }
        }
    }
}
| 4,597 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/MemoryStateBackendBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.benchmark.functions.IntLongApplications;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import static org.openjdk.jmh.annotations.Scope.Thread;
/** Benchmarks a windowed reduce over the heap-based state backend flavours. */
@OperationsPerInvocation(value = MemoryStateBackendBenchmark.RECORDS_PER_INVOCATION)
public class MemoryStateBackendBenchmark extends StateBackendBenchmarkBase {

    public static final int RECORDS_PER_INVOCATION = 7_000_000;

    public static void main(String[] args) throws RunnerException {
        String includePattern =
                ".*" + MemoryStateBackendBenchmark.class.getCanonicalName() + ".*";
        Options options =
                new OptionsBuilder().verbosity(VerboseMode.NORMAL).include(includePattern).build();
        new Runner(options).run();
    }

    @Benchmark
    public void stateBackends(MemoryStateBackendContext context) throws Exception {
        IntLongApplications.reduceWithWindow(
                context.source, TumblingEventTimeWindows.of(Time.seconds(10_000)));
        context.execute();
    }

    /** Benchmark context parameterized over the non-RocksDB state backend variants. */
    @State(Thread)
    public static class MemoryStateBackendContext extends StateBackendContext {

        @Param({"MEMORY", "FS", "FS_ASYNC"})
        public StateBackend stateBackend = StateBackend.MEMORY;

        @Override
        public void setUp() throws Exception {
            super.setUp(stateBackend, RECORDS_PER_INVOCATION);
        }
    }
}
| 4,598 |
0 | Create_ds/flink-benchmarks/src/main/java/org/apache/flink | Create_ds/flink-benchmarks/src/main/java/org/apache/flink/benchmark/WindowBenchmarks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.benchmark;
import org.apache.flink.benchmark.functions.IntLongApplications;
import org.apache.flink.benchmark.functions.IntegerLongSource;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.assigners.GlobalWindows;
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
/** Benchmarks the four standard window assigners over the same bounded source. */
@OperationsPerInvocation(value = WindowBenchmarks.RECORDS_PER_INVOCATION)
public class WindowBenchmarks extends BenchmarkBase {

    public static final int RECORDS_PER_INVOCATION = 7_000_000;

    public static void main(String[] args) throws RunnerException {
        String includePattern = ".*" + WindowBenchmarks.class.getCanonicalName() + ".*";
        Options options =
                new OptionsBuilder().verbosity(VerboseMode.NORMAL).include(includePattern).build();
        new Runner(options).run();
    }

    @Benchmark
    public void globalWindow(TimeWindowContext context) throws Exception {
        IntLongApplications.reduceWithWindow(context.source, GlobalWindows.create());
        context.execute();
    }

    @Benchmark
    public void tumblingWindow(TimeWindowContext context) throws Exception {
        IntLongApplications.reduceWithWindow(
                context.source, TumblingEventTimeWindows.of(Time.seconds(10_000)));
        context.execute();
    }

    @Benchmark
    public void slidingWindow(TimeWindowContext context) throws Exception {
        IntLongApplications.reduceWithWindow(
                context.source,
                SlidingEventTimeWindows.of(Time.seconds(10_000), Time.seconds(1000)));
        context.execute();
    }

    @Benchmark
    public void sessionWindow(TimeWindowContext context) throws Exception {
        IntLongApplications.reduceWithWindow(
                context.source, EventTimeSessionWindows.withGap(Time.seconds(500)));
        context.execute();
    }

    /** Event-time environment with a bounded integer/long record source. */
    public static class TimeWindowContext extends FlinkEnvironmentContext {

        public final int numberOfElements = 1000;

        public DataStreamSource<IntegerLongSource.Record> source;

        @Override
        public void setUp() throws Exception {
            super.setUp();
            env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
            source = env.addSource(new IntegerLongSource(numberOfElements, RECORDS_PER_INVOCATION));
        }
    }
}
| 4,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.