index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cluster | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cluster/management/Compaction.java | /**
* Copyright 2018 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.cluster.management;
import com.netflix.priam.backup.BackupRestoreUtil;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.connection.CassandraOperations;
import com.netflix.priam.merics.CompactionMeasurement;
import com.netflix.priam.scheduler.CronTimer;
import com.netflix.priam.scheduler.TaskTimer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.collections4.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Utility class to compact the keyspaces/columnfamilies Created by aagrawal on 1/25/18. */
@Singleton
public class Compaction extends IClusterManagement<String> {
private static final Logger logger = LoggerFactory.getLogger(Compaction.class);
private final IConfiguration config;
private final CassandraOperations cassandraOperations;
@Inject
public Compaction(
IConfiguration config,
CassandraOperations cassandraOperations,
CompactionMeasurement compactionMeasurement) {
super(config, Task.COMPACTION, compactionMeasurement);
this.config = config;
this.cassandraOperations = cassandraOperations;
}
final Map<String, List<String>> getCompactionIncludeFilter(IConfiguration config)
throws Exception {
Map<String, List<String>> columnFamilyFilter =
BackupRestoreUtil.getFilter(config.getCompactionIncludeCFList());
logger.info("Compaction: Override for include CF provided by user: {}", columnFamilyFilter);
return columnFamilyFilter;
}
final Map<String, List<String>> getCompactionExcludeFilter(IConfiguration config)
throws Exception {
Map<String, List<String>> columnFamilyFilter =
BackupRestoreUtil.getFilter(config.getCompactionExcludeCFList());
logger.info("Compaction: Override for exclude CF provided by user: {}", columnFamilyFilter);
return columnFamilyFilter;
}
final Map<String, List<String>> getCompactionFilterCfs(IConfiguration config) throws Exception {
final Map<String, List<String>> includeFilter = getCompactionIncludeFilter(config);
final Map<String, List<String>> excludeFilter = getCompactionExcludeFilter(config);
final Map<String, List<String>> allColumnfamilies = cassandraOperations.getColumnfamilies();
Map<String, List<String>> result = new HashMap<>();
allColumnfamilies.forEach(
(keyspaceName, columnfamilies) -> {
if (SchemaConstant.isSystemKeyspace(
keyspaceName)) // no need to compact system keyspaces.
return;
if (excludeFilter != null && excludeFilter.containsKey(keyspaceName)) {
List<String> excludeCFFilter = excludeFilter.get(keyspaceName);
// Is CF list null/empty? If yes, then exclude all CF's for this keyspace.
if (excludeCFFilter == null || excludeCFFilter.isEmpty()) return;
columnfamilies =
(List<String>)
CollectionUtils.removeAll(columnfamilies, excludeCFFilter);
}
if (includeFilter != null) {
// Include filter is not empty and this keyspace is not provided in include
// filter. Ignore processing of this keyspace.
if (!includeFilter.containsKey(keyspaceName)) return;
List<String> includeCFFilter = includeFilter.get(keyspaceName);
// If include filter is empty or null, it means include all.
// If not, then we need to find intersection of CF's which are present and
// one which are configured to compact.
if (includeCFFilter != null
&& !includeCFFilter
.isEmpty()) // If include filter is empty or null, it means
// include all.
columnfamilies =
(List<String>)
CollectionUtils.intersection(
columnfamilies, includeCFFilter);
}
if (columnfamilies != null && !columnfamilies.isEmpty())
result.put(keyspaceName, columnfamilies);
});
return result;
}
/*
* @return the keyspace(s) compacted. List can be empty but never null.
*/
protected String runTask() throws Exception {
final Map<String, List<String>> columnfamilies = getCompactionFilterCfs(config);
if (!columnfamilies.isEmpty())
for (Map.Entry<String, List<String>> entry : columnfamilies.entrySet()) {
cassandraOperations.forceKeyspaceCompaction(
entry.getKey(), entry.getValue().toArray(new String[0]));
}
return columnfamilies.toString();
}
/**
* Timer to be used for compaction interval.
*
* @param config {@link IConfiguration} to get configuration details from priam.
* @return the timer to be used for compaction interval from {@link
* IConfiguration#getCompactionCronExpression()}
* @throws Exception If the cron expression is invalid.
*/
public static TaskTimer getTimer(IConfiguration config) throws Exception {
return CronTimer.getCronTimer(Task.COMPACTION.name(), config.getCompactionCronExpression());
}
}
| 3,200 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cluster | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cluster/management/Flush.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.cluster.management;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.connection.CassandraOperations;
import com.netflix.priam.merics.NodeToolFlushMeasurement;
import com.netflix.priam.scheduler.CronTimer;
import com.netflix.priam.scheduler.TaskTimer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Utility to flush Keyspaces from memtable to disk Created by vinhn on 10/12/16. */
@Singleton
public class Flush extends IClusterManagement<String> {
    private static final Logger logger = LoggerFactory.getLogger(Flush.class);
    private final IConfiguration config;
    private final CassandraOperations cassandraOperations;
    // Keyspaces targeted by the current run; recomputed on every execution by deriveKeyspaces().
    private List<String> keyspaces = new ArrayList<>();

    @Inject
    public Flush(
            IConfiguration config,
            CassandraOperations cassandraOperations,
            NodeToolFlushMeasurement nodeToolFlushMeasurement) {
        super(config, Task.FLUSH, nodeToolFlushMeasurement);
        this.config = config;
        this.cassandraOperations = cassandraOperations;
    }

    /**
     * Flushes each derived keyspace from memtable to disk.
     *
     * @return String form of the list of keyspace(s) flushed. List can be empty but never null.
     * @throws IllegalArgumentException if a configured keyspace does not exist in Cassandra.
     * @throws Exception if flushing any keyspace fails.
     */
    @Override
    protected String runTask() throws Exception {
        List<String> flushed = new ArrayList<>();
        // Get keyspaces to flush
        deriveKeyspaces();
        if (this.keyspaces == null || this.keyspaces.isEmpty()) {
            logger.warn("NO op on requested \"flush\" as there are no keyspaces.");
            return flushed.toString();
        }
        // Fetch the existing keyspaces once, instead of once per candidate keyspace.
        List<String> existingKeyspaces = cassandraOperations.getKeyspaces();
        // If flush is for certain keyspaces, validate keyspace exist
        for (String keyspace : keyspaces) {
            if (!existingKeyspaces.contains(keyspace)) {
                throw new IllegalArgumentException("Keyspace [" + keyspace + "] does not exist.");
            }
            if (SchemaConstant.isSystemKeyspace(keyspace)) // no need to flush system keyspaces.
            continue;
            try {
                cassandraOperations.forceKeyspaceFlush(keyspace);
                flushed.add(keyspace);
            } catch (IOException | ExecutionException | InterruptedException e) {
                throw new Exception("Exception during flushing keyspace: " + keyspace, e);
            }
        }
        return flushed.toString();
    }

    /*
    Derive keyspace(s) to flush in the following order: property override, or all keyspaces.
    */
    private void deriveKeyspaces() throws Exception {
        // Reset the list: this task runs repeatedly on a schedule, and without a reset the
        // previous run's keyspaces would accumulate in the instance-level list.
        this.keyspaces = new ArrayList<>();
        // == get value from property
        String raw = this.config.getFlushKeyspaces();
        if (!StringUtils.isEmpty(raw)) {
            for (String token : raw.split(",")) {
                // Tolerate stray whitespace and empty tokens in the comma-separated property.
                String candidate = token.trim();
                if (!candidate.isEmpty()) {
                    this.keyspaces.add(candidate);
                }
            }
            return;
        }
        // == no override via FP, default to all keyspaces
        this.keyspaces = cassandraOperations.getKeyspaces();
    }

    /**
     * Timer to be used for flush interval.
     *
     * @param config {@link IConfiguration} to get configuration details from priam.
     * @return the timer to be used for compaction interval from {@link
     *     IConfiguration#getFlushCronExpression()}
     * @throws Exception If the cron expression is invalid.
     */
    public static TaskTimer getTimer(IConfiguration config) throws Exception {
        return CronTimer.getCronTimer(Task.FLUSH.name(), config.getFlushCronExpression());
    }
}
| 3,201 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cluster | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cluster/management/IClusterManagement.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.cluster.management;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.health.CassandraMonitor;
import com.netflix.priam.merics.IMeasurement;
import com.netflix.priam.scheduler.Task;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Base class for scheduled cluster-management operations (flush, compaction). Created by vinhn on
 * 10/12/16.
 */
public abstract class IClusterManagement<T> extends Task {
    public enum Task {
        FLUSH,
        COMPACTION
    }

    private static final Logger logger = LoggerFactory.getLogger(IClusterManagement.class);
    private final Task taskType;
    private final IMeasurement measurement;
    // Shared across all subclasses: at most one cluster-management operation runs at a time.
    private static final Lock lock = new ReentrantLock();

    protected IClusterManagement(IConfiguration config, Task taskType, IMeasurement measurement) {
        super(config);
        this.taskType = taskType;
        this.measurement = measurement;
    }

    /**
     * Runs the concrete task under the shared lock, recording success/failure metrics.
     *
     * @throws Exception if another operation is already running or the task itself fails.
     */
    @Override
    public void execute() throws Exception {
        if (!CassandraMonitor.hasCassadraStarted()) {
            logger.debug("Cassandra has not started, hence {} will not run", taskType);
            return;
        }
        if (!lock.tryLock()) {
            logger.error("Operation is already running! Try again later.");
            throw new Exception("Operation already running");
        }
        try {
            String result = runTask();
            measurement.incrementSuccess();
            logger.info(
                    "Successfully finished executing the cluster management task: {} with result: {}",
                    taskType,
                    result);
            // Subclasses return the String form of a List ("[]") or Map ("{}") — a plain
            // isEmpty() check alone would never fire for those, so treat the empty-container
            // renderings as "no action taken" as well.
            if (result.isEmpty() || "[]".equals(result) || "{}".equals(result)) {
                logger.warn(
                        "{} task completed successfully but no action was done.", taskType.name());
            }
        } catch (Exception e) {
            measurement.incrementFailure();
            throw new Exception("Exception during execution of operation: " + taskType.name(), e);
        } finally {
            lock.unlock();
        }
    }

    @Override
    public String getName() {
        return taskType.name();
    }

    /**
     * Performs the actual operation.
     *
     * @return a human-readable summary of what was done; never null.
     * @throws Exception on any failure.
     */
    protected abstract String runTask() throws Exception;
}
| 3,202 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cluster | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cluster/management/ClusterManagementService.java | package com.netflix.priam.cluster.management;
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.defaultimpl.IService;
import com.netflix.priam.scheduler.PriamScheduler;
import javax.inject.Inject;
/** Registers the scheduled cluster-management tasks (flush and compaction) with the scheduler. */
public class ClusterManagementService implements IService {
    private final IConfiguration config;
    private final PriamScheduler scheduler;

    @Inject
    public ClusterManagementService(IConfiguration configuration, PriamScheduler priamScheduler) {
        this.config = configuration;
        this.scheduler = priamScheduler;
    }

    /** Schedules the nodetool-flush task first, then the compaction task. */
    @Override
    public void scheduleService() throws Exception {
        scheduleTask(scheduler, Flush.class, Flush.getTimer(config));
        scheduleTask(scheduler, Compaction.class, Compaction.getTimer(config));
    }

    /** No pre-update work is required for this service. */
    @Override
    public void updateServicePre() throws Exception {}

    /** No post-update work is required for this service. */
    @Override
    public void updateServicePost() throws Exception {}
}
| 3,203 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/compress/SnappyCompression.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.compress;
import java.io.*;
import org.apache.commons.io.IOUtils;
import org.xerial.snappy.SnappyInputStream;
/** Decompresses Snappy-compressed streams; counterpart of the Snappy chunk producer. */
public class SnappyCompression implements ICompression {
    private static final int BUFFER = 2 * 1024;

    /**
     * Decompresses {@code input} into {@code output}, closing both streams regardless of
     * success or failure.
     */
    @Override
    public void decompressAndClose(InputStream input, OutputStream output) throws IOException {
        try {
            decompress(input, output);
        } finally {
            IOUtils.closeQuietly(input);
            IOUtils.closeQuietly(output);
        }
    }

    // Copies decompressed bytes from input to output in BUFFER-sized chunks.
    private void decompress(InputStream input, OutputStream output) throws IOException {
        byte[] buffer = new byte[BUFFER];
        try (SnappyInputStream compressed = new SnappyInputStream(new BufferedInputStream(input));
                BufferedOutputStream sink = new BufferedOutputStream(output, BUFFER)) {
            for (int read = compressed.read(buffer, 0, BUFFER);
                    read != -1;
                    read = compressed.read(buffer, 0, BUFFER)) {
                sink.write(buffer, 0, read);
            }
        }
    }
}
| 3,204 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/compress/ICompression.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.compress;
import com.google.inject.ImplementedBy;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@ImplementedBy(SnappyCompression.class)
public interface ICompression {
    /**
     * Uncompress the input stream and write to the output stream. Closes both input and output
     * streams
     *
     * @param input the compressed source stream; closed by this call.
     * @param output the destination for decompressed bytes; closed by this call.
     * @throws IOException if reading, decompressing, or writing fails.
     */
    void decompressAndClose(InputStream input, OutputStream output) throws IOException;
}
| 3,205 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/compress/ChunkedStream.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.compress;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import org.apache.commons.io.IOUtils;
import org.xerial.snappy.SnappyOutputStream;
/**
 * Byte iterator that yields chunks of roughly {@code chunkSize} bytes from an input stream,
 * optionally Snappy-compressing them. Only NONE and SNAPPY are supported; any other
 * {@link CompressionType} makes {@link #next()} throw.
 */
public class ChunkedStream implements Iterator<byte[]> {
    private static final int BYTES_TO_READ = 2048;

    // True until the origin stream is exhausted and all buffered bytes have been returned.
    private boolean hasnext = true;
    private final ByteArrayOutputStream bos;
    private final SnappyOutputStream snappy;
    private final InputStream origin;
    private final long chunkSize;
    private final CompressionType compression;

    /** Convenience constructor for uncompressed chunking. */
    public ChunkedStream(InputStream is, long chunkSize) {
        this(is, chunkSize, CompressionType.NONE);
    }

    public ChunkedStream(InputStream is, long chunkSize, CompressionType compression) {
        this.origin = is;
        this.bos = new ByteArrayOutputStream();
        this.snappy = new SnappyOutputStream(bos);
        this.chunkSize = chunkSize;
        this.compression = compression;
    }

    @Override
    public boolean hasNext() {
        return hasnext;
    }

    /**
     * Returns the next chunk. The last chunk may be smaller than {@code chunkSize} (and is the
     * point at which all underlying streams are closed).
     *
     * @throws java.util.NoSuchElementException if the iterator is already exhausted.
     * @throws RuntimeException wrapping any {@link IOException} from the underlying streams.
     */
    @Override
    public byte[] next() {
        if (!hasnext) {
            // Iterator contract: fail fast rather than reading from the already-closed streams.
            throw new java.util.NoSuchElementException("ChunkedStream is exhausted");
        }
        try {
            byte[] data = new byte[BYTES_TO_READ];
            int count;
            while ((count = origin.read(data, 0, data.length)) != -1) {
                switch (compression) {
                    case NONE:
                        bos.write(data, 0, count);
                        break;
                    case SNAPPY:
                        snappy.write(data, 0, count);
                        break;
                    default:
                        throw new IllegalArgumentException("Snappy compression only.");
                }
                // Chunk boundary is measured on the (possibly compressed) buffered output.
                if (bos.size() >= chunkSize) return returnSafe();
            }
            // We don't have anything else to read hence set to false.
            return done();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    // Final chunk: flush any bytes Snappy still buffers, mark exhausted, close everything.
    private byte[] done() throws IOException {
        if (compression == CompressionType.SNAPPY) snappy.flush();
        byte[] return_ = bos.toByteArray();
        hasnext = false;
        IOUtils.closeQuietly(snappy);
        IOUtils.closeQuietly(bos);
        IOUtils.closeQuietly(origin);
        return return_;
    }

    // Intermediate chunk: hand back the buffered bytes and reset the buffer for the next chunk.
    private byte[] returnSafe() throws IOException {
        byte[] return_ = bos.toByteArray();
        bos.reset();
        return return_;
    }

    /** Removal makes no sense for a byte-chunk iterator; previously this silently did nothing. */
    @Override
    public void remove() {
        throw new UnsupportedOperationException("remove is not supported by ChunkedStream");
    }
}
| 3,206 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/compress/CompressionType.java | package com.netflix.priam.compress;
/**
 * Compression algorithms recognized by the backup pipeline. NOTE(review): LZ4 is declared here
 * but ChunkedStream only handles NONE and SNAPPY (it throws for anything else) — confirm LZ4
 * support before relying on it.
 */
public enum CompressionType {
    SNAPPY,
    LZ4,
    NONE
}
| 3,207 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/DoubleRing.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.identity;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.utils.ITokenManager;
import java.io.*;
import java.util.Set;
import java.util.stream.Collectors;
import javax.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Class providing functionality for doubling the ring */
public class DoubleRing {
    private static final Logger logger = LoggerFactory.getLogger(DoubleRing.class);
    // NOTE(review): static mutable state — the backup file is shared across DoubleRing
    // instances; set by backup(), consumed by restore().
    private static File TMP_BACKUP_FILE;
    private final IConfiguration config;
    private final IPriamInstanceFactory factory;
    private final ITokenManager tokenManager;
    private final InstanceInfo instanceInfo;

    @Inject
    public DoubleRing(
            IConfiguration config,
            IPriamInstanceFactory factory,
            ITokenManager tokenManager,
            InstanceInfo instanceInfo) {
        this.config = config;
        this.factory = factory;
        this.tokenManager = tokenManager;
        this.instanceInfo = instanceInfo;
    }

    /**
     * Doubling is done by pre-calculating all slots of a double ring and registering them. When new
     * nodes come up, they will get the unused token assigned per token logic.
     */
    public void doubleSlots() {
        Set<PriamInstance> local = getInstancesInSameRegion();
        // delete all
        for (PriamInstance data : local) factory.delete(data);
        int hash = tokenManager.regionOffset(instanceInfo.getRegion());
        // Move existing instances to even slots (slot * 2), freeing the odd slots for new nodes.
        for (PriamInstance data : local) {
            int slot = (data.getId() - hash) * 2;
            factory.create(
                    data.getApp(),
                    hash + slot,
                    data.getInstanceId(),
                    data.getHostName(),
                    data.getHostIP(),
                    data.getRac(),
                    data.getVolumes(),
                    data.getToken());
        }
        int new_ring_size = local.size() * 2;
        for (PriamInstance data : getInstancesInSameRegion()) {
            // Register a dummy placeholder at slot current+3, wrapping around at ring size.
            // NOTE(review): the +3 offset is inherited behavior — presumably it interleaves new
            // slots across racks; confirm before changing.
            int currentSlot = data.getId() - hash;
            int new_slot =
                    currentSlot + 3 > new_ring_size
                            ? (currentSlot + 3) - new_ring_size
                            : currentSlot + 3;
            String token =
                    tokenManager.createToken(new_slot, new_ring_size, instanceInfo.getRegion());
            factory.create(
                    data.getApp(),
                    new_slot + hash,
                    InstanceIdentity.DUMMY_INSTANCE_ID,
                    instanceInfo.getHostname(),
                    config.usePrivateIP() ? instanceInfo.getPrivateIP() : instanceInfo.getHostIP(),
                    data.getRac(),
                    null,
                    token);
        }
    }

    // filter other DC's
    private Set<PriamInstance> getInstancesInSameRegion() {
        return factory.getAllIds(config.getAppName())
                .stream()
                .filter(i -> i.getDC().equals(instanceInfo.getRegion()))
                .collect(Collectors.toSet());
    }

    /** Backup the current state in case of failure */
    public void backup() throws IOException {
        // writing to the backup file.
        TMP_BACKUP_FILE = File.createTempFile("Backup-instance-data", ".dat");
        try (ObjectOutputStream stream =
                new ObjectOutputStream(new FileOutputStream(TMP_BACKUP_FILE))) {
            stream.writeObject(getInstancesInSameRegion());
            logger.info(
                    "Wrote the backup of the instances to: {}", TMP_BACKUP_FILE.getAbsolutePath());
        }
    }

    /**
     * Restore tokens if a failure occurs. Requires {@link #backup()} to have been called first.
     *
     * @throws IllegalStateException if no backup file exists (backup() was never called).
     * @throws IOException if reading the backup file fails.
     * @throws ClassNotFoundException if the serialized instance data cannot be deserialized.
     */
    public void restore() throws IOException, ClassNotFoundException {
        if (TMP_BACKUP_FILE == null) {
            // Previously this call order produced a bare NullPointerException.
            throw new IllegalStateException(
                    "No backup file exists; backup() must be called before restore().");
        }
        for (PriamInstance data : getInstancesInSameRegion()) factory.delete(data);
        // read from the file.
        try (ObjectInputStream stream =
                new ObjectInputStream(new FileInputStream(TMP_BACKUP_FILE))) {
            @SuppressWarnings("unchecked")
            Set<PriamInstance> allInstances = (Set<PriamInstance>) stream.readObject();
            for (PriamInstance data : allInstances)
                factory.create(
                        data.getApp(),
                        data.getId(),
                        data.getInstanceId(),
                        data.getHostName(),
                        data.getHostIP(),
                        data.getRac(),
                        data.getVolumes(),
                        data.getToken());
            logger.info(
                    "Successfully restored the Instances from the backup: {}",
                    TMP_BACKUP_FILE.getAbsolutePath());
        }
    }
}
| 3,208 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/InstanceIdentity.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.identity;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimaps;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.identity.token.ITokenRetriever;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
 * This class provides the central place to create and consume the identity of the instance - token,
 * seeds etc.
 */
@Singleton
public class InstanceIdentity {
    // Placeholder instance id used for pre-generated (not-yet-claimed) ring slots.
    public static final String DUMMY_INSTANCE_ID = "new_slot";
    // Rac (availability zone) -> instances in that rac; rebuilt by populateRacMap().
    private final ListMultimap<String, PriamInstance> locMap =
            Multimaps.newListMultimap(new HashMap<>(), Lists::newArrayList);
    private final IPriamInstanceFactory factory;
    private final IMembership membership;
    private final IConfiguration config;
    // Matches real (non-dummy) instances on a different host than this one; used to pick seeds.
    private final Predicate<PriamInstance> differentHostPredicate =
            new Predicate<PriamInstance>() {
                @Override
                public boolean apply(PriamInstance instance) {
                    return (!instance.getInstanceId().equalsIgnoreCase(DUMMY_INSTANCE_ID)
                            && !instance.getHostName().equals(myInstance.getHostName()));
                }
            };
    private PriamInstance myInstance;
    // Instance information contains other information like ASG/vpc-id etc.
    private InstanceInfo myInstanceInfo;
    private boolean isReplace;
    private boolean isTokenPregenerated;
    private String replacedIp;

    @Inject
    // Note: do not parameterized the generic type variable to an implementation as
    // it confuses
    // Guice in the binding.
    public InstanceIdentity(
            IPriamInstanceFactory factory,
            IMembership membership,
            IConfiguration config,
            InstanceInfo instanceInfo,
            ITokenRetriever tokenRetriever)
            throws Exception {
        this.factory = factory;
        this.membership = membership;
        this.config = config;
        this.myInstanceInfo = instanceInfo;
        // The token retriever supplies this node's registry entry and replacement state.
        this.myInstance = tokenRetriever.get();
        this.replacedIp = tokenRetriever.getReplacedIp().orElse(null);
        this.isReplace = replacedIp != null;
        this.isTokenPregenerated = tokenRetriever.isTokenPregenerated();
    }

    /** @return this node's registry entry (token, slot, host info). */
    public PriamInstance getInstance() {
        return myInstance;
    }

    /** @return cloud-environment details (ASG, vpc-id, region, etc.) for this node. */
    public InstanceInfo getInstanceInfo() {
        return myInstanceInfo;
    }

    // Rebuilds the rac -> instances map from the instance registry. Called before every read
    // so the map reflects the current registry state.
    private void populateRacMap() {
        locMap.clear();
        factory.getAllIds(config.getAppName()).forEach(ins -> locMap.put(ins.getRac(), ins));
    }

    /**
     * Computes the seed list for this node: one non-dummy instance per rac, on a different host
     * than this one. Multi-DC clusters use host IPs; single-DC uses hostnames.
     *
     * <p>Single-rac special case: returns an empty list until all rac members are registered,
     * and if this node is the first in the rac it also seeds from the second node.
     *
     * @return seed host names/IPs; may be empty, never null.
     */
    public List<String> getSeeds() {
        populateRacMap();
        List<String> seeds = new LinkedList<>();
        // Handle single zone deployment
        if (config.getRacs().size() == 1) {
            // Return empty list if all nodes are not up
            if (membership.getRacMembershipSize() != locMap.get(myInstance.getRac()).size())
                return seeds;
            // If seed node, return the next node in the list
            if (locMap.get(myInstance.getRac()).size() > 1
                    && locMap.get(myInstance.getRac())
                            .get(0)
                            .getHostIP()
                            .equals(myInstance.getHostIP())) {
                PriamInstance instance = locMap.get(myInstance.getRac()).get(1);
                if (instance != null && !isInstanceDummy(instance)) {
                    if (config.isMultiDC()) seeds.add(instance.getHostIP());
                    else seeds.add(instance.getHostName());
                }
            }
        }
        for (String loc : locMap.keySet()) {
            // First instance in each rac that is real and not this host.
            PriamInstance instance =
                    Iterables.tryFind(locMap.get(loc), differentHostPredicate).orNull();
            if (instance != null && !isInstanceDummy(instance)) {
                if (config.isMultiDC()) seeds.add(instance.getHostIP());
                else seeds.add(instance.getHostName());
            }
        }
        return seeds;
    }

    /**
     * @return true when this node is the first registered instance of its rac.
     *     NOTE(review): relies on the ordering of the registry listing — presumably stable by
     *     slot id; confirm against the IPriamInstanceFactory implementation.
     */
    public boolean isSeed() {
        populateRacMap();
        // Despite the name, this local holds a hostname, not an IP.
        String ip = locMap.get(myInstance.getRac()).get(0).getHostName();
        return myInstance.getHostName().equals(ip);
    }

    /** @return true when this node is replacing a dead node's token. */
    public boolean isReplace() {
        return isReplace;
    }

    /** @return true when this node claimed a pre-generated (dummy) token. */
    public boolean isTokenPregenerated() {
        return isTokenPregenerated;
    }

    /** @return IP of the node being replaced, or null when this is not a replacement. */
    public String getReplacedIp() {
        return replacedIp;
    }

    /** Marks this node as a replacement when a non-empty IP is supplied. NPEs on null input. */
    public void setReplacedIp(String replacedIp) {
        this.replacedIp = replacedIp;
        if (!replacedIp.isEmpty()) this.isReplace = true;
    }

    // A "dummy" entry is a pre-generated slot not yet claimed by a live node.
    private static boolean isInstanceDummy(PriamInstance instance) {
        return instance.getInstanceId().equals(DUMMY_INSTANCE_ID);
    }
}
| 3,209 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/PriamInstance.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.identity;
import java.io.Serializable;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Registry record for a single Cassandra node: ring slot, token, host identity and location.
 *
 * <p>NOTE(review): instances are Java-serialized (see DoubleRing.backup/restore), so field names
 * and the serialVersionUID must remain stable.
 */
public class PriamInstance implements Serializable {
    private static final long serialVersionUID = 5606412386974488659L;
    private static final Logger logger = LoggerFactory.getLogger(PriamInstance.class);
    private String hostname;
    // Last-updated timestamp; units not established here (presumably epoch millis — confirm).
    private long updatetime;
    private boolean outOfService;
    // Cluster (application) name this instance belongs to.
    private String app;
    // Ring slot id (region offset + slot); capitalized name kept for serialization stability.
    private int Id;
    private String instanceId;
    private String availabilityZone;
    private String publicip;
    // Data center / region, exposed via getDC().
    private String location;
    private String token;
    // Handles Storage objects
    private Map<String, Object> volumes;

    public String getApp() {
        return app;
    }

    public void setApp(String app) {
        this.app = app;
    }

    public int getId() {
        return Id;
    }

    public void setId(int id) {
        Id = id;
    }

    public String getInstanceId() {
        return instanceId;
    }

    public void setInstanceId(String instanceId) {
        this.instanceId = instanceId;
    }

    /** @return the availability zone (rac) of this instance. */
    public String getRac() {
        return availabilityZone;
    }

    public void setRac(String availabilityZone) {
        this.availabilityZone = availabilityZone;
    }

    public String getHostName() {
        return hostname;
    }

    public String getHostIP() {
        return publicip;
    }

    /** Sets hostname and IP together. */
    public void setHost(String hostname, String publicip) {
        this.hostname = hostname;
        this.publicip = publicip;
    }

    public void setHost(String hostname) {
        this.hostname = hostname;
    }

    public void setHostIP(String publicip) {
        this.publicip = publicip;
    }

    public String getToken() {
        return token;
    }

    public void setToken(String token) {
        this.token = token;
    }

    public Map<String, Object> getVolumes() {
        return volumes;
    }

    public void setVolumes(Map<String, Object> volumes) {
        this.volumes = volumes;
    }

    @Override
    public String toString() {
        return String.format(
                "Hostname: %s, InstanceId: %s, APP_NAME: %s, RAC : %s, Location %s, Id: %s: Token: %s, IP: %s",
                hostname, instanceId, app, availabilityZone, location, Id, token, publicip);
    }

    /** @return the data center / region of this instance. */
    public String getDC() {
        return location;
    }

    public void setDC(String location) {
        this.location = location;
    }

    public long getUpdatetime() {
        return updatetime;
    }

    public void setUpdatetime(long updatetime) {
        this.updatetime = updatetime;
    }

    public boolean isOutOfService() {
        return outOfService;
    }

    /** Marks this instance out of service (one-way; there is no setter to bring it back). */
    public PriamInstance setOutOfService() {
        this.outOfService = true;
        return this;
    }
}
| 3,210 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/IPriamInstanceFactory.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.identity;
import com.google.common.collect.ImmutableSet;
import com.google.inject.ImplementedBy;
import com.netflix.priam.aws.SDBInstanceFactory;
import java.util.Map;
/**
 * Interface for managing Cassandra instance data. Provides functionality to register, update,
 * delete or list instances from the registry.
 */
@ImplementedBy(SDBInstanceFactory.class)
public interface IPriamInstanceFactory {
    /**
     * Return a list of all Cassandra server nodes registered.
     *
     * @param appName the cluster name
     * @return a list of all nodes in {@code appName}
     */
    ImmutableSet<PriamInstance> getAllIds(String appName);

    /**
     * Return the Cassandra server node with the given {@code id}.
     *
     * @param appName the cluster name
     * @param dc the datacenter of the node (was previously undocumented — confirm exact
     *     matching semantics against the SDB implementation)
     * @param id the node id
     * @return the node with the given {@code id}, or {@code null} if none found
     */
    PriamInstance getInstance(String appName, String dc, int id);

    /**
     * Create/Register an instance of the server with its info.
     *
     * @param app the cluster name
     * @param id the node id (slot) in the ring
     * @param instanceID the cloud instance id of the node
     * @param hostname the hostname of the node
     * @param ip the IP address of the node
     * @param rac the rack / availability zone of the node
     * @param volumes metadata of volumes attached to the node; may be null
     * @param token the Cassandra token owned by the node
     * @return the new node
     */
    PriamInstance create(
            String app,
            int id,
            String instanceID,
            String hostname,
            String ip,
            String rac,
            Map<String, Object> volumes,
            String token);

    /**
     * Delete the server node from the registry.
     *
     * @param inst the node to delete
     */
    void delete(PriamInstance inst);

    /**
     * Update the details of the server node in registry.
     *
     * @param orig the values that should exist in the database for the update to succeed
     * @param inst the new values
     */
    void update(PriamInstance orig, PriamInstance inst);
}
| 3,211 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/IMembership.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.identity;
import com.google.common.collect.ImmutableSet;
import com.google.inject.ImplementedBy;
import com.netflix.priam.aws.AWSMembership;
import java.util.Collection;
/**
 * Interface to manage membership meta information such as size of RAC, list of nodes in RAC etc.
 * Also perform ACL updates used in multi-regional clusters.
 */
@ImplementedBy(AWSMembership.class)
public interface IMembership {
    /**
     * Get the instances in the current RAC (rack / availability zone).
     *
     * @return instance ids of the members of the current RAC
     */
    ImmutableSet<String> getRacMembership();

    /** @return size of the current RAC */
    int getRacMembershipSize();

    /**
     * Get the instances in the cross-account but current RAC.
     *
     * @return instance ids of cross-account members of the current RAC
     */
    ImmutableSet<String> getCrossAccountRacMembership();

    /**
     * Number of RACs.
     *
     * @return the total number of RACs
     */
    int getRacCount();

    /**
     * Add security group ACLs for the given IPs over the given range.
     *
     * @param listIPs IPs to allow
     * @param from start of the range
     * @param to end of the range
     */
    void addACL(Collection<String> listIPs, int from, int to);

    /**
     * Remove security group ACLs for the given IPs over the given range.
     *
     * @param listIPs IPs to disallow
     * @param from start of the range
     * @param to end of the range
     */
    void removeACL(Collection<String> listIPs, int from, int to);

    /**
     * List all ACLs for the given range.
     *
     * @return the currently configured ACL entries
     */
    ImmutableSet<String> listACL(int from, int to);

    /**
     * Expand the membership size by {@code count} (the previous doc said "by 1", which
     * contradicted the parameter — confirm against AWSMembership).
     *
     * @param count number of slots to add
     */
    void expandRacMembership(int count);
}
| 3,212 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/token/TokenRetrieverUtils.java | package com.netflix.priam.identity.token;
import com.google.common.collect.ImmutableSet;
import com.netflix.priam.identity.PriamInstance;
import com.netflix.priam.utils.GsonJsonSerializer;
import com.netflix.priam.utils.SystemUtils;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Common utilities for token retrieval. */
public class TokenRetrieverUtils {
    private static final Logger logger = LoggerFactory.getLogger(TokenRetrieverUtils.class);
    private static final String STATUS_URL_FORMAT = "http://%s:8080/Priam/REST/v1/cassadmin/status";

    /** Static utility holder; not instantiable. */
    private TokenRetrieverUtils() {}

    /**
     * Utility method to infer the IP of the owner of a token in a given datacenter. This method
     * uses Cassandra status information to find the owner. While it is ideal to check all the
     * nodes in the ring to see if they agree on the IP to be replaced, in large clusters it may
     * affect the startup performance. This method picks at most 3 random hosts from the ring and
     * sees if they all agree on the IP to be replaced.
     *
     * @param allIds all known instances of the cluster
     * @param token the token whose owner is being inferred
     * @param dc the datacenter to restrict the search to
     * @return inferred ownership; the IP of the token owner when gossip converges (GOOD), or a
     *     MISMATCH/UNREACHABLE_NODES status when C* status doesn't converge.
     */
    public static InferredTokenOwnership inferTokenOwnerFromGossip(
            ImmutableSet<PriamInstance> allIds, String token, String dc) {
        // Avoid using dead instance who we are trying to replace (duh!!)
        // Avoid other regions instances to avoid communication over public ip address.
        List<? extends PriamInstance> eligibleInstances =
                allIds.stream()
                        .filter(priamInstance -> !token.equalsIgnoreCase(priamInstance.getToken()))
                        .filter(priamInstance -> priamInstance.getDC().equalsIgnoreCase(dc))
                        .collect(Collectors.toList());
        // We want to get IP from min 1, max 3 instances to ensure we are not relying on
        // gossip of a single instance.
        // Good idea to shuffle so we are not talking to same instances every time.
        Collections.shuffle(eligibleInstances);
        // Potential issue could be when you have about 50% of your cluster C* DOWN or
        // trying to be replaced.
        // Think of a major disaster hitting your cluster. In that scenario chances of
        // instance hitting DOWN C* are much much higher.
        // In such a case you should rely on @link{CassandraConfig#setReplacedIp}.
        int noOfInstancesGossipShouldMatch = Math.max(1, Math.min(3, eligibleInstances.size()));
        // While it is ideal to check all the nodes in the ring to see if they agree on
        // the IP to be replaced, in large clusters it may affect the startup
        // performance. So we pick three random hosts from the ring and see if they all
        // agree on the IP to be replaced. If not, we don't replace.
        InferredTokenOwnership inferredTokenOwnership = new InferredTokenOwnership();
        int matchedGossipInstances = 0, reachableInstances = 0;
        for (PriamInstance instance : eligibleInstances) {
            logger.info("Finding down nodes from ip[{}]; token[{}]", instance.getHostIP(), token);
            try {
                TokenInformation tokenInformation =
                        getTokenInformation(instance.getHostIP(), token);
                reachableInstances++;
                if (inferredTokenOwnership.getTokenInformation() == null) {
                    inferredTokenOwnership.setTokenInformation(tokenInformation);
                }
                if (inferredTokenOwnership.getTokenInformation().equals(tokenInformation)) {
                    matchedGossipInstances++;
                    if (matchedGossipInstances == noOfInstancesGossipShouldMatch) {
                        inferredTokenOwnership.setTokenInformationStatus(
                                InferredTokenOwnership.TokenInformationStatus.GOOD);
                        return inferredTokenOwnership;
                    }
                } else {
                    // Mismatch in the gossip information from Cassandra.
                    inferredTokenOwnership.setTokenInformationStatus(
                            InferredTokenOwnership.TokenInformationStatus.MISMATCH);
                    logger.info(
                            "There is a mismatch in the status information reported by Cassandra. TokenInformation1: {}, TokenInformation2: {}",
                            inferredTokenOwnership.getTokenInformation(),
                            tokenInformation);
                    // Prefer whichever conflicting answer claims the endpoint is live.
                    inferredTokenOwnership.setTokenInformation(
                            inferredTokenOwnership.getTokenInformation().isLive()
                                    ? inferredTokenOwnership.getTokenInformation()
                                    : tokenInformation);
                    return inferredTokenOwnership;
                }
            } catch (GossipParseException e) {
                logger.warn(e.getMessage());
            }
        }
        // If we are not able to reach at least minimum required instances.
        if (reachableInstances < noOfInstancesGossipShouldMatch) {
            inferredTokenOwnership.setTokenInformationStatus(
                    InferredTokenOwnership.TokenInformationStatus.UNREACHABLE_NODES);
            logger.info(
                    String.format(
                            "Unable to find enough instances where gossip match. Required: [%d]",
                            noOfInstancesGossipShouldMatch));
        }
        return inferredTokenOwnership;
    }

    /**
     * Helper to fetch the owner of {@code token} from a single Cassandra node's status endpoint.
     *
     * @throws GossipParseException when the host is unreachable or returns unparseable output
     */
    private static TokenInformation getTokenInformation(String ip, String token)
            throws GossipParseException {
        String response = null;
        try {
            response = SystemUtils.getDataFromUrl(String.format(STATUS_URL_FORMAT, ip));
            JSONObject jsonObject = (JSONObject) new JSONParser().parse(response);
            JSONArray liveNodes = (JSONArray) jsonObject.get("live");
            JSONObject tokenToEndpointMap = (JSONObject) jsonObject.get("tokenToEndpointMap");
            String endpointInfo = tokenToEndpointMap.get(token).toString();
            // We intentionally do not use the "unreachable" nodes as it may or may not be the best
            // place to start.
            // We just verify that the endpoint we provide is not "live".
            boolean isLive = liveNodes.contains(endpointInfo);
            return new TokenInformation(endpointInfo, isLive);
        } catch (RuntimeException e) {
            throw new GossipParseException(
                    String.format("Error in reaching out to host: [%s]", ip), e);
        } catch (ParseException e) {
            throw new GossipParseException(
                    String.format(
                            "Error in parsing gossip response [%s] from host: [%s]", response, ip),
                    e);
        }
    }

    /** Immutable pairing of a token owner's IP address with its gossip liveness. */
    public static class TokenInformation {
        private final String ipAddress;
        private final boolean isLive;

        public TokenInformation(String ipAddress, boolean isLive) {
            this.ipAddress = ipAddress;
            this.isLive = isLive;
        }

        public boolean isLive() {
            return isLive;
        }

        public String getIpAddress() {
            return ipAddress;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) return true;
            if (obj == null || this.getClass() != obj.getClass()) return false;
            TokenInformation tokenInformation = (TokenInformation) obj;
            return this.ipAddress.equalsIgnoreCase(tokenInformation.getIpAddress())
                    && isLive == tokenInformation.isLive;
        }

        // FIX: equals() was previously overridden without hashCode(), violating the
        // Object contract (equal instances must produce equal hashes). The hash lowercases
        // the address to stay consistent with the case-insensitive equals(); IP address
        // strings are ASCII, so toLowerCase() is safe here.
        @Override
        public int hashCode() {
            return 31 * ipAddress.toLowerCase().hashCode() + (isLive ? 1 : 0);
        }

        @Override
        public String toString() {
            return GsonJsonSerializer.getGson().toJson(this);
        }
    }

    /** Mutable result holder: a convergence status plus the (possibly null) owner info. */
    public static class InferredTokenOwnership {
        public enum TokenInformationStatus {
            GOOD,
            UNREACHABLE_NODES,
            MISMATCH
        }

        // Pessimistic default; upgraded only when gossip actually converges.
        private TokenInformationStatus tokenInformationStatus =
                TokenInformationStatus.UNREACHABLE_NODES;
        private TokenInformation tokenInformation;

        public void setTokenInformationStatus(TokenInformationStatus tokenInformationStatus) {
            this.tokenInformationStatus = tokenInformationStatus;
        }

        public void setTokenInformation(TokenInformation tokenInformation) {
            this.tokenInformation = tokenInformation;
        }

        public TokenInformationStatus getTokenInformationStatus() {
            return tokenInformationStatus;
        }

        public TokenInformation getTokenInformation() {
            return tokenInformation;
        }
    }

    /**
     * This exception is thrown either when instances are not available or when they return invalid
     * response.
     */
    public static class GossipParseException extends Exception {
        private static final long serialVersionUID = 1462488371031437486L;

        public GossipParseException() {
            super();
        }

        public GossipParseException(String message) {
            super(message);
        }

        public GossipParseException(String message, Throwable t) {
            super(message, t);
        }
    }
}
| 3,213 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/token/ITokenRetriever.java | package com.netflix.priam.identity.token;
import com.google.inject.ImplementedBy;
import com.netflix.priam.identity.PriamInstance;
import java.util.Optional;
import org.apache.commons.lang3.math.Fraction;
/** Fetches PriamInstances and other data which is convenient at the time. */
@ImplementedBy(TokenRetriever.class)
public interface ITokenRetriever {
    /**
     * Returns this node's PriamInstance (its token assignment), claiming or creating one in the
     * instance registry if necessary.
     *
     * @throws Exception when the registry cannot be reached or a token cannot be settled on
     */
    PriamInstance get() throws Exception;

    /** Gets the IP address of the dead instance whose token we will acquire, if any. */
    Optional<String> getReplacedIp();

    /** @return true if the token slot this node claimed was pre-generated rather than in use. */
    boolean isTokenPregenerated();

    /** Returns the fraction of tokens in the ring which come before this node's token. */
    Fraction getRingPosition() throws Exception;
}
| 3,214 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/token/TokenRetriever.java | package com.netflix.priam.identity.token;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.IMembership;
import com.netflix.priam.identity.IPriamInstanceFactory;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.identity.PriamInstance;
import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.utils.ITokenManager;
import com.netflix.priam.utils.RetryableCallable;
import com.netflix.priam.utils.Sleeper;
import java.math.BigInteger;
import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import javax.inject.Inject;
import org.apache.commons.lang3.math.Fraction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TokenRetriever implements ITokenRetriever {
public static final String NEW_SLOT = "new_slot";
private static final int MAX_VALUE_IN_MILISECS = 300000; // sleep up to 5 minutes
private static final Logger logger = LoggerFactory.getLogger(InstanceIdentity.class);
private final Random randomizer;
private final Sleeper sleeper;
private final IPriamInstanceFactory factory;
private final IMembership membership;
private final IConfiguration config;
private final ITokenManager tokenManager;
// Instance information contains other information like ASG/vpc-id etc.
private InstanceInfo myInstanceInfo;
private boolean isTokenPregenerated = false;
private String replacedIp;
private PriamInstance priamInstance;
@Inject
public TokenRetriever(
IPriamInstanceFactory factory,
IMembership membership,
IConfiguration config,
InstanceInfo instanceInfo,
Sleeper sleeper,
ITokenManager tokenManager) {
this.factory = factory;
this.membership = membership;
this.config = config;
this.myInstanceInfo = instanceInfo;
this.randomizer = new Random();
this.sleeper = sleeper;
this.tokenManager = tokenManager;
}
@Override
public PriamInstance get() throws Exception {
if (priamInstance == null) {
priamInstance = grabPreAssignedToken();
}
if (priamInstance == null) {
priamInstance = grabExistingToken();
}
if (priamInstance == null) {
priamInstance = grabNewToken();
}
logger.info("My instance: {}", priamInstance);
return priamInstance;
}
@Override
public Optional<String> getReplacedIp() {
return Optional.ofNullable(replacedIp);
}
@Override
public boolean isTokenPregenerated() {
return isTokenPregenerated;
}
@Override
public Fraction getRingPosition() throws Exception {
get();
BigInteger token = new BigInteger(priamInstance.getToken());
ImmutableSet<PriamInstance> nodes = factory.getAllIds(config.getAppName());
long ringPosition =
nodes.stream()
.filter(node -> token.compareTo(new BigInteger(node.getToken())) > 0)
.count();
return Fraction.getFraction(Math.toIntExact(ringPosition), nodes.size());
}
private PriamInstance grabPreAssignedToken() throws Exception {
return new RetryableCallable<PriamInstance>() {
@Override
public PriamInstance retriableCall() throws Exception {
logger.info("Trying to grab a pre-assigned token.");
// Check if this node is decommissioned.
ImmutableSet<PriamInstance> allIds =
factory.getAllIds(config.getAppName() + "-dead");
Optional<PriamInstance> instance =
findInstance(allIds).map(PriamInstance::setOutOfService);
if (!instance.isPresent()) {
ImmutableSet<PriamInstance> liveNodes = factory.getAllIds(config.getAppName());
instance = instance.map(Optional::of).orElseGet(() -> findInstance(liveNodes));
if (instance.isPresent()) {
// Why check gossip? Priam might have crashed before bootstrapping
// Cassandra in replace mode.
replacedIp = getReplacedIpForAssignedToken(liveNodes, instance.get());
}
}
return instance.map(i -> claimToken(i)).orElse(null);
}
}.call();
}
@VisibleForTesting
public PriamInstance grabExistingToken() throws Exception {
return new RetryableCallable<PriamInstance>() {
@Override
public PriamInstance retriableCall() throws Exception {
logger.info("Trying to grab an existing token");
sleeper.sleep(new Random().nextInt(5000) + 10000);
Set<String> racInstanceIds = getRacInstanceIds();
ImmutableSet<PriamInstance> allIds = factory.getAllIds(config.getAppName());
List<PriamInstance> instances =
allIds.stream()
.filter(i -> i.getRac().equals(myInstanceInfo.getRac()))
.filter(i -> !racInstanceIds.contains(i.getInstanceId()))
.collect(Collectors.toList());
Optional<PriamInstance> candidate =
instances.stream().filter(i -> !isNew(i)).findFirst();
candidate.ifPresent(i -> replacedIp = getReplacedIpForExistingToken(allIds, i));
if (replacedIp == null) {
candidate = instances.stream().filter(i -> isNew(i)).findFirst();
candidate.ifPresent(i -> isTokenPregenerated = true);
}
return candidate.map(i -> claimToken(i)).orElse(null);
}
}.call();
}
private PriamInstance grabNewToken() throws Exception {
Preconditions.checkState(config.isCreateNewTokenEnable());
return new RetryableCallable<PriamInstance>() {
@Override
public PriamInstance retriableCall() throws Exception {
set(100, 100);
logger.info("Trying to generate a new token");
sleeper.sleep(new Random().nextInt(15000));
return generateNewToken();
}
}.call();
}
@VisibleForTesting
PriamInstance generateNewToken() {
String myRegion = myInstanceInfo.getRegion();
// this offset ensures the nodes are spread far away from the other regions.
int regionOffset = tokenManager.regionOffset(myRegion);
String myRac = myInstanceInfo.getRac();
List<String> racs = config.getRacs();
ImmutableSet<PriamInstance> allIds = factory.getAllIds(config.getAppName());
int mySlot =
allIds.stream()
.filter(i -> i.getRac().equals(myRac))
.map(PriamInstance::getId)
.max(Integer::compareTo)
.map(id -> racs.size() + Math.max(id, regionOffset) - regionOffset)
.orElseGet(
() -> {
Preconditions.checkState(racs.contains(myRac));
return racs.indexOf(myRac);
});
int instanceCount = membership.getRacCount() * membership.getRacMembershipSize();
String newToken = tokenManager.createToken(mySlot, instanceCount, myRegion);
while (newTokenIsADuplicate(newToken, allIds)) {
newToken = new BigInteger(newToken).add(BigInteger.ONE).toString();
}
return createToken(mySlot + regionOffset, newToken);
}
private boolean newTokenIsADuplicate(String newToken, ImmutableSet<PriamInstance> instances) {
for (PriamInstance priamInstance : instances) {
if (newToken.equals(priamInstance.getToken())) {
if (myInstanceInfo.getRegion().equals(priamInstance.getDC())) {
throw new IllegalStateException(
String.format(
"Trying to add token %s to %s but it already exists in %s",
newToken, myInstanceInfo.getRegion(), priamInstance.getDC()));
}
return true;
}
}
return false;
}
private String getReplacedIpForAssignedToken(
ImmutableSet<PriamInstance> aliveInstances, PriamInstance instance)
throws TokenRetrieverUtils.GossipParseException {
// Infer current ownership information from other instances using gossip.
TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership =
TokenRetrieverUtils.inferTokenOwnerFromGossip(
aliveInstances, instance.getToken(), instance.getDC());
// if unreachable rely on token database.
// if mismatch rely on token database.
String ipToReplace = null;
if (inferredTokenOwnership.getTokenInformationStatus()
== TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.GOOD) {
Preconditions.checkNotNull(inferredTokenOwnership.getTokenInformation());
String inferredIp = inferredTokenOwnership.getTokenInformation().getIpAddress();
if (!inferredIp.equals(myInstanceInfo.getHostIP())
&& !inferredIp.equals(myInstanceInfo.getPrivateIP())) {
if (inferredTokenOwnership.getTokenInformation().isLive()) {
throw new TokenRetrieverUtils.GossipParseException(
"We have been assigned a token that C* thinks is alive. Throwing to buy time in the hopes that Gossip just needs to settle.");
}
ipToReplace = inferredIp;
logger.info(
"Priam found that the token is not alive according to Cassandra and we should start Cassandra in replace mode with replace ip: "
+ inferredIp);
}
} else if (inferredTokenOwnership.getTokenInformationStatus()
== TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus
.MISMATCH
&& !config.permitDirectTokenAssignmentWithGossipMismatch()) {
throw new TokenRetrieverUtils.GossipParseException(
"We saw inconsistent results from gossip. Throwing to buy time for it to settle.");
}
return ipToReplace;
}
private String getReplacedIpForExistingToken(
ImmutableSet<PriamInstance> allInstancesWithinCluster, PriamInstance priamInstance) {
// Infer current ownership information from other instances using gossip.
TokenRetrieverUtils.InferredTokenOwnership inferredTokenInformation =
TokenRetrieverUtils.inferTokenOwnerFromGossip(
allInstancesWithinCluster, priamInstance.getToken(), priamInstance.getDC());
switch (inferredTokenInformation.getTokenInformationStatus()) {
case GOOD:
if (inferredTokenInformation.getTokenInformation() == null) {
logger.error(
"If you see this message, it should not have happened. We expect token ownership information if all nodes agree. This is a code bounty issue.");
return null;
}
// Everyone agreed to a value. Check if it is live node.
if (inferredTokenInformation.getTokenInformation().isLive()) {
logger.info(
"This token is considered alive unanimously! We will not replace this instance.");
return null;
} else {
String ip = inferredTokenInformation.getTokenInformation().getIpAddress();
logger.info("Will try to replace token owned by {}", ip);
return ip;
}
case UNREACHABLE_NODES:
// In case of unable to reach sufficient nodes, fallback to IP in token
// database. This could be a genuine case of say missing security
// permissions.
logger.warn(
"Unable to reach sufficient nodes. Please check security group permissions or there might be a network partition.");
logger.info(
"Will try to replace token: {} with replacedIp from Token database: {}",
priamInstance.getToken(),
priamInstance.getHostIP());
return priamInstance.getHostIP();
case MISMATCH:
// Lets not replace the instance if gossip info is not merging!!
logger.info(
"Mismatch in gossip. We will not replace this instance, until gossip settles down.");
return null;
default:
throw new IllegalStateException(
"Unexpected value: "
+ inferredTokenInformation.getTokenInformationStatus());
}
}
private PriamInstance claimToken(PriamInstance originalInstance) {
String hostIP =
config.usePrivateIP() ? myInstanceInfo.getPrivateIP() : myInstanceInfo.getHostIP();
if (originalInstance.getInstanceId().equals(myInstanceInfo.getInstanceId())
&& originalInstance.getHostName().equals(myInstanceInfo.getHostname())
&& originalInstance.getHostIP().equals(hostIP)
&& originalInstance.getRac().equals(myInstanceInfo.getRac())) {
return originalInstance;
}
PriamInstance newInstance = new PriamInstance();
newInstance.setApp(config.getAppName());
newInstance.setId(originalInstance.getId());
newInstance.setInstanceId(myInstanceInfo.getInstanceId());
newInstance.setHost(myInstanceInfo.getHostname());
newInstance.setHostIP(hostIP);
newInstance.setRac(myInstanceInfo.getRac());
newInstance.setVolumes(originalInstance.getVolumes());
newInstance.setToken(originalInstance.getToken());
newInstance.setDC(originalInstance.getDC());
try {
factory.update(originalInstance, newInstance);
} catch (Exception ex) {
long sleepTime = randomizer.nextInt(MAX_VALUE_IN_MILISECS);
String token = newInstance.getToken();
logger.warn("Failed updating token: {}; sleeping {} millis", token, sleepTime);
sleeper.sleepQuietly(sleepTime);
throw ex;
}
return newInstance;
}
private PriamInstance createToken(int id, String token) {
try {
String hostIp =
config.usePrivateIP()
? myInstanceInfo.getPrivateIP()
: myInstanceInfo.getHostIP();
return factory.create(
config.getAppName(),
id,
myInstanceInfo.getInstanceId(),
myInstanceInfo.getHostname(),
hostIp,
myInstanceInfo.getRac(),
null /* volumes */,
token);
} catch (Exception ex) {
long sleepTime = randomizer.nextInt(MAX_VALUE_IN_MILISECS);
logger.warn("Failed updating token: {}; sleeping {} millis", token, sleepTime);
sleeper.sleepQuietly(sleepTime);
throw ex;
}
}
private Optional<PriamInstance> findInstance(ImmutableSet<PriamInstance> instances) {
return instances
.stream()
.filter((i) -> i.getInstanceId().equals(myInstanceInfo.getInstanceId()))
.findFirst();
}
private Set<String> getRacInstanceIds() { // TODO(CASS-1986)
ImmutableSet<String> racMembership = membership.getRacMembership();
return config.isDualAccount()
? Sets.union(membership.getCrossAccountRacMembership(), racMembership)
: racMembership;
}
private boolean isNew(PriamInstance instance) {
return instance.getInstanceId().equals(NEW_SLOT);
}
}
| 3,215 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/config/InstanceInfo.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.identity.config;
import com.google.common.collect.ImmutableList;
import com.google.inject.ImplementedBy;
import com.netflix.priam.config.IConfiguration;
import java.util.List;
/** A means to fetch meta data of the running instance. */
@ImplementedBy(AWSInstanceInfo.class)
public interface InstanceInfo {
    /**
     * Get the availability zone of the running instance.
     *
     * @return the availability zone of the running instance. e.g. us-east-1c
     */
    String getRac();

    /**
     * Get the list of default racks available for this DC. This is used if no value is configured
     * for {@link IConfiguration#getRacs()}
     *
     * @return list of default racks; by default, just this instance's own rack.
     */
    default List<String> getDefaultRacks() {
        return ImmutableList.of(getRac());
    }

    /**
     * Get the hostname for the running instance. Cannot be null.
     *
     * @return the public hostname for the running instance. e.g.:
     *     ec2-12-34-56-78.compute-1.amazonaws.com, if available. Else return private ip address for
     *     running instance.
     */
    String getHostname();

    /**
     * Get ip address for running instance. Cannot be null.
     *
     * @return public ip if one is provided or private ip address for running instance.
     */
    String getHostIP();

    /**
     * Get private ip address for running instance.
     *
     * @return private ip address for running instance.
     */
    String getPrivateIP();

    /**
     * Get the instance id of the running instance.
     *
     * @return the instance id of the running instance. e.g. i-07a88a49ff155353
     */
    String getInstanceId();

    /**
     * Get the instance type of the running instance.
     *
     * @return the instance type of the running instance. e.g. i3.2xlarge
     */
    String getInstanceType();

    /**
     * Get the id of the vpc account for running instance.
     *
     * @return the id of the vpc account for running instance, null if does not exist.
     */
    String getVpcId();

    /**
     * Get the region/data center of running instance
     *
     * @return the region of running instance, could be null/empty. (e.g. us-east-1)
     */
    String getRegion();

    /**
     * Get the ASG in which this instance is deployed. Note that Priam requires instances to be
     * under an ASG.
     *
     * @return the ASG of the instance. ex: cassandra_app--useast1e
     */
    String getAutoScalingGroup();

    /**
     * Environment of the current running instance. AWS only allows VPC environment (default).
     * Classic is deprecated environment by AWS.
     *
     * @return Environment of the current running instance.
     */
    InstanceEnvironment getInstanceEnvironment();

    enum InstanceEnvironment {
        /** EC2-Classic networking (deprecated by AWS). */
        CLASSIC,
        /** VPC networking (the AWS default). */
        VPC
    }
}
| 3,216 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/config/LocalInstanceInfo.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.identity.config;
/**
 * Looks at local (system) properties for metadata about the running 'instance'. Typically, this is
 * used for locally-deployed testing.
 */
public class LocalInstanceInfo implements InstanceInfo {
    private static final String PREFIX = "Priam.localInstance.";

    /** Reads the named property under {@link #PREFIX}, defaulting to the empty string. */
    private static String property(String name) {
        return System.getProperty(PREFIX + name, "");
    }

    @Override
    public String getRac() {
        return property("availabilityZone");
    }

    @Override
    public String getHostname() {
        return property("privateIp");
    }

    @Override
    public String getHostIP() {
        return property("privateIp");
    }

    @Override
    public String getPrivateIP() {
        return property("privateIp");
    }

    @Override
    public String getInstanceId() {
        return property("instanceId");
    }

    @Override
    public String getInstanceType() {
        return property("instanceType");
    }

    @Override
    public String getVpcId() {
        return property("vpcid");
    }

    @Override
    public InstanceEnvironment getInstanceEnvironment() {
        // FIX: getVpcId() defaults to "" and can never return null, so the previous
        // null check made this method always report VPC. Treat a missing/empty vpc id
        // as the CLASSIC environment, mirroring the null-vpc convention of the interface.
        return getVpcId().isEmpty() ? InstanceEnvironment.CLASSIC : InstanceEnvironment.VPC;
    }

    @Override
    public String getAutoScalingGroup() {
        return property("asg");
    }

    @Override
    public String getRegion() {
        return property("region");
    }
}
| 3,217 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity | Create_ds/Priam/priam/src/main/java/com/netflix/priam/identity/config/AWSInstanceInfo.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.identity.config;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.amazonaws.services.ec2.model.*;
import com.amazonaws.util.EC2MetadataUtils;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.priam.cred.ICredential;
import com.netflix.priam.utils.RetryableCallable;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang3.StringUtils;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Singleton
public class AWSInstanceInfo implements InstanceInfo {
    private static final Logger logger = LoggerFactory.getLogger(AWSInstanceInfo.class);

    // EC2 instance-metadata paths used to resolve this host's names and addresses.
    static final String PUBLIC_HOSTNAME_URL = "/latest/meta-data/public-hostname";
    static final String LOCAL_HOSTNAME_URL = "/latest/meta-data/local-hostname";
    static final String PUBLIC_HOSTIP_URL = "/latest/meta-data/public-ipv4";
    static final String LOCAL_HOSTIP_URL = "/latest/meta-data/local-ipv4";

    // Lazily-populated one-shot caches: each getter queries EC2 instance metadata at
    // most once and then serves the cached value.
    private String privateIp;
    private String hostIP;
    private String rac;
    private String hostName;
    private String instanceId;
    private String instanceType;
    private String mac;
    private String region;
    private String vpcId;
    private InstanceEnvironment instanceEnvironment;

    private final ICredential credential;

    @Inject
    public AWSInstanceInfo(ICredential credential) {
        this.credential = credential;
    }

    /** @return this instance's private IPv4 address, from instance metadata. */
    @Override
    public String getPrivateIP() {
        if (privateIp == null) {
            privateIp = EC2MetadataUtils.getPrivateIpAddress();
        }
        return privateIp;
    }

    /** @return this instance's availability zone; Priam treats AZs as "racks". */
    @Override
    public String getRac() {
        if (rac == null) {
            rac = EC2MetadataUtils.getAvailabilityZone();
        }
        return rac;
    }

    /**
     * @return up to the first three availability zones in this region whose state is
     *     "available", in the order reported by the EC2 API.
     */
    @Override
    public List<String> getDefaultRacks() {
        // Get the first 3 available zones in the region
        AmazonEC2 client =
                AmazonEC2ClientBuilder.standard()
                        .withCredentials(credential.getAwsCredentialProvider())
                        .withRegion(getRegion())
                        .build();
        DescribeAvailabilityZonesResult res = client.describeAvailabilityZones();
        List<String> zone = Lists.newArrayList();
        for (AvailabilityZone reg : res.getAvailabilityZones()) {
            if (reg.getState().equals("available")) zone.add(reg.getZoneName());
            if (zone.size() == 3) break;
        }
        return ImmutableList.copyOf(zone);
    }

    /** @return this instance's EC2 instance id, from instance metadata. */
    @Override
    public String getInstanceId() {
        if (instanceId == null) {
            instanceId = EC2MetadataUtils.getInstanceId();
        }
        return instanceId;
    }

    /** @return this instance's EC2 instance type, from instance metadata. */
    @Override
    public String getInstanceType() {
        if (instanceType == null) {
            instanceType = EC2MetadataUtils.getInstanceType();
        }
        return instanceType;
    }

    /** @return the MAC address of this instance's first network interface. */
    private String getMac() {
        if (mac == null) {
            mac = EC2MetadataUtils.getNetworkInterfaces().get(0).getMacAddress();
        }
        return mac;
    }

    /** @return the AWS region this instance is running in, from instance metadata. */
    @Override
    public String getRegion() {
        if (region == null) {
            region = EC2MetadataUtils.getEC2InstanceRegion();
        }
        return region;
    }

    /**
     * @return the VPC id of this instance's first network interface, or null when the
     *     metadata exposes no MAC address or no VPC id (e.g. an EC2-Classic instance).
     */
    @Override
    public String getVpcId() {
        // Without a MAC address in the metadata there is no network interface to look up.
        String macAddress = getMac();
        if (StringUtils.isEmpty(macAddress)) return null;
        if (vpcId == null)
            try {
                vpcId = EC2MetadataUtils.getNetworkInterfaces().get(0).getVpcId();
            } catch (Exception e) {
                logger.info(
                        "Vpc id does not exist for running instance, not fatal as running instance maybe not be in vpc. Msg: {}",
                        e.getLocalizedMessage());
            }
        return vpcId;
    }

    /**
     * Looks up this instance's auto-scaling group via the EC2 API (the
     * "aws:autoscaling:groupName" tag), retrying up to 15 times 30s apart.
     *
     * @return the ASG name, or null when it cannot be determined.
     */
    @Override
    public String getAutoScalingGroup() {
        final AmazonEC2 client =
                AmazonEC2ClientBuilder.standard()
                        .withCredentials(credential.getAwsCredentialProvider())
                        .withRegion(getRegion())
                        .build();
        try {
            return new RetryableCallable<String>(15, 30000) {
                public String retriableCall() throws IllegalStateException {
                    DescribeInstancesRequest desc =
                            new DescribeInstancesRequest().withInstanceIds(getInstanceId());
                    DescribeInstancesResult res = client.describeInstances(desc);
                    for (Reservation resr : res.getReservations()) {
                        for (Instance ins : resr.getInstances()) {
                            for (com.amazonaws.services.ec2.model.Tag tag : ins.getTags()) {
                                if (tag.getKey().equals("aws:autoscaling:groupName"))
                                    return tag.getValue();
                            }
                        }
                    }
                    // Throwing makes RetryableCallable retry until attempts are exhausted.
                    throw new IllegalStateException("Couldn't determine ASG name");
                }
            }.call();
        } catch (Exception e) {
            logger.error("Failed to determine ASG name.", e);
            return null;
        }
    }

    /** @return VPC when the instance has a VPC id, otherwise CLASSIC. */
    @Override
    public InstanceEnvironment getInstanceEnvironment() {
        if (instanceEnvironment == null) {
            instanceEnvironment =
                    (getVpcId() == null) ? InstanceEnvironment.CLASSIC : InstanceEnvironment.VPC;
        }
        return instanceEnvironment;
    }

    /** @return the public hostname when one exists, otherwise the local hostname. */
    @Override
    public String getHostname() {
        if (hostName == null) {
            String publicHostName = tryGetDataFromUrl(PUBLIC_HOSTNAME_URL);
            hostName =
                    publicHostName == null ? tryGetDataFromUrl(LOCAL_HOSTNAME_URL) : publicHostName;
        }
        return hostName;
    }

    /** @return the public IPv4 address when one exists, otherwise the local one. */
    @Override
    public String getHostIP() {
        if (hostIP == null) {
            String publicHostIP = tryGetDataFromUrl(PUBLIC_HOSTIP_URL);
            hostIP = publicHostIP == null ? tryGetDataFromUrl(LOCAL_HOSTIP_URL) : publicHostIP;
        }
        return hostIP;
    }

    /**
     * Fetches an instance-metadata value, treating any failure (e.g. key absent on this
     * instance) as absence.
     *
     * @return the metadata value, or null when it cannot be retrieved.
     */
    String tryGetDataFromUrl(String url) {
        try {
            return EC2MetadataUtils.getData(url);
        } catch (Exception e) {
            return null;
        }
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.connection;
import org.apache.cassandra.tools.NodeProbe;
/**
 * Represents an entity interested in a change of state to the NodeTool, i.e. a party to be
 * notified whenever a fresh JMX connection to Cassandra is established (see JMXNodeTool).
 */
public interface INodeToolObserver {
    /**
     * Invoked after a new JMX connection has been established.
     *
     * @param nodeTool the newly created connection.
     */
    void nodeToolHasChanged(NodeProbe nodeTool);
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.connection;
/** A source of NodeTool connection-change notifications (see JMXNodeTool). */
public interface INodeToolObservable {
    /*
     * @param observer to add to list of internal observers.
     */
    void addObserver(INodeToolObserver observer);

    /*
     * @param observer to remove from the list of internal observers; a no-op if absent.
     */
    void deleteObserver(INodeToolObserver observer);
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.connection;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.health.CassandraMonitor;
import com.netflix.priam.utils.BoundedExponentialRetryCallable;
import java.io.IOException;
import java.io.PrintStream;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.text.DecimalFormat;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.cassandra.db.ColumnFamilyStoreMBean;
import org.apache.cassandra.tools.NodeProbe;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Class to get data out of Cassandra JMX */
@Singleton
public class JMXNodeTool extends NodeProbe implements INodeToolObservable {
private static final Logger logger = LoggerFactory.getLogger(JMXNodeTool.class);
private static volatile JMXNodeTool tool = null;
private MBeanServerConnection mbeanServerConn = null;
private static final Set<INodeToolObserver> observers = new HashSet<>();
/**
* Hostname and Port to talk to will be same server for now optionally we might want the ip to
* poll.
*
* <p>NOTE: This class shouldn't be a singleton and this shouldn't be cached.
*
* <p>This will work only if cassandra runs.
*/
public JMXNodeTool(String host, int port) throws IOException, InterruptedException {
super(host, port);
}
public JMXNodeTool(String host, int port, String username, String password)
throws IOException, InterruptedException {
super(host, port, username, password);
}
@Inject
public JMXNodeTool(IConfiguration config) throws IOException, InterruptedException {
super("localhost", config.getJmxPort());
}
/**
* try to create if it is null.
*
* @throws JMXConnectionException
*/
public static JMXNodeTool instance(IConfiguration config) throws JMXConnectionException {
if (!testConnection()) tool = connect(config);
return tool;
}
public static <T> T getRemoteBean(
Class<T> clazz, String mbeanName, IConfiguration config, boolean mxbean)
throws IOException, MalformedObjectNameException {
if (mxbean)
return ManagementFactory.newPlatformMXBeanProxy(
JMXNodeTool.instance(config).mbeanServerConn, mbeanName, clazz);
else
return JMX.newMBeanProxy(
JMXNodeTool.instance(config).mbeanServerConn, new ObjectName(mbeanName), clazz);
}
/**
* Returns plain MBeanServer Connection
*
* @param config Configuration to initialize JMX Connection
* @return MBeanServerConnection
* @throws JMXConnectionException
*/
public static MBeanServerConnection getMbeanServerConn(IConfiguration config)
throws JMXConnectionException {
return JMXNodeTool.instance(config).mbeanServerConn;
}
/**
* This method will test if you can connect and query something before handing over the
* connection, This is required for our retry logic.
*
* @return
*/
private static boolean testConnection() {
// connecting first time hence return false.
if (tool == null) return false;
try {
MBeanServerConnection serverConn = tool.mbeanServerConn;
if (serverConn == null) {
logger.info(
"Test connection to remove MBean server failed as there is no connection.");
return false;
}
if (serverConn.getMBeanCount()
< 1) { // If C* is up, it should have at multiple MBeans registered.
logger.info(
"Test connection to remove MBean server failed as there is no registered MBeans.");
return false;
}
} catch (Throwable ex) {
closeQuietly(tool);
logger.error(
"Exception while checking JMX connection to C*, msg: {}",
ex.getLocalizedMessage());
return false;
}
return true;
}
private static void closeQuietly(JMXNodeTool tool) {
try {
tool.close();
} catch (Exception e) {
logger.warn("failed to close jmx node tool", e);
}
}
/**
* A means to clean up existing and recreate the JMX connection to the Cassandra process.
*
* @return the new connection.
*/
public static synchronized JMXNodeTool createNewConnection(final IConfiguration config)
throws JMXConnectionException {
return createConnection(config);
}
public static synchronized JMXNodeTool connect(final IConfiguration config)
throws JMXConnectionException {
// lets make sure some other monitor didn't sneak in the recreated the connection already
if (!testConnection()) {
if (tool != null) {
try {
tool.close(); // Ensure we properly close any existing (even if it's
// corrupted) connection to the remote jmx agent
} catch (IOException e) {
logger.warn(
"Exception performing house cleaning -- closing current connection to jmx remote agent. Msg: {}",
e.getLocalizedMessage(),
e);
}
}
} else {
// Someone beat you and already created the connection, nothing you need to do..
return tool;
}
return createConnection(config);
}
private static JMXNodeTool createConnection(final IConfiguration config)
throws JMXConnectionException {
// If Cassandra is started then only start the monitoring
if (!CassandraMonitor.hasCassadraStarted()) {
String exceptionMsg =
"Cannot perform connection to remove jmx agent as Cassandra has not yet started, check back again later";
logger.debug(exceptionMsg);
throw new JMXConnectionException(exceptionMsg);
}
if (tool
!= null) { // lets make sure we properly close any existing (even if it's corrupted)
// connection to the remote jmx agent
try {
tool.close();
} catch (IOException e) {
logger.warn(
"Exception performing house cleaning -- closing current connection to jmx remote agent. Msg: {}",
e.getLocalizedMessage(),
e);
}
}
try {
tool =
new BoundedExponentialRetryCallable<JMXNodeTool>() {
@Override
public JMXNodeTool retriableCall() throws Exception {
JMXNodeTool nodetool;
if ((config.getJmxUsername() == null
|| config.getJmxUsername().isEmpty())
&& (config.getJmxPassword() == null
|| config.getJmxPassword().isEmpty())) {
nodetool = new JMXNodeTool("localhost", config.getJmxPort());
} else {
nodetool =
new JMXNodeTool(
"localhost",
config.getJmxPort(),
config.getJmxUsername(),
config.getJmxPassword());
}
Field fields[] = NodeProbe.class.getDeclaredFields();
for (Field field : fields) {
if (!field.getName().equals("mbeanServerConn")) continue;
field.setAccessible(true);
nodetool.mbeanServerConn =
(MBeanServerConnection) field.get(nodetool);
}
return nodetool;
}
}.call();
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new JMXConnectionException(e.getMessage());
}
logger.info("Connected to remote jmx agent, will notify interested parties!");
for (INodeToolObserver observer : observers) {
observer.nodeToolHasChanged(tool);
}
return tool;
}
/**
* You must do the compaction before running this to remove the duplicate tokens out of the
* server. TODO code it.
*/
public JSONObject estimateKeys() throws JSONException {
Iterator<Entry<String, ColumnFamilyStoreMBean>> it =
super.getColumnFamilyStoreMBeanProxies();
JSONObject object = new JSONObject();
while (it.hasNext()) {
Entry<String, ColumnFamilyStoreMBean> entry = it.next();
object.put("keyspace", entry.getKey());
object.put("column_family", entry.getValue().getColumnFamilyName());
object.put("estimated_size", entry.getValue().estimateKeys());
}
return object;
}
public JSONObject info() throws JSONException {
JSONObject object = new JSONObject();
object.put("gossip_active", isInitialized());
object.put("thrift_active", isThriftServerRunning());
object.put("native_active", isNativeTransportRunning());
object.put("token", getTokens().toString());
object.put("load", getLoadString());
object.put("generation_no", getCurrentGenerationNumber());
object.put("uptime", getUptime() / 1000);
MemoryUsage heapUsage = getHeapMemoryUsage();
double memUsed = (double) heapUsage.getUsed() / (1024 * 1024);
double memMax = (double) heapUsage.getMax() / (1024 * 1024);
object.put("heap_memory_mb", memUsed + "/" + memMax);
object.put("data_center", getDataCenter());
object.put("rack", getRack());
return object;
}
public JSONObject statusInformation() throws JSONException {
JSONObject jsonObject = new JSONObject();
jsonObject.put("live", getLiveNodes());
jsonObject.put("unreachable", getUnreachableNodes());
jsonObject.put("joining", getJoiningNodes());
jsonObject.put("leaving", getLeavingNodes());
jsonObject.put("moving", getMovingNodes());
jsonObject.put("tokenToEndpointMap", getTokenToEndpointMap());
return jsonObject;
}
public JSONArray ring(String keyspace) throws JSONException {
JSONArray ring = new JSONArray();
Map<String, String> tokenToEndpoint = getTokenToEndpointMap();
List<String> sortedTokens = new ArrayList<>(tokenToEndpoint.keySet());
Collection<String> liveNodes = getLiveNodes();
Collection<String> deadNodes = getUnreachableNodes();
Collection<String> joiningNodes = getJoiningNodes();
Collection<String> leavingNodes = getLeavingNodes();
Collection<String> movingNodes = getMovingNodes();
Map<String, String> loadMap = getLoadMap();
String format = "%-16s%-12s%-12s%-7s%-8s%-16s%-20s%-44s%n";
// Calculate per-token ownership of the ring
Map<InetAddress, Float> ownerships;
try {
ownerships = effectiveOwnership(keyspace);
} catch (IllegalStateException ex) {
ownerships = getOwnership();
}
for (String token : sortedTokens) {
String primaryEndpoint = tokenToEndpoint.get(token);
String dataCenter;
try {
dataCenter = getEndpointSnitchInfoProxy().getDatacenter(primaryEndpoint);
} catch (UnknownHostException e) {
dataCenter = "Unknown";
}
String rack;
try {
rack = getEndpointSnitchInfoProxy().getRack(primaryEndpoint);
} catch (UnknownHostException e) {
rack = "Unknown";
}
String status =
liveNodes.contains(primaryEndpoint)
? "Up"
: deadNodes.contains(primaryEndpoint) ? "Down" : "?";
String state = "Normal";
if (joiningNodes.contains(primaryEndpoint)) state = "Joining";
else if (leavingNodes.contains(primaryEndpoint)) state = "Leaving";
else if (movingNodes.contains(primaryEndpoint)) state = "Moving";
String load = loadMap.getOrDefault(primaryEndpoint, "?");
String owns =
new DecimalFormat("##0.00%")
.format(ownerships.get(token) == null ? 0.0F : ownerships.get(token));
ring.put(
createJson(
primaryEndpoint, dataCenter, rack, status, state, load, owns, token));
}
return ring;
}
private JSONObject createJson(
String primaryEndpoint,
String dataCenter,
String rack,
String status,
String state,
String load,
String owns,
String token)
throws JSONException {
JSONObject object = new JSONObject();
object.put("endpoint", primaryEndpoint);
object.put("dc", dataCenter);
object.put("rack", rack);
object.put("status", status);
object.put("state", state);
object.put("load", load);
object.put("owns", owns);
object.put("token", token);
return object;
}
public void repair(boolean isSequential, boolean localDataCenterOnly)
throws IOException, ExecutionException, InterruptedException {
repair(isSequential, localDataCenterOnly, false);
}
public void repair(boolean isSequential, boolean localDataCenterOnly, boolean primaryRange)
throws IOException, ExecutionException, InterruptedException {
/**
* ** Replace with this in 3.10 cassandra-all. Map<String, String> repairOptions = new
* HashMap<>(); String isParallel = !isSequential?"true":"false";
* repairOptions.put(RepairOption.PARALLELISM_KEY, isParallel);
* repairOptions.put(RepairOption.PRIMARY_RANGE_KEY, primaryRange+""); if
* (localDataCenterOnly) repairOptions.put(RepairOption.DATACENTERS_KEY, getDataCenter());
*/
PrintStream printStream = new PrintStream("repair.log");
Set<String> datacenters = null;
if (localDataCenterOnly) {
datacenters = new HashSet<>();
datacenters.add(getDataCenter());
}
for (String keyspace : getKeyspaces())
forceRepairAsync(
printStream, keyspace, isSequential, datacenters, null, primaryRange, true);
/*if (primaryRange)
forceKeyspaceRepairPrimaryRange(keyspace, isSequential, localDataCenterOnly, new String[0]);
else
forceKeyspaceRepair(keyspace, isSequential, localDataCenterOnly, new String[0]);*/
}
public void cleanup() throws IOException, ExecutionException, InterruptedException {
for (String keyspace : getKeyspaces()) forceKeyspaceCleanup(0, keyspace);
// forceKeyspaceCleanup(keyspace, new String[0]);
}
public void setIncrementalBackupsEnabled(boolean enabled) {
super.setIncrementalBackupsEnabled(enabled);
}
public boolean isIncrementalBackupsEnabled() {
return super.isIncrementalBackupsEnabled();
}
public void refresh(List<String> keyspaces)
throws IOException, ExecutionException, InterruptedException {
Iterator<Entry<String, ColumnFamilyStoreMBean>> it =
super.getColumnFamilyStoreMBeanProxies();
while (it.hasNext()) {
Entry<String, ColumnFamilyStoreMBean> entry = it.next();
if (keyspaces.contains(entry.getKey())) {
logger.info(
"Refreshing {} {}", entry.getKey(), entry.getValue().getColumnFamilyName());
loadNewSSTables(entry.getKey(), entry.getValue().getColumnFamilyName());
}
}
}
@Override
public void close() throws IOException {
synchronized (JMXNodeTool.class) {
tool = null;
super.close();
}
}
/** @param observer to add to list of internal observers. This behavior is thread-safe. */
@Override
public void addObserver(INodeToolObserver observer) {
if (observer == null) throw new NullPointerException("Cannot not observer.");
synchronized (observers) {
observers.add(observer); // if observer exist, it's a noop
}
}
/** @param observer to be removed; behavior is thread-safe. */
@Override
public void deleteObserver(INodeToolObserver observer) {
synchronized (observers) {
observers.remove(observer);
}
}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.connection;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.utils.RetryableCallable;
import java.util.*;
import javax.inject.Inject;
import org.apache.cassandra.db.ColumnFamilyStoreMBean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class encapsulates interactions with Cassandra over JMX (snapshots, flushes,
 * compactions, schema and gossip inspection). Created by aagrawal on 6/19/18.
 */
public class CassandraOperations implements ICassandraOperations {
    private static final Logger logger = LoggerFactory.getLogger(CassandraOperations.class);
    private final IConfiguration configuration;

    @Inject
    public CassandraOperations(IConfiguration configuration) {
        this.configuration = configuration;
    }

    /**
     * Takes a snapshot with the given name, retrying on transient glitches; on final failure
     * it asks Cassandra to clear any partial snapshot of that name before rethrowing.
     */
    @Override
    public synchronized void takeSnapshot(final String snapshotName) throws Exception {
        // Retry max of 6 times with 10 second in between (for one minute). This is to ensure that
        // we overcome any temporary glitch.
        // Note that operation MAY fail if cassandra successfully took the snapshot of certain
        // columnfamily(ies) and we try to create snapshot with
        // same name. It is a good practice to call clearSnapshot after this operation fails, to
        // ensure we don't leave any left overs.
        // Example scenario: Change of file permissions by manual intervention and C* unable to take
        // snapshot of one CF.
        try {
            new RetryableCallable<Void>(6, 10000) {
                public Void retriableCall() throws Exception {
                    JMXNodeTool nodetool = JMXNodeTool.instance(configuration);
                    nodetool.takeSnapshot(snapshotName, null);
                    return null;
                }
            }.call();
        } catch (Exception e) {
            // Pass the exception as the last argument so the stack trace is logged.
            logger.error(
                    "Error while taking snapshot {}. Asking Cassandra to clear snapshot to avoid accumulation of snapshots.",
                    snapshotName,
                    e);
            clearSnapshot(snapshotName);
            throw e;
        }
    }

    /** Removes the named snapshot from disk, retrying on transient failures. */
    @Override
    public void clearSnapshot(final String snapshotTag) throws Exception {
        new RetryableCallable<Void>() {
            public Void retriableCall() throws Exception {
                JMXNodeTool nodetool = JMXNodeTool.instance(configuration);
                nodetool.clearSnapshot(snapshotTag);
                return null;
            }
        }.call();
    }

    /** @return names of all keyspaces on this node. */
    @Override
    public List<String> getKeyspaces() throws Exception {
        return new RetryableCallable<List<String>>() {
            public List<String> retriableCall() throws Exception {
                try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) {
                    return nodeTool.getKeyspaces();
                }
            }
        }.call();
    }

    /** @return map of keyspace name to the names of its column families (tables). */
    @Override
    public Map<String, List<String>> getColumnfamilies() throws Exception {
        return new RetryableCallable<Map<String, List<String>>>() {
            public Map<String, List<String>> retriableCall() throws Exception {
                try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) {
                    final Map<String, List<String>> columnfamilies = new HashMap<>();
                    Iterator<Map.Entry<String, ColumnFamilyStoreMBean>> columnfamilyStoreMBean =
                            nodeTool.getColumnFamilyStoreMBeanProxies();
                    columnfamilyStoreMBean.forEachRemaining(
                            entry -> {
                                columnfamilies.putIfAbsent(entry.getKey(), new ArrayList<>());
                                columnfamilies
                                        .get(entry.getKey())
                                        .add(entry.getValue().getColumnFamilyName());
                            });
                    return columnfamilies;
                }
            }
        }.call();
    }

    /** Triggers a major compaction of the given keyspace / column families. */
    @Override
    public void forceKeyspaceCompaction(String keyspaceName, String... columnfamilies)
            throws Exception {
        new RetryableCallable<Void>() {
            public Void retriableCall() throws Exception {
                try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) {
                    nodeTool.forceKeyspaceCompaction(keyspaceName, columnfamilies);
                    return null;
                }
            }
        }.call();
    }

    /** Flushes all memtables of the given keyspace to disk. */
    @Override
    public void forceKeyspaceFlush(String keyspaceName) throws Exception {
        new RetryableCallable<Void>() {
            public Void retriableCall() throws Exception {
                try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) {
                    nodeTool.forceKeyspaceFlush(keyspaceName);
                    return null;
                }
            }
        }.call();
    }

    /**
     * Parses `nodetool gossipinfo` output into one attribute map per endpoint. Keys are the
     * upper-cased gossip attribute names plus PUBLIC_IP; STATUS and TOKENS get special
     * handling. Parse failures are logged and yield a (possibly empty) partial list rather
     * than an exception.
     */
    @Override
    public List<Map<String, String>> gossipInfo() throws Exception {
        List<Map<String, String>> returnPublicIpSourceIpMap = new ArrayList<>();
        try {
            JMXNodeTool nodeTool;
            try {
                nodeTool = JMXNodeTool.instance(configuration);
            } catch (JMXConnectionException e) {
                logger.error(
                        "Exception in fetching c* jmx tool . Msg: {}", e.getLocalizedMessage(), e);
                throw e;
            }
            // Gossip output is one block per endpoint, each block introduced by "/<ip>".
            String[] gossipInfoLines = nodeTool.getGossipInfo().split("/");
            Arrays.stream(gossipInfoLines)
                    .forEach(
                            gossipInfoLine -> {
                                Map<String, String> gossipMap = new HashMap<>();
                                String[] gossipInfoSubLines = gossipInfoLine.split("\\r?\\n");
                                if (gossipInfoSubLines.length
                                        > 2) // Random check for existence of some lines
                                {
                                    gossipMap.put("PUBLIC_IP", gossipInfoSubLines[0].trim());
                                    if (gossipMap.get("PUBLIC_IP") != null) {
                                        returnPublicIpSourceIpMap.add(gossipMap);
                                    }
                                    for (String gossipInfoSubLine : gossipInfoSubLines) {
                                        String[] gossipLineEntry = gossipInfoSubLine.split(":");
                                        if (gossipLineEntry.length == 2) {
                                            gossipMap.put(
                                                    gossipLineEntry[0].trim().toUpperCase(),
                                                    gossipLineEntry[1].trim());
                                        } else if (gossipLineEntry.length == 3) {
                                            if (gossipLineEntry[0]
                                                    .trim()
                                                    .equalsIgnoreCase("STATUS")) {
                                                // Special handling for STATUS as C* puts first
                                                // token in STATUS or "true".
                                                gossipMap.put(
                                                        gossipLineEntry[0].trim().toUpperCase(),
                                                        gossipLineEntry[2].split(",")[0].trim());
                                            } else if (gossipLineEntry[0]
                                                    .trim()
                                                    .equalsIgnoreCase("TOKENS")) {
                                                // Special handling for tokens as it is always
                                                // "hidden".
                                                try {
                                                    gossipMap.put(
                                                            gossipLineEntry[0].trim().toUpperCase(),
                                                            nodeTool.getTokens(
                                                                            gossipMap.get(
                                                                                    "PUBLIC_IP"))
                                                                    .toString());
                                                } catch (Exception e) {
                                                    logger.warn(
                                                            "Unable to find TOKEN(s) for the IP: {}",
                                                            gossipMap.get("PUBLIC_IP"));
                                                }
                                            } else {
                                                gossipMap.put(
                                                        gossipLineEntry[0].trim().toUpperCase(),
                                                        gossipLineEntry[2].trim());
                                            }
                                        }
                                    }
                                }
                            });
        } catch (Exception e) {
            logger.error("Unable to parse nodetool gossipinfo output from Cassandra.", e);
        }
        return returnPublicIpSourceIpMap;
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.connection;
import java.io.IOException;
/** Signals that a JMX connection to the local Cassandra process could not be established. */
public class JMXConnectionException extends IOException {
    private static final long serialVersionUID = 444L;

    /**
     * Creates an exception describing a failed JMX connection attempt.
     *
     * @param message human-readable description of the failure.
     */
    public JMXConnectionException(String message) {
        super(message);
    }

    /**
     * Creates an exception describing a failed JMX connection attempt along with its root
     * cause.
     *
     * @param message human-readable description of the failure.
     * @param e underlying exception that triggered this one.
     */
    public JMXConnectionException(String message, Exception e) {
        super(message, e);
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.connection;
import java.util.List;
import java.util.Map;
/**
 * Operations Priam performs against the local Cassandra node over JMX (snapshots, flushes,
 * compactions, gossip inspection). Created by aagrawal on 2/16/19.
 */
public interface ICassandraOperations {
    /**
     * This method needs to be synchronized. Context: During the transition phase to backup version
     * 2.0, we might be executing multiple snapshots at the same time. To avoid, unknown behavior by
     * Cassandra, it is wise to keep this method sync. Also, with backups being on CRON, we don't
     * know how often operator is taking snapshot.
     *
     * @param snapshotName Name of the snapshot on disk. This snapshotName should be UNIQUE among
     *     all the snapshots. Try to append UUID to snapshotName to ensure uniqueness. This is to
     *     ensure a) Snapshot fails if name are not unique. b) You might take snapshots which are
     *     not "part" of same snapshot. e.g. Any leftovers from previous operation. c) Once snapshot
     *     fails, this will clean the failed snapshot.
     * @throws Exception in case of error while taking a snapshot by Cassandra.
     */
    void takeSnapshot(final String snapshotName) throws Exception;

    /**
     * Clear the snapshot tag from disk.
     *
     * @param snapshotTag Name of the snapshot to be removed.
     * @throws Exception in case of error while clearing a snapshot.
     */
    void clearSnapshot(final String snapshotTag) throws Exception;

    /**
     * Get all the keyspaces existing on this node.
     *
     * @return List of keyspace names.
     * @throws Exception in case of reaching to JMX endpoint.
     */
    List<String> getKeyspaces() throws Exception;

    /**
     * @return map of keyspace name to the names of its column families (tables).
     * @throws Exception in case of error reaching the JMX endpoint.
     */
    Map<String, List<String>> getColumnfamilies() throws Exception;

    /**
     * Triggers a major compaction of the given keyspace / column families.
     *
     * @throws Exception in case of error reaching the JMX endpoint.
     */
    void forceKeyspaceCompaction(String keyspaceName, String... columnfamilies) throws Exception;

    /**
     * Flushes the memtables of the given keyspace to disk.
     *
     * @throws Exception in case of error reaching the JMX endpoint.
     */
    void forceKeyspaceFlush(String keyspaceName) throws Exception;

    /**
     * @return one attribute map per endpoint known to gossip (see CassandraOperations for the
     *     map's key conventions).
     * @throws Exception in case of error reaching the JMX endpoint.
     */
    List<Map<String, String>> gossipInfo() throws Exception;
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.config;
import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.netflix.priam.scheduler.CronTimer;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.scheduler.TaskTimer;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Task that persists the structured, merged Priam configuration to disk as JSON so external
 * readers can observe a consistent snapshot of the effective configuration.
 *
 * <p>The configuration may contain sensitive values, so the output directory and file are
 * restricted to owner-only POSIX permissions.
 */
@Singleton
public class PriamConfigurationPersister extends Task {
    public static final String NAME = "PriamConfigurationPersister";
    private static final Logger logger = LoggerFactory.getLogger(PriamConfigurationPersister.class);

    // ObjectWriter is immutable and thread-safe; build it once instead of on every execution.
    private static final ObjectWriter STRUCTURED_WRITER =
            new ObjectMapper().writer(new MinimalPrettyPrinter());

    private final Path mergedConfigDirectory;
    private final Path structuredPath;

    @Inject
    public PriamConfigurationPersister(IConfiguration config) {
        super(config);
        mergedConfigDirectory = Paths.get(config.getMergedConfigurationDirectory());
        structuredPath = Paths.get(config.getMergedConfigurationDirectory(), "structured.json");
    }

    /** Creates the output directory with owner-only permissions if it does not already exist. */
    private synchronized void ensurePaths() throws IOException {
        File directory = mergedConfigDirectory.toFile();
        if (directory.mkdirs()) {
            // Only adjust permissions when we actually created the directory; a pre-existing
            // directory's permissions are left untouched.
            Files.setPosixFilePermissions(
                    mergedConfigDirectory, PosixFilePermissions.fromString("rwx------"));
            logger.info("Set up PriamConfigurationPersister directory successfully");
        }
    }

    /**
     * Serializes the merged configuration to a temporary file and atomically moves it over the
     * previous snapshot so readers never observe a partially written file.
     *
     * @throws Exception on I/O or serialization failure while producing the temp file
     */
    @Override
    public void execute() throws Exception {
        ensurePaths();
        Path tempPath = null;
        try {
            File output =
                    File.createTempFile(
                            structuredPath.getFileName().toString(),
                            ".tmp",
                            mergedConfigDirectory.toFile());
            tempPath = output.toPath();
            // The configuration might contain sensitive information, so ... don't let non Priam
            // users read it.
            // Theoretically createTempFile creates the file with the right permissions, but I want
            // to be explicit.
            Files.setPosixFilePermissions(tempPath, PosixFilePermissions.fromString("rw-------"));

            Map<String, Object> structuredConfiguration = config.getStructuredConfiguration("all");
            STRUCTURED_WRITER.writeValue(output, structuredConfiguration);

            // Atomically swap out the new config for the old config. File#renameTo is neither
            // guaranteed atomic nor portable (and only signals failure via a boolean), so use
            // Files.move with ATOMIC_MOVE instead.
            try {
                Files.move(tempPath, structuredPath, StandardCopyOption.ATOMIC_MOVE);
            } catch (IOException e) {
                // Preserve the original contract: log the failure rather than propagate it.
                logger.error("Failed to persist structured Priam configuration", e);
            }
        } finally {
            // On success the temp file has already been moved away; this only cleans up failures.
            if (tempPath != null) Files.deleteIfExists(tempPath);
        }
    }

    @Override
    public String getName() {
        return NAME;
    }

    /**
     * Timer to be used for configuration writing.
     *
     * @param config {@link IConfiguration} to get configuration details from priam.
     * @return the timer to be used for Configuration Persisting from {@link
     *     IConfiguration#getMergedConfigurationCronExpression()}
     */
    public static TaskTimer getTimer(IConfiguration config) {
        return CronTimer.getCronTimer(NAME, config.getMergedConfigurationCronExpression());
    }
}
| 3,225 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/config/IBackupRestoreConfig.java | /**
* Copyright 2018 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.config;
import com.google.common.collect.ImmutableSet;
import com.google.inject.ImplementedBy;
import org.apache.commons.lang3.StringUtils;
/**
 * This interface is to abstract out the backup and restore configuration used by Priam. Goal is to
 * eventually have each module/functionality to have its own Config. Created by aagrawal on 6/26/18.
 */
@ImplementedBy(BackupRestoreConfig.class)
public interface IBackupRestoreConfig {
    /**
     * Cron expression to be used for snapshot meta service. Use "-1" to disable the service.
     *
     * @return Snapshot Meta Service cron expression for generating manifest.json. Default: "-1"
     *     (disabled).
     * @see <a
     *     href="http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html">quartz-scheduler</a>
     * @see <a href="http://www.cronmaker.com">http://www.cronmaker.com</a>
     */
    default String getSnapshotMetaServiceCronExpression() {
        return "-1";
    }

    /**
     * Enable the backup version 2.0 in new format. This will start uploads of "incremental" backups
     * in new format. This is to be used for migration from backup version 1.0.
     *
     * @return boolean value indicating if backups in version 2.0 should be started. Default: false.
     */
    default boolean enableV2Backups() {
        return false;
    }

    /**
     * Monitoring period, in seconds (an interval, not a cron expression), for the service which
     * does TTL of the backups. This service will run only if v2 backups are enabled. The idea is to
     * run this service at least once a day to ensure we are marking backup files for TTL as
     * configured via {@link IConfiguration#getBackupRetentionDays()}. Use -1 to disable this
     * service.
     *
     * <p>NOTE: This should be scheduled on interval rather than CRON as this results in entire
     * fleet to start deletion of files at the same time and remote file system may get overwhelmed.
     *
     * @return Backup TTL Service execution duration for trying to delete backups. Note that this
     *     denotes duration of the job trying to delete backups and is not the TTL of the backups.
     *     Default: 21600 seconds (6 hours).
     */
    default int getBackupTTLMonitorPeriodInSec() {
        return 21600;
    }

    /**
     * Cron expression to be used for the service which does verification of the backups. This
     * service will run only if v2 backups are enabled.
     *
     * @return Backup Verification Service cron expression for trying to verify backups. Default:
     *     run every hour at 30 minutes past the hour.
     * @see <a
     *     href="http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html">quartz-scheduler</a>
     * @see <a href="http://www.cronmaker.com">http://www.cronmaker.com</a>
     */
    default String getBackupVerificationCronExpression() {
        return "0 30 0/1 1/1 * ? *";
    }

    /**
     * The default backup SLO for any cluster. This will ensure that we upload and validate a backup
     * in that SLO window. If no valid backup is found, we log ERROR message. This service will run
     * only if v2 backups are enabled.
     *
     * @return the backup SLO in hours. Default: 24 hours.
     */
    default int getBackupVerificationSLOInHours() {
        return 24;
    }

    /**
     * If restore is enabled and if this flag is enabled, we will try to restore using Backup V2.0.
     *
     * @return if restore should be using backup version 2.0. If this is false we will use backup
     *     version 1.0. Default: false.
     */
    default boolean enableV2Restore() {
        return false;
    }

    /**
     * Returns a csv of backup component file types {@link
     * com.netflix.priam.backup.AbstractBackupPath.BackupFileType} on which to send backup
     * notifications. Default value of this filter is an empty string which would imply that backup
     * notifications will be sent for all component types see {@link
     * com.netflix.priam.backup.AbstractBackupPath.BackupFileType}. Sample filter :
     * "SNAPSHOT_VERIFIED, META_V2"
     *
     * @return A csv string that can be parsed to infer the component file types on which to send
     *     backup related notifications
     */
    default String getBackupNotifyComponentIncludeList() {
        return StringUtils.EMPTY;
    }

    /**
     * Returns a set of attribute names to add to MessageAttributes in the backup notifications. SNS
     * filter policy needs keys in MessageAttributes in order to filter based on those keys.
     *
     * @return A set of attributes to include in MessageAttributes. Default: empty set.
     */
    default ImmutableSet<String> getBackupNotificationAdditionalMessageAttrs() {
        return ImmutableSet.of();
    }
}
| 3,226 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/config/IConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.config;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableSet;
import com.google.inject.ImplementedBy;
import com.netflix.priam.scheduler.UnsupportedTypeException;
import com.netflix.priam.tuner.GCType;
import java.io.File;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
/** Interface for Priam's configuration */
@ImplementedBy(PriamConfiguration.class)
public interface IConfiguration {
void initialize();
/** @return Path to the home dir of Cassandra */
default String getCassHome() {
return "/etc/cassandra";
}
/** @return Location to `cassandra.yaml`. */
default String getYamlLocation() {
return getCassHome() + "/conf/cassandra.yaml";
}
/**
* @return if Priam should tune the jvm.options file Note that Cassandra 2.1 OSS doesn't have
* this file by default, but if someone has added it we can tune it.
*/
default boolean supportsTuningJVMOptionsFile() {
return false;
}
/**
* @return Path to jvm.options file. This is used to pass JVM options to Cassandra. Note that
* Cassandra 2.1 doesn't by default have this file, but if you add it We will allow you to
* tune it.
*/
default String getJVMOptionsFileLocation() {
return getCassHome() + "/conf/jvm.options";
}
/**
* @return Type of garbage collection mechanism to use for Cassandra. Supported values are
* CMS,G1GC
*/
default GCType getGCType() throws UnsupportedTypeException {
return GCType.CMS;
}
/** @return Set of JVM options to exclude/comment. */
default String getJVMExcludeSet() {
return StringUtils.EMPTY;
}
/** @return Set of JMV options to add/upsert */
default String getJVMUpsertSet() {
return StringUtils.EMPTY;
}
/** @return Path to Cassandra startup script */
default String getCassStartupScript() {
return "/etc/init.d/cassandra start";
}
/** @return Path to Cassandra stop sript */
default String getCassStopScript() {
return "/etc/init.d/cassandra stop";
}
/**
* @return int representing how many seconds Priam should fail healthchecks for before
* gracefully draining (nodetool drain) cassandra prior to stop. If this number is negative
* then no draining occurs and Priam immediately stops Cassanddra using the provided stop
* script. If this number is >= 0 then Priam will fail healthchecks for this number of
* seconds before gracefully draining cassandra (nodetool drain) and stopping cassandra with
* the stop script.
*/
default int getGracefulDrainHealthWaitSeconds() {
return -1;
}
/**
* @return int representing how often (in seconds) Priam should auto-remediate Cassandra process
* crash If zero, Priam will restart Cassandra whenever it notices it is crashed If a
* positive number, Priam will restart cassandra no more than once in that number of
* seconds. For example a value of 60 means that Priam will only restart Cassandra once per
* 60 seconds If a negative number, Priam will not restart Cassandra due to crash at all
*/
default int getRemediateDeadCassandraRate() {
return 3600;
}
/**
* Eg: 'my_backup' will result in all files stored under this dir/prefix
*
* @return Prefix that will be added to remote backup location
*/
default String getBackupLocation() {
return "backup";
}
/** @return Get Backup retention in days */
default int getBackupRetentionDays() {
return 0;
}
/** @return Get list of racs to backup. Backup all racs if empty */
default List<String> getBackupRacs() {
return Collections.EMPTY_LIST;
}
/**
* Backup location i.e. remote file system to upload backups. e.g. for S3 it will be s3 bucket
* name
*
* @return Bucket name used for backups
*/
default String getBackupPrefix() {
return "cassandra-archive";
}
/**
* @return Location containing backup files. Typically bucket name followed by path to the
* clusters backup
*/
default String getRestorePrefix() {
return StringUtils.EMPTY;
}
/**
* This is the location of the data/logs/hints for the cassandra. Priam will by default, create
* all the sub-directories required. This dir should have permission to be altered by both
* cassandra and Priam. If this is configured correctly, there is no need to configure {@link
* #getDataFileLocation()}, {@link #getLogDirLocation()}, {@link #getCacheLocation()} and {@link
* #getCommitLogLocation()}. Alternatively all the other directories should be set explicitly by
* user. Set this location to a drive with fast read/writes performance and sizable disk space.
*
* @return Location where all the data/logs/hints for the cassandra will sit.
*/
default String getCassandraBaseDirectory() {
return "/var/lib/cassandra";
}
/** @return Location of the local data dir */
default String getDataFileLocation() {
return getCassandraBaseDirectory() + "/data";
}
default String getLogDirLocation() {
return getCassandraBaseDirectory() + "/logs";
}
/** @return Location of local cache */
default String getCacheLocation() {
return getCassandraBaseDirectory() + "/saved_caches";
}
/** @return Location of local commit log dir */
default String getCommitLogLocation() {
return getCassandraBaseDirectory() + "/commitlog";
}
/** @return Remote commit log location for backups */
default String getBackupCommitLogLocation() {
return StringUtils.EMPTY;
}
/** @return Preferred data part size for multi part uploads */
default long getBackupChunkSize() {
return 10 * 1024 * 1024L;
}
/** @return Cassandra's JMX port */
default int getJmxPort() {
return 7199;
}
/** @return Cassandra's JMX username */
default String getJmxUsername() {
return null;
}
/** @return Cassandra's JMX password */
default String getJmxPassword() {
return null;
}
/** @return Enables Remote JMX connections n C* */
default boolean enableRemoteJMX() {
return false;
}
/** @return Cassandra storage/cluster communication port */
default int getStoragePort() {
return 7000;
}
default int getSSLStoragePort() {
return 7001;
}
/** @return Cassandra's thrift port */
default int getThriftPort() {
return 9160;
}
/** @return Port for CQL binary transport. */
default int getNativeTransportPort() {
return 9042;
}
/** @return Snitch to be used in cassandra.yaml */
default String getSnitch() {
return "org.apache.cassandra.locator.Ec2Snitch";
}
/** @return Cluster name */
default String getAppName() {
return "cass_cluster";
}
/** @return List of all RAC used for the cluster */
List<String> getRacs();
/** @return Max heap size be used for Cassandra */
default String getHeapSize() {
return "8G";
}
/** @return New heap size for Cassandra */
default String getHeapNewSize() {
return "2G";
}
/**
* Cron expression to be used to schedule regular compactions. Use "-1" to disable the CRON.
* Default: -1
*
* @return Compaction cron expression.
* @see <a
* href="http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html">quartz-scheduler</a>
* @see <a href="http://www.cronmaker.com">http://www.cronmaker.com</a> To build new cron timer
*/
default String getCompactionCronExpression() {
return "-1";
}
/**
* Column Family(ies), comma delimited, to start compactions (user-initiated or on CRON). Note
* 1: The expected format is keyspace.cfname. If no value is provided then compaction is
* scheduled for all KS,CF(s) Note 2: CF name allows special character "*" to denote all the
* columnfamilies in a given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. Note
* 3: {@link #getCompactionExcludeCFList()} is applied first to exclude CF/keyspace and then
* {@link #getCompactionIncludeCFList()} is applied to include the CF's/keyspaces.
*
* @return Column Family(ies), comma delimited, to start compactions. If no filter is applied,
* returns null.
*/
default String getCompactionIncludeCFList() {
return null;
}
/**
* Column family(ies), comma delimited, to exclude while starting compaction (user-initiated or
* on CRON). Note 1: The expected format is keyspace.cfname. If no value is provided then
* compaction is scheduled for all KS,CF(s) Note 2: CF name allows special character "*" to
* denote all the columnfamilies in a given keyspace. e.g. keyspace1.* denotes all the CFs in
* keyspace1. Note 3: {@link #getCompactionExcludeCFList()} is applied first to exclude
* CF/keyspace and then {@link #getCompactionIncludeCFList()} is applied to include the
* CF's/keyspaces.
*
* @return Column Family(ies), comma delimited, to exclude from compactions. If no filter is
* applied, returns null.
*/
default String getCompactionExcludeCFList() {
return null;
}
/**
* Cron expression to be used for snapshot backups.
*
* @return Backup cron expression for snapshots
* @see <a
* href="http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html">quartz-scheduler</a>
* @see <a href="http://www.cronmaker.com">http://www.cronmaker.com</a> To build new cron timer
*/
default String getBackupCronExpression() {
return "0 0 12 1/1 * ? *";
}
/**
* Column Family(ies), comma delimited, to include during snapshot backup. Note 1: The expected
* format is keyspace.cfname. If no value is provided then snapshot contains all KS,CF(s) Note
* 2: CF name allows special character "*" to denote all the columnfamilies in a given keyspace.
* e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link
* #getSnapshotExcludeCFList()} is applied first to exclude CF/keyspace and then {@link
* #getSnapshotIncludeCFList()} is applied to include the CF's/keyspaces.
*
* @return Column Family(ies), comma delimited, to include in snapshot backup. If no filter is
* applied, returns null.
*/
default String getSnapshotIncludeCFList() {
return null;
}
/**
* Column family(ies), comma delimited, to exclude during snapshot backup. Note 1: The expected
* format is keyspace.cfname. If no value is provided then snapshot is scheduled for all
* KS,CF(s) Note 2: CF name allows special character "*" to denote all the columnfamilies in a
* given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link
* #getSnapshotExcludeCFList()} is applied first to exclude CF/keyspace and then {@link
* #getSnapshotIncludeCFList()} is applied to include the CF's/keyspaces.
*
* @return Column Family(ies), comma delimited, to exclude from snapshot backup. If no filter is
* applied, returns null.
*/
default String getSnapshotExcludeCFList() {
return null;
}
/**
* Column Family(ies), comma delimited, to include during incremental backup. Note 1: The
* expected format is keyspace.cfname. If no value is provided then incremental contains all
* KS,CF(s) Note 2: CF name allows special character "*" to denote all the columnfamilies in a
* given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link
* #getIncrementalExcludeCFList()} is applied first to exclude CF/keyspace and then {@link
* #getIncrementalIncludeCFList()} is applied to include the CF's/keyspaces.
*
* @return Column Family(ies), comma delimited, to include in incremental backup. If no filter
* is applied, returns null.
*/
default String getIncrementalIncludeCFList() {
return null;
}
/**
* Column family(ies), comma delimited, to exclude during incremental backup. Note 1: The
* expected format is keyspace.cfname. If no value is provided then incremental is scheduled for
* all KS,CF(s) Note 2: CF name allows special character "*" to denote all the columnfamilies in
* a given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link
* #getIncrementalExcludeCFList()} is applied first to exclude CF/keyspace and then {@link
* #getIncrementalIncludeCFList()} is applied to include the CF's/keyspaces.
*
* @return Column Family(ies), comma delimited, to exclude from incremental backup. If no filter
* is applied, returns null.
*/
default String getIncrementalExcludeCFList() {
return null;
}
/**
* Column Family(ies), comma delimited, to include during restore. Note 1: The expected format
* is keyspace.cfname. If no value is provided then restore contains all KS,CF(s) Note 2: CF
* name allows special character "*" to denote all the columnfamilies in a given keyspace. e.g.
* keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link #getRestoreExcludeCFList()} is
* applied first to exclude CF/keyspace and then {@link #getRestoreIncludeCFList()} is applied
* to include the CF's/keyspaces.
*
* @return Column Family(ies), comma delimited, to include in restore. If no filter is applied,
* returns null.
*/
default String getRestoreIncludeCFList() {
return null;
}
/**
* Column family(ies), comma delimited, to exclude during restore. Note 1: The expected format
* is keyspace.cfname. If no value is provided then restore is scheduled for all KS,CF(s) Note
* 2: CF name allows special character "*" to denote all the columnfamilies in a given keyspace.
* e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link #getRestoreExcludeCFList()}
* is applied first to exclude CF/keyspace and then {@link #getRestoreIncludeCFList()} is
* applied to include the CF's/keyspaces.
*
* @return Column Family(ies), comma delimited, to exclude from restore. If no filter is
* applied, returns null.
*/
default String getRestoreExcludeCFList() {
return null;
}
/**
* Specifies the start and end time used for restoring data (yyyyMMddHHmm format) Eg:
* 201201132030,201201142030
*
* @return Snapshot to be searched and restored
*/
default String getRestoreSnapshot() {
return StringUtils.EMPTY;
}
/** @return Get the region to connect to SDB for instance identity */
default String getSDBInstanceIdentityRegion() {
return "us-east-1";
}
/** @return true if it is a multi regional cluster */
default boolean isMultiDC() {
return false;
}
/** @return Number of backup threads for uploading files when using async feature */
default int getBackupThreads() {
return 2;
}
/** @return Number of download threads for downloading files when using async feature */
default int getRestoreThreads() {
return 8;
}
/** @return true if restore should search for nearest token if current token is not found */
default boolean isRestoreClosestToken() {
return false;
}
/**
* Amazon specific setting to query Additional/ Sibling ASG Memberships in csv format to
* consider while calculating RAC membership
*/
default String getSiblingASGNames() {
return ",";
}
/** Get the security group associated with nodes in this cluster */
default String getACLGroupName() {
return getAppName();
}
/** @return true if incremental backups are enabled */
default boolean isIncrementalBackupEnabled() {
return true;
}
/** @return Bytes per second to throttle for backups */
default int getUploadThrottle() {
return -1;
}
    /**
     * Get the throttle limit for API call of remote file system - get object exist. Use value of
     * -1 to disable this. Default: -1 (disabled).
     *
     * <p>NOTE(review): the javadoc previously claimed a default of 10, but the code returns -1;
     * the text has been corrected to match the code.
     *
     * @return throttle limit for get object exist API call.
     */
    default int getRemoteFileSystemObjectExistsThrottle() {
        return -1;
    }
/** @return true if Priam should local config file for tokens and seeds */
default boolean isLocalBootstrapEnabled() {
return false;
}
/** @return Compaction throughput */
default int getCompactionThroughput() {
return 8;
}
    /** @return max_hint_window_in_ms in C* yaml (previous javadoc was a copy-paste of the compaction throughput doc) */
    default int getMaxHintWindowInMS() {
        return 10800000;
    }
/** @return hinted_handoff_throttle_in_kb */
default int getHintedHandoffThrottleKb() {
return 1024;
}
/** @return Size of Cassandra max direct memory */
default String getMaxDirectMemory() {
return "50G";
}
/** @return Bootstrap cluster name (depends on another cass cluster) */
default String getBootClusterName() {
return StringUtils.EMPTY;
}
/** @return Get the name of seed provider */
default String getSeedProviderName() {
return "com.netflix.priam.cassandra.extensions.NFSeedProvider";
}
/**
* memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) = 0.11
*
* @return memtable_cleanup_threshold in C* yaml
*/
default double getMemtableCleanupThreshold() {
return 0.11;
}
/** @return stream_throughput_outbound_megabits_per_sec in yaml */
default int getStreamingThroughputMB() {
return 400;
}
/**
* Get the paritioner for this cassandra cluster/node.
*
* @return the fully-qualified name of the partitioner class
*/
default String getPartitioner() {
return "org.apache.cassandra.dht.RandomPartitioner";
}
/** Support for c* 1.1 global key cache size */
default String getKeyCacheSizeInMB() {
return StringUtils.EMPTY;
}
/** Support for limiting the total number of keys in c* 1.1 global key cache. */
default String getKeyCacheKeysToSave() {
return StringUtils.EMPTY;
}
/** Support for c* 1.1 global row cache size */
default String getRowCacheSizeInMB() {
return StringUtils.EMPTY;
}
/** Support for limiting the total number of rows in c* 1.1 global row cache. */
default String getRowCacheKeysToSave() {
return StringUtils.EMPTY;
}
/** @return C* Process Name */
default String getCassProcessName() {
return "CassandraDaemon";
}
/** Defaults to 'allow all'. */
default String getAuthenticator() {
return "org.apache.cassandra.auth.AllowAllAuthenticator";
}
/** Defaults to 'allow all'. */
default String getAuthorizer() {
return "org.apache.cassandra.auth.AllowAllAuthorizer";
}
/** @return true/false, if Cassandra needs to be started manually */
default boolean doesCassandraStartManually() {
return false;
}
/** @return possible values: all, dc, none */
default String getInternodeCompression() {
return "all";
}
/**
* Enable/disable backup/restore of commit logs.
*
* @return boolean value true if commit log backup/restore is enabled, false otherwise. Default:
* false.
*/
default boolean isBackingUpCommitLogs() {
return false;
}
default String getCommitLogBackupPropsFile() {
return getCassHome() + "/conf/commitlog_archiving.properties";
}
default String getCommitLogBackupArchiveCmd() {
return "/bin/ln %path /mnt/data/backup/%name";
}
default String getCommitLogBackupRestoreCmd() {
return "/bin/mv %from %to";
}
default String getCommitLogBackupRestoreFromDirs() {
return "/mnt/data/backup/commitlog/";
}
default String getCommitLogBackupRestorePointInTime() {
return StringUtils.EMPTY;
}
default int maxCommitLogsRestore() {
return 10;
}
default boolean isClientSslEnabled() {
return false;
}
default String getInternodeEncryption() {
return "none";
}
default boolean isDynamicSnitchEnabled() {
return true;
}
default boolean isThriftEnabled() {
return true;
}
default boolean isNativeTransportEnabled() {
return true;
}
default int getConcurrentReadsCnt() {
return 32;
}
default int getConcurrentWritesCnt() {
return 32;
}
default int getConcurrentCompactorsCnt() {
return Runtime.getRuntime().availableProcessors();
}
default String getRpcServerType() {
return "hsha";
}
default int getRpcMinThreads() {
return 16;
}
default int getRpcMaxThreads() {
return 2048;
}
/*
* @return the warning threshold in MB's for large partitions encountered during compaction.
* Default value of 100 is used (default from cassandra.yaml)
*/
default int getCompactionLargePartitionWarnThresholdInMB() {
return 100;
}
default String getExtraConfigParams() {
return StringUtils.EMPTY;
}
String getCassYamlVal(String priamKey);
default boolean getAutoBoostrap() {
return true;
}
default boolean isCreateNewTokenEnable() {
return true;
}
/*
* @return the location on disk of the private key used by the cryptography algorithm
*/
default String getPrivateKeyLocation() {
return StringUtils.EMPTY;
}
/**
* @return the type of source for the restore. Valid values are: AWSCROSSACCT or GOOGLE. Note:
* for backward compatibility, this property should be optional. Specifically, if it does
* not exist, it should not cause an adverse impact on current functionality.
* <p>AWSCROSSACCT - You are restoring from an AWS account which requires cross account
* assumption where an IAM user in one account is allowed to access resources that belong to
* a different account.
* <p>GOOGLE - You are restoring from Google Cloud Storage
*/
default String getRestoreSourceType() {
return StringUtils.EMPTY;
}
/**
* Should backups be encrypted. If this is on, then all the files uploaded will be compressed
* and encrypted before being uploaded to remote file system.
*
* @return true to enable encryption of backup (snapshots, incrementals, commit logs). Note: for
* backward compatibility, this property should be optional. Specifically, if it does not
* exist, it should not cause an adverse impact on current functionality.
*/
default boolean isEncryptBackupEnabled() {
return false;
}
/**
* Data that needs to be restored is encrypted?
*
* @return true if data that needs to be restored is encrypted. Note that setting this value
* does not play any role until {@link #getRestoreSnapshot()} is set to a non-null value.
*/
default boolean isRestoreEncrypted() {
return false;
}
/**
* @return the Amazon Resource Name (ARN). This is applicable when restoring from an AWS account
* which requires cross account assumption. Note: for backward compatibility, this property
* should be optional. Specifically, if it does not exist, it should not cause an adverse
* impact on current functionality.
*/
default String getAWSRoleAssumptionArn() {
return StringUtils.EMPTY;
}
/**
* @return Google Cloud Storage service account id to be use within the restore functionality.
* Note: for backward compatibility, this property should be optional. Specifically, if it
* does not exist, it should not cause an adverse impact on current functionality.
*/
default String getGcsServiceAccountId() {
return StringUtils.EMPTY;
}
/**
* @return the absolute path on disk for the Google Cloud Storage PFX file (i.e. the combined
* format of the private key and certificate). This information is to be use within the
* restore functionality. Note: for backward compatibility, this property should be
* optional. Specifically, if it does not exist, it should not cause an adverse impact on
* current functionality.
*/
default String getGcsServiceAccountPrivateKeyLoc() {
return StringUtils.EMPTY;
}
/**
* @return the pass phrase use by PGP cryptography. This information is to be use within the
* restore and backup functionality when encryption is enabled. Note: for backward
* compatibility, this property should be optional. Specifically, if it does not exist, it
* should not cause an adverse impact on current functionality.
*/
default String getPgpPasswordPhrase() {
return StringUtils.EMPTY;
}
/**
* @return public key use by PGP cryptography. This information is to be use within the restore
* and backup functionality when encryption is enabled. Note: for backward compatibility,
* this property should be optional. Specifically, if it does not exist, it should not cause
* an adverse impact on current functionality.
*/
default String getPgpPublicKeyLoc() {
return StringUtils.EMPTY;
}
/**
* Use this method for adding extra/ dynamic cassandra startup options or env properties
*
* @return A map of extra paramaters.
*/
default Map<String, String> getExtraEnvParams() {
return Collections.EMPTY_MAP;
}
/*
* @return the Amazon Resource Name (ARN) for EC2 classic.
*/
default String getClassicEC2RoleAssumptionArn() {
return StringUtils.EMPTY;
}
/*
* @return the Amazon Resource Name (ARN) for VPC.
*/
default String getVpcEC2RoleAssumptionArn() {
return StringUtils.EMPTY;
}
/**
* Is cassandra cluster spanning more than one account. This may be true if you are migrating
* your cluster from one account to another.
*
* @return if the dual account support
*/
default boolean isDualAccount() {
return false;
}
/**
* Should incremental backup be uploaded in async fashion? If this is false, then incrementals
* will be in sync fashion.
*
* @return enable async incrementals for backup
*/
default boolean enableAsyncIncremental() {
return false;
}
/**
* Should snapshot backup be uploaded in async fashion? If this is false, then snapshot will be
* in sync fashion.
*
* @return enable async snapshot for backup
*/
default boolean enableAsyncSnapshot() {
return false;
}
/**
* Queue size to be used for backup uploads. Note that once queue is full, we would wait for
* {@link #getUploadTimeout()} to add any new item before declining the request and throwing
* exception.
*
* @return size of the queue for uploads.
*/
default int getBackupQueueSize() {
return 100000;
}
    /**
     * Queue size to be used for file downloads. Note that once queue is full, we would wait for
     * {@link #getDownloadTimeout()} to add any new item before declining the request and throwing
     * exception.
     *
     * @return size of the queue for downloads. Default: 100,000.
     */
    default int getDownloadQueueSize() {
        return 100000;
    }
    /**
     * Uploads are scheduled in {@link #getBackupQueueSize()}. If queue is full then we wait for
     * {@link #getUploadTimeout()} for the queue to have an entry available for queueing the current
     * task after which we throw RejectedExecutionException.
     *
     * @return timeout in milliseconds for uploads to wait on the blocking queue.
     */
    default long getUploadTimeout() {
        return (2 * 60 * 60 * 1000L); // 2 hours in ms (the previous "2 minutes" note was wrong).
    }
    /**
     * Downloads are scheduled in {@link #getDownloadQueueSize()}. If queue is full then we wait for
     * {@link #getDownloadTimeout()} for the queue to have an entry available for queueing the
     * current task after which we throw RejectedExecutionException.
     *
     * @return timeout in milliseconds for downloads to wait on the blocking queue.
     */
    default long getDownloadTimeout() {
        return (10 * 60 * 60 * 1000L); // 10 hours in ms (the previous "10 minutes" note was wrong).
    }
    /** @return tombstone_warn_threshold in C* yaml. Default: 1000. */
    default int getTombstoneWarnThreshold() {
        return 1000;
    }
    /** @return tombstone_failure_threshold in C* yaml. Default: 100,000. */
    default int getTombstoneFailureThreshold() {
        return 100000;
    }
    /** @return streaming_socket_timeout_in_ms in C* yaml. Default: 86,400,000 ms (1 day). */
    default int getStreamingSocketTimeoutInMS() {
        return 86400000;
    }
    /**
     * List of keyspaces to flush. Default: all keyspaces (an empty string means no filter).
     *
     * @return a comma delimited list of keyspaces to flush
     */
    default String getFlushKeyspaces() {
        return StringUtils.EMPTY;
    }
    /**
     * Cron expression to be used for flush. Use "-1" to disable the CRON. Default: "-1"
     * (disabled).
     *
     * @return Cron expression for flush
     * @see <a
     *     href="http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html">quartz-scheduler</a>
     * @see <a href="http://www.cronmaker.com">http://www.cronmaker.com</a> To build new cron timer
     */
    default String getFlushCronExpression() {
        return "-1";
    }
    /** @return the absolute path to store the backup status on disk. Default: "backup.status" inside the data file location. */
    default String getBackupStatusFileLoc() {
        return getDataFileLocation() + File.separator + "backup.status";
    }
    /** @return true if Priam should use sudo to start C*, false otherwise. Default: true. */
    default boolean useSudo() {
        return true;
    }
    /**
     * This flag is an easy way to enable/disable notifications as notification topics may be
     * different per region. A notification is only sent when this flag is enabled and {@link
     * #getBackupNotificationTopicArn()} is not empty.
     *
     * @return true if backup notification is enabled, false otherwise. Default: true.
     */
    default boolean enableBackupNotification() {
        return true;
    }
    /**
     * SNS Notification topic to be used for sending backup event notifications. One start event is
     * sent before uploading any file and one complete/failure event is sent after the file is
     * uploaded/failed. This applies to both incremental and snapshot. Default: empty string, i.e.
     * no notifications are sent.
     *
     * @return SNS Topic ARN to be used to send notification.
     */
    default String getBackupNotificationTopicArn() {
        return StringUtils.EMPTY;
    }
    /**
     * Post restore hook enabled state. If enabled, the jar represented by {@link
     * #getPostRestoreHook()} is called once download of files is complete, before starting
     * Cassandra.
     *
     * @return true if the post restore hook is enabled. Default: false.
     */
    default boolean isPostRestoreHookEnabled() {
        return false;
    }
    /**
     * Post restore hook to be executed.
     *
     * @return post restore hook to be executed once restore is complete. Default: empty string.
     */
    default String getPostRestoreHook() {
        return StringUtils.EMPTY;
    }
    /**
     * HeartBeat file of post restore hook.
     *
     * @return name of the file that indicates the heartbeat of the post restore hook
     */
    default String getPostRestoreHookHeartbeatFileName() {
        return "postrestorehook_heartbeat";
    }
    /**
     * Done file for post restore hook.
     *
     * @return name of the file that indicates completion of the post restore hook
     */
    default String getPostRestoreHookDoneFileName() {
        return "postrestorehook_done";
    }
    /**
     * Maximum time Priam has to wait for the post restore hook sub-process to complete
     * successfully.
     *
     * @return time out for post restore hook in days. Default: 2.
     */
    default int getPostRestoreHookTimeOutInDays() {
        return 2;
    }
    /**
     * Heartbeat timeout (in ms) for post restore hook.
     *
     * @return heartbeat timeout for post restore hook. Default: 120,000 ms (2 minutes).
     */
    default int getPostRestoreHookHeartBeatTimeoutInMs() {
        return 120000;
    }
    /**
     * Heartbeat check frequency (in ms) for post restore hook.
     *
     * @return heart beat check frequency for post restore hook. Default: 120,000 ms (2 minutes).
     */
    default int getPostRestoreHookHeartbeatCheckFrequencyInMs() {
        return 120000;
    }
    /**
     * Grace period in days for the file that 'could' be output of a long-running compaction job.
     * Note that cassandra creates output of the compaction as non-tmp-link files (whole SSTable)
     * but are still not part of the final "view" and thus not part of a snapshot. Another common
     * issue is "index.db" published "way" before other component files. Thus the index file has a
     * modification time before the other files.
     *
     * <p>This value is used to TTL the backups and to consider files which are forgotten by
     * Cassandra. Default: 5
     *
     * @return grace period for the compaction output forgotten files.
     */
    default int getGracePeriodDaysForCompaction() {
        return 5;
    }
    /**
     * Grace period in days for which a file is not considered forgotten by cassandra (that would be
     * deleted by cassandra) as file could be used in the read path of the cassandra. Note that read
     * path could imply streaming to a joining neighbor or for repair. When cassandra is done with a
     * compaction, the input files to compaction, are removed from the "view" and thus not part of
     * snapshot, but these files may very well be used for streaming, repair etc and thus cannot be
     * removed.
     *
     * @return grace period in days for read path forgotten files. Default: 3.
     */
    default int getForgottenFileGracePeriodDaysForRead() {
        return 3;
    }
    /**
     * If any forgotten file is found in Cassandra, it is usually good practice to move/delete them
     * so when cassandra restarts, it does not load old data which should be removed else you may
     * run into data resurrection issues. This behavior is fixed in 3.x. This configuration will
     * allow Priam to move the forgotten files to a "lost_found" directory for user to review at
     * later time at the same time ensuring that Cassandra does not resurrect data.
     *
     * @return true if Priam should move forgotten files to the "lost_found" directory of that CF.
     *     Default: false.
     */
    default boolean isForgottenFileMoveEnabled() {
        return false;
    }
    /**
     * A method for allowing access to outside programs to Priam configuration when paired with the
     * Priam configuration HTTP endpoint at /v1/config/structured/all/property
     *
     * @param group The group of configuration options to return, currently just returns everything
     *     no matter what
     * @return A Map representation of this configuration, or null if the method doesn't exist
     */
    @SuppressWarnings("unchecked")
    @JsonIgnore
    default Map<String, Object> getStructuredConfiguration(String group) {
        // NOTE(review): a fresh ObjectMapper is built on every call; cache it if this becomes hot.
        ObjectMapper objectMapper = new ObjectMapper();
        return objectMapper.convertValue(this, Map.class);
    }
    /**
     * Cron expression to be used for persisting Priam merged configuration to disk. Use "-1" to
     * disable the CRON. This will persist the fully merged value of Priam's configuration to the
     * {@link #getMergedConfigurationDirectory()} as two JSON files: structured.json and
     * unstructured.json which persist structured config and unstructured config respectively. We
     * recommend you only rely on unstructured for the time being until the structured interface is
     * finalized.
     *
     * <p>Default: every minute, on the minute.
     *
     * @return Cron expression for merged configuration writing
     * @see <a
     *     href="http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html">quartz-scheduler</a>
     * @see <a href="http://www.cronmaker.com">http://www.cronmaker.com</a> To build new cron timer
     */
    default String getMergedConfigurationCronExpression() {
        // Every minute on the top of the minute.
        return "0 * * * * ? *";
    }
    /**
     * Returns the path to the directory that Priam should write merged configuration to. Note that
     * if you disable the merged configuration cron above {@link
     * #getMergedConfigurationCronExpression()} then this directory is not created or used.
     *
     * @return A string representation of the path to the merged priam configuration directory.
     *     Default: "/tmp/priam_configuration".
     */
    default String getMergedConfigurationDirectory() {
        return "/tmp/priam_configuration";
    }
    /**
     * Return a list of property file paths from the configuration directory by Priam that should be
     * tuned.
     *
     * @return the file paths. Default: empty set.
     */
    default ImmutableSet<String> getTunablePropertyFiles() {
        return ImmutableSet.of();
    }
    /**
     * @return true to use private IPs for seeds and insertion into the Token DB false otherwise.
     *     By default this is true only when the snitch is GossipingPropertyFileSnitch.
     */
    default boolean usePrivateIP() {
        return getSnitch().equals("org.apache.cassandra.locator.GossipingPropertyFileSnitch");
    }
    /** @return true to check whether thrift is listening on the rpc_port. Default: false. */
    default boolean checkThriftServerIsListening() {
        return false;
    }
    /**
     * @return true if Priam should skip deleting ingress rules for IPs not found in the token
     *     database. Default: false.
     */
    default boolean skipDeletingOthersIngressRules() {
        return false;
    }
    /**
     * @return true if Priam should skip updating ingress rules for IPs found in the token
     *     database. Default: false.
     */
    default boolean skipUpdatingOthersIngressRules() {
        return false;
    }
    /** @return the threshold at which point we might risk not getting our ingress rule set. Default: 500. */
    default int getACLSizeWarnThreshold() {
        return 500;
    }
    /**
     * @return which backups to compress. {@code IF_REQUIRED} means compress backups only when the
     *     files are not already compressed by Cassandra (the previous doc referenced a constant
     *     named UNCOMPRESSED which does not exist in {@code BackupsToCompress}). Default:
     *     {@code ALL}.
     */
    default BackupsToCompress getBackupsToCompress() {
        return BackupsToCompress.ALL;
    }
    /**
     * @return true if Priam should skip ingress on an IP address from the token database unless it
     *     can confirm that it is public. Default: false.
     */
    default boolean skipIngressUnlessIPIsPublic() {
        return false;
    }
    /**
     * @return true to permit direct token assignment even when gossip information does not match
     *     the token database (semantics inferred from the name — confirm with callers). Default:
     *     false.
     */
    default boolean permitDirectTokenAssignmentWithGossipMismatch() {
        return false;
    }
    /**
     * @return how long a snapshot backup should take to upload, in minutes. Default: 0
     *     (presumably no target — confirm against the dynamic rate limiter usage).
     */
    // NOTE(review): the method name misspells "Snapshot"; renaming would break implementers.
    default int getTargetMinutesToCompleteSnaphotUpload() {
        return 0;
    }
    /**
     * @return the percentage off of the old rate that the current rate must be to trigger a new
     *     rate in the dynamic rate limiter. Default: 0.1 (10%).
     */
    default double getRateLimitChangeThreshold() {
        return 0.1;
    }
    /**
     * @return true to add an MD5 checksum to backup uploads (presumably attached as metadata for
     *     the remote file system — confirm with the upload path). Default: false.
     */
    default boolean addMD5ToBackupUploads() {
        return false;
    }
    /**
     * If a backup file's last-modified time is before this time, revert to SNAPPY compression.
     * Otherwise, choose compression using the default logic based on getBackupsToCompress().
     *
     * @return the milliseconds since the epoch of the transition time. Default: 0 (the epoch), so
     *     the SNAPPY fallback never applies.
     */
    default long getCompressionTransitionEpochMillis() {
        return 0L;
    }
    /** @return whether to enable auto_snapshot. Abstract — implementations must provide a value. */
    boolean getAutoSnapshot();
    /** @return whether incremental backups should be skipped in a restore. Default: false. */
    default boolean skipIncrementalRestore() {
        return false;
    }
    /** @return Cassandra disk_failure_policy. Default: "stop". */
    default String getDiskFailurePolicy() {
        return "stop";
    }
    /**
     * Escape hatch for getting any arbitrary property by key. This is useful so we don't have to
     * keep adding methods to this interface for every single configuration option ever. Also
     * exposed via HTTP at v1/config/unstructured/X.
     *
     * @param key The arbitrary configuration property to look up
     * @param defaultValue The default value to return if the key is not found.
     * @return The result for the property, or the defaultValue if provided (null otherwise)
     */
    String getProperty(String key, String defaultValue);
}
| 3,227 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/config/BackupRestoreConfig.java | /**
* Copyright 2018 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.config;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.netflix.priam.configSource.IConfigSource;
import javax.inject.Inject;
import org.apache.commons.lang3.StringUtils;
/**
 * Implementation of IBackupRestoreConfig backed entirely by the injected {@link IConfigSource}.
 * Created by aagrawal on 6/26/18.
 */
public class BackupRestoreConfig implements IBackupRestoreConfig {
    // Source of all property lookups; every getter delegates to it with a default value.
    private final IConfigSource config;
    @Inject
    public BackupRestoreConfig(IConfigSource config) {
        this.config = config;
    }
    /** {@inheritDoc} Default: "-1" (cron disabled). */
    @Override
    public String getSnapshotMetaServiceCronExpression() {
        return config.get("priam.snapshot.meta.cron", "-1");
    }
    /** {@inheritDoc} Default: false. */
    @Override
    public boolean enableV2Backups() {
        return config.get("priam.enableV2Backups", false);
    }
    /** {@inheritDoc} Default: false. */
    @Override
    public boolean enableV2Restore() {
        return config.get("priam.enableV2Restore", false);
    }
    /** {@inheritDoc} Default: 21600 seconds (6 hours). */
    @Override
    public int getBackupTTLMonitorPeriodInSec() {
        return config.get("priam.backupTTLMonitorPeriodInSec", 21600);
    }
    /** {@inheritDoc} Default: 24 hours. */
    @Override
    public int getBackupVerificationSLOInHours() {
        return config.get("priam.backupVerificationSLOInHours", 24);
    }
    /** {@inheritDoc} Default: at minute 30 of every hour. */
    @Override
    public String getBackupVerificationCronExpression() {
        return config.get("priam.backupVerificationCronExpression", "0 30 0/1 1/1 * ? *");
    }
    /** {@inheritDoc} Default: empty string. */
    @Override
    public String getBackupNotifyComponentIncludeList() {
        return config.get("priam.backupNotifyComponentIncludeList", StringUtils.EMPTY);
    }
    /** {@inheritDoc} Parses a comma-separated property into a trimmed set, dropping empties. */
    @Override
    public ImmutableSet<String> getBackupNotificationAdditionalMessageAttrs() {
        String value = config.get("priam.backupNotifyAdditionalMessageAttrs", StringUtils.EMPTY);
        return ImmutableSet.copyOf(Splitter.on(",").omitEmptyStrings().trimResults().split(value));
    }
}
| 3,228 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/config/BackupsToCompress.java | package com.netflix.priam.config;
/** Which backup files Priam should compress before upload. */
public enum BackupsToCompress {
    // Compress every backup file.
    ALL,
    // Compress only files that are not already compressed (presumably by Cassandra itself) —
    // see IConfiguration#getBackupsToCompress.
    IF_REQUIRED,
    // Do not compress any backup file.
    NONE
}
| 3,229 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/config/PriamConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.config;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.netflix.priam.configSource.IConfigSource;
import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.scheduler.UnsupportedTypeException;
import com.netflix.priam.tuner.GCType;
import java.io.File;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Singleton
public class PriamConfiguration implements IConfiguration {
public static final String PRIAM_PRE = "priam";
private final IConfigSource config;
private static final Logger logger = LoggerFactory.getLogger(PriamConfiguration.class);
@JsonIgnore private InstanceInfo instanceInfo;
@Inject
public PriamConfiguration(IConfigSource config, InstanceInfo instanceInfo) {
this.config = config;
this.instanceInfo = instanceInfo;
}
@Override
public void initialize() {
this.config.initialize(instanceInfo.getAutoScalingGroup(), instanceInfo.getRegion());
}
@Override
public String getCassStartupScript() {
return config.get(PRIAM_PRE + ".cass.startscript", "/etc/init.d/cassandra start");
}
@Override
public String getCassStopScript() {
return config.get(PRIAM_PRE + ".cass.stopscript", "/etc/init.d/cassandra stop");
}
@Override
public int getGracefulDrainHealthWaitSeconds() {
return -1;
}
@Override
public int getRemediateDeadCassandraRate() {
return config.get(
PRIAM_PRE + ".remediate.dead.cassandra.rate", 3600); // Default to once per hour
}
@Override
public String getCassHome() {
return config.get(PRIAM_PRE + ".cass.home", "/etc/cassandra");
}
@Override
public String getBackupLocation() {
return config.get(PRIAM_PRE + ".s3.base_dir", "backup");
}
@Override
public String getBackupPrefix() {
return config.get(PRIAM_PRE + ".s3.bucket", "cassandra-archive");
}
@Override
public int getBackupRetentionDays() {
return config.get(PRIAM_PRE + ".backup.retention", 0);
}
@Override
public List<String> getBackupRacs() {
return config.getList(PRIAM_PRE + ".backup.racs");
}
@Override
public String getRestorePrefix() {
return config.get(PRIAM_PRE + ".restore.prefix");
}
@Override
public String getDataFileLocation() {
return config.get(PRIAM_PRE + ".data.location", getCassandraBaseDirectory() + "/data");
}
@Override
public String getLogDirLocation() {
return config.get(PRIAM_PRE + ".logs.location", getCassandraBaseDirectory() + "/logs");
}
@Override
public String getCacheLocation() {
return config.get(
PRIAM_PRE + ".cache.location", getCassandraBaseDirectory() + "/saved_caches");
}
@Override
public String getCommitLogLocation() {
return config.get(
PRIAM_PRE + ".commitlog.location", getCassandraBaseDirectory() + "/commitlog");
}
@Override
public String getBackupCommitLogLocation() {
return config.get(PRIAM_PRE + ".backup.commitlog.location", "");
}
@Override
public long getBackupChunkSize() {
long size = config.get(PRIAM_PRE + ".backup.chunksizemb", 10);
return size * 1024 * 1024L;
}
@Override
public int getJmxPort() {
return config.get(PRIAM_PRE + ".jmx.port", 7199);
}
@Override
public String getJmxUsername() {
return config.get(PRIAM_PRE + ".jmx.username", "");
}
@Override
public String getJmxPassword() {
return config.get(PRIAM_PRE + ".jmx.password", "");
}
/** @return Enables Remote JMX connections n C* */
@Override
public boolean enableRemoteJMX() {
return config.get(PRIAM_PRE + ".jmx.remote.enable", false);
}
public int getNativeTransportPort() {
return config.get(PRIAM_PRE + ".nativeTransport.port", 9042);
}
@Override
public int getThriftPort() {
return config.get(PRIAM_PRE + ".thrift.port", 9160);
}
@Override
public int getStoragePort() {
return config.get(PRIAM_PRE + ".storage.port", 7000);
}
@Override
public int getSSLStoragePort() {
return config.get(PRIAM_PRE + ".ssl.storage.port", 7001);
}
@Override
public String getSnitch() {
return config.get(PRIAM_PRE + ".endpoint_snitch", "org.apache.cassandra.locator.Ec2Snitch");
}
@Override
public String getAppName() {
return config.get(PRIAM_PRE + ".clustername", "cass_cluster");
}
@Override
public List<String> getRacs() {
return config.getList(PRIAM_PRE + ".zones.available", instanceInfo.getDefaultRacks());
}
@Override
public String getHeapSize() {
return config.get((PRIAM_PRE + ".heap.size.") + instanceInfo.getInstanceType(), "8G");
}
@Override
public String getHeapNewSize() {
return config.get(
(PRIAM_PRE + ".heap.newgen.size.") + instanceInfo.getInstanceType(), "2G");
}
@Override
public String getMaxDirectMemory() {
return config.get(
(PRIAM_PRE + ".direct.memory.size.") + instanceInfo.getInstanceType(), "50G");
}
@Override
public String getBackupCronExpression() {
return config.get(PRIAM_PRE + ".backup.cron", "0 0 12 1/1 * ? *"); // Backup daily at 12
}
@Override
public GCType getGCType() throws UnsupportedTypeException {
String gcType = config.get(PRIAM_PRE + ".gc.type", GCType.CMS.getGcType());
return GCType.lookup(gcType);
}
@Override
public String getJVMExcludeSet() {
return config.get(PRIAM_PRE + ".jvm.options.exclude");
}
@Override
public String getJVMUpsertSet() {
return config.get(PRIAM_PRE + ".jvm.options.upsert");
}
@Override
public String getFlushCronExpression() {
return config.get(PRIAM_PRE + ".flush.cron", "-1");
}
@Override
public String getCompactionCronExpression() {
return config.get(PRIAM_PRE + ".compaction.cron", "-1");
}
@Override
public String getCompactionIncludeCFList() {
return config.get(PRIAM_PRE + ".compaction.cf.include");
}
@Override
public String getCompactionExcludeCFList() {
return config.get(PRIAM_PRE + ".compaction.cf.exclude");
}
@Override
public String getSnapshotIncludeCFList() {
return config.get(PRIAM_PRE + ".snapshot.cf.include");
}
@Override
public String getSnapshotExcludeCFList() {
return config.get(PRIAM_PRE + ".snapshot.cf.exclude");
}
@Override
public String getIncrementalIncludeCFList() {
return config.get(PRIAM_PRE + ".incremental.cf.include");
}
@Override
public String getIncrementalExcludeCFList() {
return config.get(PRIAM_PRE + ".incremental.cf.exclude");
}
@Override
public String getRestoreIncludeCFList() {
return config.get(PRIAM_PRE + ".restore.cf.include");
}
@Override
public String getRestoreExcludeCFList() {
return config.get(PRIAM_PRE + ".restore.cf.exclude");
}
@Override
public String getRestoreSnapshot() {
return config.get(PRIAM_PRE + ".restore.snapshot", "");
}
@Override
public boolean isRestoreEncrypted() {
return config.get(PRIAM_PRE + ".encrypted.restore.enabled", false);
}
@Override
public String getSDBInstanceIdentityRegion() {
return config.get(PRIAM_PRE + ".sdb.instanceIdentity.region", "us-east-1");
}
@Override
public boolean isMultiDC() {
return config.get(PRIAM_PRE + ".multiregion.enable", false);
}
@Override
public int getBackupThreads() {
return config.get(PRIAM_PRE + ".backup.threads", 2);
}
@Override
public int getRestoreThreads() {
return config.get(PRIAM_PRE + ".restore.threads", 8);
}
@Override
public boolean isRestoreClosestToken() {
return config.get(PRIAM_PRE + ".restore.closesttoken", false);
}
@Override
public String getSiblingASGNames() {
return config.get(PRIAM_PRE + ".az.sibling.asgnames", ",");
}
@Override
public String getACLGroupName() {
return config.get(PRIAM_PRE + ".acl.groupname", this.getAppName());
}
@Override
public boolean isIncrementalBackupEnabled() {
return config.get(PRIAM_PRE + ".backup.incremental.enable", true);
}
@Override
public int getUploadThrottle() {
return config.get(PRIAM_PRE + ".upload.throttle", -1);
}
@Override
public int getRemoteFileSystemObjectExistsThrottle() {
return config.get(PRIAM_PRE + ".remoteFileSystemObjectExistThrottle", -1);
}
@Override
public boolean isLocalBootstrapEnabled() {
return config.get(PRIAM_PRE + ".localbootstrap.enable", false);
}
@Override
public int getCompactionThroughput() {
return config.get(PRIAM_PRE + ".compaction.throughput", 8);
}
@Override
public int getMaxHintWindowInMS() {
return config.get(PRIAM_PRE + ".hint.window", 10800000);
}
public int getHintedHandoffThrottleKb() {
return config.get(PRIAM_PRE + ".hints.throttleKb", 1024);
}
@Override
public String getBootClusterName() {
return config.get(PRIAM_PRE + ".bootcluster", "");
}
@Override
public String getSeedProviderName() {
return config.get(
PRIAM_PRE + ".seed.provider",
"com.netflix.priam.cassandra.extensions.NFSeedProvider");
}
public double getMemtableCleanupThreshold() {
return config.get(PRIAM_PRE + ".memtable.cleanup.threshold", 0.11);
}
@Override
public int getStreamingThroughputMB() {
return config.get(PRIAM_PRE + ".streaming.throughput.mb", 400);
}
public String getPartitioner() {
return config.get(PRIAM_PRE + ".partitioner", "org.apache.cassandra.dht.RandomPartitioner");
}
public String getKeyCacheSizeInMB() {
return config.get(PRIAM_PRE + ".keyCache.size");
}
public String getKeyCacheKeysToSave() {
return config.get(PRIAM_PRE + ".keyCache.count");
}
public String getRowCacheSizeInMB() {
return config.get(PRIAM_PRE + ".rowCache.size");
}
public String getRowCacheKeysToSave() {
return config.get(PRIAM_PRE + ".rowCache.count");
}
@Override
public String getCassProcessName() {
return config.get(PRIAM_PRE + ".cass.process", "CassandraDaemon");
}
public String getYamlLocation() {
return config.get(PRIAM_PRE + ".yamlLocation", getCassHome() + "/conf/cassandra.yaml");
}
@Override
public boolean supportsTuningJVMOptionsFile() {
return config.get(PRIAM_PRE + ".jvm.options.supported", false);
}
@Override
public String getJVMOptionsFileLocation() {
return config.get(PRIAM_PRE + ".jvm.options.location", getCassHome() + "/conf/jvm.options");
}
public String getAuthenticator() {
return config.get(
PRIAM_PRE + ".authenticator", "org.apache.cassandra.auth.AllowAllAuthenticator");
}
public String getAuthorizer() {
return config.get(
PRIAM_PRE + ".authorizer", "org.apache.cassandra.auth.AllowAllAuthorizer");
}
@Override
public boolean doesCassandraStartManually() {
return config.get(PRIAM_PRE + ".cass.manual.start.enable", false);
}
public String getInternodeCompression() {
return config.get(PRIAM_PRE + ".internodeCompression", "all");
}
@Override
public boolean isBackingUpCommitLogs() {
return config.get(PRIAM_PRE + ".clbackup.enabled", false);
}
@Override
public String getCommitLogBackupPropsFile() {
return config.get(
PRIAM_PRE + ".clbackup.propsfile",
getCassHome() + "/conf/commitlog_archiving.properties");
}
@Override
public String getCommitLogBackupArchiveCmd() {
return config.get(
PRIAM_PRE + ".clbackup.archiveCmd", "/bin/ln %path /mnt/data/backup/%name");
}
@Override
public String getCommitLogBackupRestoreCmd() {
return config.get(PRIAM_PRE + ".clbackup.restoreCmd", "/bin/mv %from %to");
}
@Override
public String getCommitLogBackupRestoreFromDirs() {
return config.get(PRIAM_PRE + ".clbackup.restoreDirs", "/mnt/data/backup/commitlog/");
}
@Override
public String getCommitLogBackupRestorePointInTime() {
return config.get(PRIAM_PRE + ".clbackup.restoreTime", "");
}
@Override
public int maxCommitLogsRestore() {
return config.get(PRIAM_PRE + ".clrestore.max", 10);
}
public boolean isClientSslEnabled() {
return config.get(PRIAM_PRE + ".client.sslEnabled", false);
}
public String getInternodeEncryption() {
return config.get(PRIAM_PRE + ".internodeEncryption", "none");
}
public boolean isDynamicSnitchEnabled() {
return config.get(PRIAM_PRE + ".dsnitchEnabled", true);
}
public boolean isThriftEnabled() {
return config.get(PRIAM_PRE + ".thrift.enabled", true);
}
public boolean isNativeTransportEnabled() {
return config.get(PRIAM_PRE + ".nativeTransport.enabled", false);
}
public int getConcurrentReadsCnt() {
return config.get(PRIAM_PRE + ".concurrentReads", 32);
}
public int getConcurrentWritesCnt() {
return config.get(PRIAM_PRE + ".concurrentWrites", 32);
}
public int getConcurrentCompactorsCnt() {
int cpus = Runtime.getRuntime().availableProcessors();
return config.get(PRIAM_PRE + ".concurrentCompactors", cpus);
}
public String getRpcServerType() {
return config.get(PRIAM_PRE + ".rpc.server.type", "hsha");
}
public int getRpcMinThreads() {
return config.get(PRIAM_PRE + ".rpc.min.threads", 16);
}
public int getRpcMaxThreads() {
return config.get(PRIAM_PRE + ".rpc.max.threads", 2048);
}
@Override
public int getCompactionLargePartitionWarnThresholdInMB() {
return config.get(PRIAM_PRE + ".compaction.large.partition.warn.threshold", 100);
}
public String getExtraConfigParams() {
return config.get(PRIAM_PRE + ".extra.params");
}
@Override
public Map<String, String> getExtraEnvParams() {
String envParams = config.get(PRIAM_PRE + ".extra.env.params");
if (envParams == null) {
logger.info("getExtraEnvParams: No extra env params");
return null;
}
Map<String, String> extraEnvParamsMap = new HashMap<>();
String[] pairs = envParams.split(",");
logger.info("getExtraEnvParams: Extra cass params. From config :{}", envParams);
for (String pair1 : pairs) {
String[] pair = pair1.split("=");
if (pair.length > 1) {
String priamKey = pair[0];
String cassKey = pair[1];
String cassVal = config.get(priamKey);
logger.info(
"getExtraEnvParams: Start-up/ env params: Priamkey[{}], CassStartupKey[{}], Val[{}]",
priamKey,
cassKey,
cassVal);
if (!StringUtils.isBlank(cassKey) && !StringUtils.isBlank(cassVal)) {
extraEnvParamsMap.put(cassKey, cassVal);
}
}
}
return extraEnvParamsMap;
}
public String getCassYamlVal(String priamKey) {
return config.get(priamKey);
}
public boolean getAutoBoostrap() {
return config.get(PRIAM_PRE + ".auto.bootstrap", true);
}
@Override
public boolean isCreateNewTokenEnable() {
return config.get(PRIAM_PRE + ".create.new.token.enable", true);
}
@Override
public String getPrivateKeyLocation() {
return config.get(PRIAM_PRE + ".private.key.location");
}
@Override
public String getRestoreSourceType() {
return config.get(PRIAM_PRE + ".restore.source.type");
}
@Override
public boolean isEncryptBackupEnabled() {
return config.get(PRIAM_PRE + ".encrypted.backup.enabled", false);
}
@Override
public String getAWSRoleAssumptionArn() {
return config.get(PRIAM_PRE + ".roleassumption.arn");
}
@Override
public String getClassicEC2RoleAssumptionArn() {
return config.get(PRIAM_PRE + ".ec2.roleassumption.arn");
}
@Override
public String getVpcEC2RoleAssumptionArn() {
return config.get(PRIAM_PRE + ".vpc.roleassumption.arn");
}
@Override
public boolean isDualAccount() {
return config.get(PRIAM_PRE + ".roleassumption.dualaccount", false);
}
@Override
public String getGcsServiceAccountId() {
return config.get(PRIAM_PRE + ".gcs.service.acct.id");
}
@Override
public String getGcsServiceAccountPrivateKeyLoc() {
return config.get(
PRIAM_PRE + ".gcs.service.acct.private.key",
"/apps/tomcat/conf/gcsentryptedkey.p12");
}
@Override
public String getPgpPasswordPhrase() {
return config.get(PRIAM_PRE + ".pgp.password.phrase");
}
@Override
public String getPgpPublicKeyLoc() {
return config.get(PRIAM_PRE + ".pgp.pubkey.file.location");
}
@Override
public boolean enableAsyncIncremental() {
return config.get(PRIAM_PRE + ".async.incremental", false);
}
@Override
public boolean enableAsyncSnapshot() {
return config.get(PRIAM_PRE + ".async.snapshot", false);
}
@Override
public int getBackupQueueSize() {
return config.get(PRIAM_PRE + ".backup.queue.size", 100000);
}
@Override
public int getDownloadQueueSize() {
return config.get(PRIAM_PRE + ".download.queue.size", 100000);
}
@Override
public long getUploadTimeout() {
return config.get(PRIAM_PRE + ".upload.timeout", (2 * 60 * 60 * 1000L));
}
public long getDownloadTimeout() {
return config.get(PRIAM_PRE + ".download.timeout", (10 * 60 * 60 * 1000L));
}
@Override
public int getTombstoneWarnThreshold() {
return config.get(PRIAM_PRE + ".tombstone.warning.threshold", 1000);
}
@Override
public int getTombstoneFailureThreshold() {
return config.get(PRIAM_PRE + ".tombstone.failure.threshold", 100000);
}
@Override
public int getStreamingSocketTimeoutInMS() {
return config.get(PRIAM_PRE + ".streaming.socket.timeout.ms", 86400000);
}
@Override
public String getFlushKeyspaces() {
return config.get(PRIAM_PRE + ".flush.keyspaces");
}
@Override
public String getBackupStatusFileLoc() {
return config.get(
PRIAM_PRE + ".backup.status.location",
getDataFileLocation() + File.separator + "backup.status");
}
@Override
public boolean useSudo() {
return config.get(PRIAM_PRE + ".cass.usesudo", true);
}
@Override
public boolean enableBackupNotification() {
return config.get(PRIAM_PRE + ".enableBackupNotification", true);
}
@Override
public String getBackupNotificationTopicArn() {
return config.get(PRIAM_PRE + ".backup.notification.topic.arn", "");
}
@Override
public boolean isPostRestoreHookEnabled() {
return config.get(PRIAM_PRE + ".postrestorehook.enabled", false);
}
@Override
public String getPostRestoreHook() {
return config.get(PRIAM_PRE + ".postrestorehook");
}
@Override
public String getPostRestoreHookHeartbeatFileName() {
return config.get(
PRIAM_PRE + ".postrestorehook.heartbeat.filename",
getDataFileLocation() + File.separator + "postrestorehook_heartbeat");
}
@Override
public String getPostRestoreHookDoneFileName() {
return config.get(
PRIAM_PRE + ".postrestorehook.done.filename",
getDataFileLocation() + File.separator + "postrestorehook_done");
}
@Override
public int getPostRestoreHookTimeOutInDays() {
return config.get(PRIAM_PRE + ".postrestorehook.timeout.in.days", 2);
}
@Override
public int getPostRestoreHookHeartBeatTimeoutInMs() {
    // Heartbeat timeout in ms for the post-restore hook; default 120000 (2 minutes).
    return config.get(PRIAM_PRE + ".postrestorehook.heartbeat.timeout", 120000);
}
@Override
public int getPostRestoreHookHeartbeatCheckFrequencyInMs() {
    // How often (ms) the heartbeat file is checked; default 120000 (2 minutes).
    return config.get(PRIAM_PRE + ".postrestorehook.heartbeat.check.frequency", 120000);
}
@Override
public String getProperty(String key, String defaultValue) {
    // Generic pass-through lookup; the raw key is used as-is (no PRIAM_PRE prefix).
    return config.get(key, defaultValue);
}
@Override
public String getMergedConfigurationCronExpression() {
    // Quartz-style cron for the config-merge task.
    // Default fires every minute on the top of the minute.
    return config.get(PRIAM_PRE + ".configMerge.cron", "0 * * * * ? *");
}
@Override
public int getGracePeriodDaysForCompaction() {
    // Grace period (days) before compaction-related processing; default 5.
    return config.get(PRIAM_PRE + ".gracePeriodDaysForCompaction", 5);
}
@Override
public int getForgottenFileGracePeriodDaysForRead() {
    // Grace period (days) before a file is considered "forgotten" for reads; default 3.
    return config.get(PRIAM_PRE + ".forgottenFileGracePeriodDaysForRead", 3);
}
@Override
public boolean isForgottenFileMoveEnabled() {
    // Whether forgotten files are moved (rather than only reported); default false.
    return config.get(PRIAM_PRE + ".forgottenFileMoveEnabled", false);
}
@Override
public boolean checkThriftServerIsListening() {
    // Whether health checks should verify the Thrift server port; default false.
    return config.get(PRIAM_PRE + ".checkThriftServerIsListening", false);
}
@Override
public BackupsToCompress getBackupsToCompress() {
    // Which backups get compressed; default BackupsToCompress.ALL.
    // NOTE(review): the key is the literal "priam.backupsToCompress" instead of
    // PRIAM_PRE + ".backupsToCompress" used everywhere else -- if PRIAM_PRE is ever not
    // "priam" this property silently stops being read; confirm and align.
    // valueOf throws IllegalArgumentException for an unrecognized property value.
    return BackupsToCompress.valueOf(
            config.get("priam.backupsToCompress", BackupsToCompress.ALL.name()));
}
@Override
public boolean permitDirectTokenAssignmentWithGossipMismatch() {
    // Whether token assignment may proceed despite a gossip mismatch; default false.
    return config.get(PRIAM_PRE + ".permitDirectTokenAssignmentWithGossipMismatch", false);
}
@Override
public int getTargetMinutesToCompleteSnaphotUpload() {
    // Target duration (minutes) for a snapshot upload; default 0 (no target).
    // NOTE(review): "Snaphot" typo is part of the public interface name; renaming would
    // break callers, so it is left as-is.
    return config.get(PRIAM_PRE + ".snapshotUploadDuration", 0);
}
@Override
public double getRateLimitChangeThreshold() {
    // Fractional threshold below which rate-limit changes are ignored; default 0.1 (10%).
    return config.get(PRIAM_PRE + ".rateLimitChangeThreshold", 0.1);
}
@Override
public boolean getAutoSnapshot() {
    // Whether Cassandra auto_snapshot behavior is requested; default false.
    return config.get(PRIAM_PRE + ".autoSnapshot", false);
}
}
| 3,230 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cryptography/IFileCryptography.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.cryptography;
import java.io.InputStream;
import java.util.Iterator;
/**
 * Contract for encrypting and decrypting backup file streams. Implementations are expected to
 * pair compression with encryption (see the decryptStream contract below).
 */
public interface IFileCryptography {
    /**
     * @param in - a handle to the encrypted, compressed data stream
     * @param passwd - pass phrase used to extract the PGP private key from the encrypted content.
     * @param objectName - name of the object we are decrypting, currently use for debugging
     *     purposes only.
     * @return a handle to the decrypted, uncompress data stream.
     * @throws Exception if the stream cannot be decrypted or decompressed.
     */
    InputStream decryptStream(InputStream in, char[] passwd, String objectName) throws Exception;

    /**
     * @param is - a handle to the plaintext data stream
     * @param fileName - name recorded alongside the encrypted payload (e.g. in the PGP literal
     *     data packet -- confirm with the implementation in use).
     * @return - an iterate of the ciphertext stream
     * @throws Exception if encryption cannot be initialized.
     */
    Iterator<byte[]> encryptStream(InputStream is, String fileName) throws Exception;
}
| 3,231 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cryptography/CryptographyAlgorithm.java | package com.netflix.priam.cryptography;
/** Cryptography algorithm used for backup/restore file handling. */
public enum CryptographyAlgorithm {
    // No encryption applied.
    PLAINTEXT,
    // PGP-based encryption (see PgpCryptography).
    PGP
}
| 3,232 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cryptography | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpUtil.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.cryptography.pgp;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.NoSuchProviderException;
import java.util.Iterator;
import org.bouncycastle.openpgp.*;
/** Static helpers for locating PGP keys in BouncyCastle key-ring collections. */
public class PgpUtil {

    // Utility class; not meant to be instantiated.
    private PgpUtil() {}

    /**
     * Search a secret key ring collection for a secret key corresponding to keyID if it exists.
     *
     * @param pgpSec a secret key ring collection.
     * @param keyID keyID we want.
     * @param pass passphrase to decrypt secret key with.
     * @return secret or private key corresponding to the keyID, or null when not present.
     * @throws PGPException if there is any exception in getting the PGP key corresponding to the ID
     *     provided.
     * @throws NoSuchProviderException If PGP Provider is not available.
     */
    public static PGPPrivateKey findSecretKey(
            PGPSecretKeyRingCollection pgpSec, long keyID, char[] pass)
            throws PGPException, NoSuchProviderException {
        PGPSecretKey pgpSecKey = pgpSec.getSecretKey(keyID);
        if (pgpSecKey == null) {
            return null;
        }
        return pgpSecKey.extractPrivateKey(pass, "BC");
    }

    /**
     * Loads the first encryption-capable public key from the key ring file at {@code fileName}.
     *
     * @param fileName path to the PGP public key ring file.
     * @return first encryption-capable PGP public key in the ring.
     * @throws IOException if the file cannot be read.
     * @throws PGPException if the key ring cannot be parsed.
     */
    public static PGPPublicKey readPublicKey(String fileName) throws IOException, PGPException {
        // try-with-resources: the previous version leaked the stream if parsing threw.
        try (InputStream keyIn = new BufferedInputStream(new FileInputStream(fileName))) {
            return readPublicKey(keyIn);
        }
    }

    /**
     * A simple routine that opens a key ring and loads the first available key suitable for
     * encryption. The caller owns (and must close) {@code input}.
     *
     * @param input inputstream to the pgp file key ring.
     * @return PGP key from the key ring.
     * @throws IOException If any error in reading from the input stream.
     * @throws PGPException if there is any error in getting key from key ring.
     */
    @SuppressWarnings("rawtypes") // BouncyCastle's key-ring iterators are raw-typed in this API.
    public static PGPPublicKey readPublicKey(InputStream input) throws IOException, PGPException {
        PGPPublicKeyRingCollection pgpPub =
                new PGPPublicKeyRingCollection(PGPUtil.getDecoderStream(input));
        // Loop through the collection until we find a key suitable for encryption. A smarter
        // selection policy (e.g. by key flags or expiry) could be applied here if ever needed.
        Iterator keyRingIter = pgpPub.getKeyRings();
        while (keyRingIter.hasNext()) {
            PGPPublicKeyRing keyRing = (PGPPublicKeyRing) keyRingIter.next();
            Iterator keyIter = keyRing.getPublicKeys();
            while (keyIter.hasNext()) {
                PGPPublicKey key = (PGPPublicKey) keyIter.next();
                if (key.isEncryptionKey()) {
                    return key;
                }
            }
        }
        throw new IllegalArgumentException("Can't find encryption key in key ring.");
    }
}
| 3,233 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cryptography | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCredential.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.cryptography.pgp;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredentialGeneric;
import java.nio.charset.StandardCharsets;
import javax.inject.Inject;
/**
 * A generic implementation of fetch keys as plaintext. The key values are used within PGP
 * cryptography algorithm. Users may want to provide an implementation where your key(s)' value is
 * decrypted using AES encryption algorithm.
 */
public class PgpCredential implements ICredentialGeneric {

    private final IConfiguration config;

    @Inject
    public PgpCredential(IConfiguration config) {
        this.config = config;
    }

    /** PGP credentials are not AWS credentials, so no provider is available. */
    @Override
    public AWSCredentialsProvider getAwsCredentialProvider() {
        return null;
    }

    /**
     * Returns the configured value for the given PGP credential key.
     *
     * @param key which credential to fetch; must be non-null.
     * @return credential bytes, UTF-8 encoded.
     * @throws NullPointerException if {@code key} is null.
     * @throws IllegalArgumentException for keys this implementation does not supply.
     */
    @Override
    public byte[] getValue(KEY key) {
        if (key == null) {
            throw new NullPointerException("Credential key cannot be null.");
        }
        // Explicit UTF-8: the previous getBytes() used the platform default charset, which can
        // vary between hosts.
        switch (key) {
            case PGP_PASSWORD:
                return this.config.getPgpPasswordPhrase().getBytes(StandardCharsets.UTF_8);
            case PGP_PUBLIC_KEY_LOC:
                return this.config.getPgpPublicKeyLoc().getBytes(StandardCharsets.UTF_8);
            default:
                throw new IllegalArgumentException("Key value not supported.");
        }
    }
}
| 3,234 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cryptography | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCryptography.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.cryptography.pgp;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cryptography.IFileCryptography;
import java.io.*;
import java.security.NoSuchProviderException;
import java.security.SecureRandom;
import java.security.Security;
import java.util.Date;
import java.util.Iterator;
import javax.inject.Inject;
import org.apache.commons.io.IOUtils;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.openpgp.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * PGP (BouncyCastle) implementation of {@link IFileCryptography}. Decryption expects the PGP
 * layering produced by {@link #encryptStream}: public-key encrypted data wrapping compressed data
 * wrapping a literal data packet. The inner stream classes are stateful and NOT thread-safe.
 */
public class PgpCryptography implements IFileCryptography {
    private static final Logger logger = LoggerFactory.getLogger(PgpCryptography.class);
    // NOTE(review): effectively immutable after injection; could be final.
    private IConfiguration config;

    static {
        // tell the JVM the security provider is PGP
        Security.addProvider(new BouncyCastleProvider());
    }

    @Inject
    public PgpCryptography(IConfiguration config) {
        this.config = config;
    }

    // Loads the PGP secret key ring collection from the configured private key file.
    // Wraps file-not-found and parse failures in IllegalStateException.
    private PGPSecretKeyRingCollection getPgpSecurityCollection() {
        InputStream keyIn;
        try {
            keyIn = new BufferedInputStream(new FileInputStream(config.getPrivateKeyLocation()));
        } catch (FileNotFoundException e) {
            throw new IllegalStateException(
                    "PGP private key file not found. file: " + config.getPrivateKeyLocation());
        }
        try {
            return new PGPSecretKeyRingCollection(PGPUtil.getDecoderStream(keyIn));
        } catch (Exception e) {
            logger.error(
                    "Exception in reading PGP security collection ring. Msg: {}",
                    e.getLocalizedMessage());
            throw new IllegalStateException("Exception in reading PGP security collection ring", e);
        }
    }

    // Loads the first encryption-capable PGP public key from the configured public key file.
    private PGPPublicKey getPubKey() {
        InputStream pubKeyIS;
        try {
            pubKeyIS = new BufferedInputStream(new FileInputStream(config.getPgpPublicKeyLoc()));
        } catch (FileNotFoundException e) {
            logger.error(
                    "Exception in reading PGP security collection ring. Msg: {}",
                    e.getLocalizedMessage());
            throw new RuntimeException("Exception in reading PGP public key", e);
        }
        try {
            return PgpUtil.readPublicKey(pubKeyIS);
        } catch (Exception e) {
            throw new RuntimeException("Exception in reading & deriving the PGP public key.", e);
        }
    }

    /*
     * @param in - a handle to the encrypted, compressed data stream
     * @param pass - pass phrase used to extract the PGP private key from the encrypted content.
     * @param objectName - name of the object used only for debugging purposes
     * @return a handle to the decrypted, uncompress data stream.
     */
    @Override
    public InputStream decryptStream(InputStream in, char[] passwd, String objectName)
            throws Exception {
        logger.info("Start to decrypt object: {}", objectName);
        in = PGPUtil.getDecoderStream(in);
        // general class for reading a stream of data.
        PGPObjectFactory inPgpReader = new PGPObjectFactory(in);
        Object o = inPgpReader.nextObject();
        PGPEncryptedDataList encryptedDataList;
        // the first object might be a PGP marker packet.
        if (o instanceof PGPEncryptedDataList) encryptedDataList = (PGPEncryptedDataList) o;
        else
            // first object was a marker, the real data is the next one.
            encryptedDataList = (PGPEncryptedDataList) inPgpReader.nextObject();
        // get the iterator so we can iterate through all the encrypted data.
        Iterator encryptedDataIterator = encryptedDataList.getEncryptedDataObjects();
        // to be use for decryption
        PGPPrivateKey privateKey = null;
        // a handle to the encrypted data stream
        PGPPublicKeyEncryptedData encryptedDataStreamHandle = null;
        // Try each encrypted-data packet until our key ring yields a matching private key.
        while (privateKey == null && encryptedDataIterator.hasNext()) {
            // a handle to the encrypted data stream
            encryptedDataStreamHandle = (PGPPublicKeyEncryptedData) encryptedDataIterator.next();
            try {
                privateKey =
                        findSecretKey(
                                getPgpSecurityCollection(),
                                encryptedDataStreamHandle.getKeyID(),
                                passwd);
            } catch (Exception ex) {
                throw new IllegalStateException(
                        "decryption exception: object: "
                                + objectName
                                + ", Exception when fetching private key using key: "
                                + encryptedDataStreamHandle.getKeyID(),
                        ex);
            }
        }
        if (privateKey == null)
            throw new IllegalStateException(
                    "decryption exception: object: "
                            + objectName
                            + ", Private key for message not found.");
        // finally, lets decrypt the object
        InputStream decryptInputStream = encryptedDataStreamHandle.getDataStream(privateKey, "BC");
        PGPObjectFactory decryptedDataReader = new PGPObjectFactory(decryptInputStream);
        // the decrypted data object is compressed, lets decompress it.
        // get a handle to the decrypted, compress data stream
        PGPCompressedData compressedDataReader =
                (PGPCompressedData) decryptedDataReader.nextObject();
        InputStream compressedStream =
                new BufferedInputStream(compressedDataReader.getDataStream());
        PGPObjectFactory compressedStreamReader = new PGPObjectFactory(compressedStream);
        Object data = compressedStreamReader.nextObject();
        if (data instanceof PGPLiteralData) {
            PGPLiteralData dataPgpReader = (PGPLiteralData) data;
            // a handle to the decrypted, uncompress data stream
            return dataPgpReader.getInputStream();
        } else if (data instanceof PGPOnePassSignatureList) {
            throw new PGPException(
                    "decryption exception: object: "
                            + objectName
                            + ", encrypted data contains a signed message - not literal data.");
        } else {
            throw new PGPException(
                    "decryption exception: object: "
                            + objectName
                            + ", data is not a simple encrypted file - type unknown.");
        }
    }

    /*
     * Extract the PGP private key from the encrypted content. Since the PGP key file contains N number of keys, this method will fetch the
     * private key by "keyID".
     *
     * @param securityCollection - handle to the PGP key file.
     * @param keyID - fetch private key for this value.
     * @param pass - pass phrase used to extract the PGP private key from the encrypted content.
     * @return PGP private key, null if not found.
     */
    private static PGPPrivateKey findSecretKey(
            PGPSecretKeyRingCollection securityCollection, long keyID, char[] pass)
            throws PGPException, NoSuchProviderException {
        PGPSecretKey privateKey = securityCollection.getSecretKey(keyID);
        if (privateKey == null) {
            return null;
        }
        return privateKey.extractPrivateKey(pass, "BC");
    }

    /**
     * Wraps the plaintext stream in a chunked encryptor; each call to {@code next()} on the
     * returned iterator yields up to ~10MB of ciphertext.
     */
    @Override
    public Iterator<byte[]> encryptStream(InputStream is, String fileName) {
        return new ChunkEncryptorStream(is, fileName, getPubKey());
    }

    /**
     * Iterator over the ciphertext of a plaintext input stream, produced in chunks of up to
     * {@link #MAX_CHUNK} bytes. Single-use and not thread-safe.
     */
    public class ChunkEncryptorStream implements Iterator<byte[]> {
        // Chunk sizes of 10 MB
        private static final int MAX_CHUNK = 10 * 1024 * 1024;
        private boolean hasnext = true;
        private final InputStream is;
        private final InputStream encryptedSrc;
        private final ByteArrayOutputStream bos;
        private final BufferedOutputStream pgout;

        public ChunkEncryptorStream(InputStream is, String fileName, PGPPublicKey pubKey) {
            this.is = is;
            this.bos = new ByteArrayOutputStream();
            this.pgout = new BufferedOutputStream(this.bos);
            this.encryptedSrc = new EncryptedInputStream(this.is, fileName, pubKey);
        }

        @Override
        public boolean hasNext() {
            return this.hasnext;
        }

        /*
         * Fill and return a buffer of the data within encrypted stream.
         *
         * @return a buffer of ciphertext
         */
        @Override
        public byte[] next() {
            try {
                byte buffer[] = new byte[2048];
                int count;
                while ((count = encryptedSrc.read(buffer, 0, buffer.length)) != -1) {
                    pgout.write(buffer, 0, count);
                    // Cap each returned chunk at MAX_CHUNK to bound memory use.
                    if (bos.size() >= MAX_CHUNK) return returnSafe();
                }
                // flush remaining data in buffer and close resources.
                return done();
            } catch (Exception e) {
                throw new RuntimeException(
                        "Error encountered returning next chunk of ciphertext. Msg: "
                                + e.getLocalizedMessage(),
                        e);
            }
        }

        @Override
        public void remove() {
            // Intentional no-op: removal has no meaning for a ciphertext stream.
            // NOTE(review): the Iterator contract suggests throwing
            // UnsupportedOperationException here instead of silently doing nothing.
        }

        /*
         * Copy the data in the buffer to the output[] and then reset the buffer to the beginning.
         */
        private byte[] returnSafe() {
            byte[] returnData = this.bos.toByteArray();
            this.bos.reset();
            return returnData;
        }

        /*
         * flush remaining data in buffer and close resources.
         */
        private byte[] done() throws IOException {
            pgout.flush(); // flush whatever is in the buffer to the output stream
            this.hasnext = false; // tell clients that there is no more data
            byte[] returnData = this.bos.toByteArray();
            IOUtils.closeQuietly(pgout); // close the handle to the buffered output
            IOUtils.closeQuietly(bos); // close the handle to the actual output
            return returnData;
        }
    }

    /**
     * Adapts a plaintext InputStream into an InputStream of PGP ciphertext (encrypted, then
     * compressed as UNCOMPRESSED packets, wrapped in a literal data packet). Not thread-safe;
     * read(byte[], int, int) is synchronized only to serialize concurrent reads on one instance.
     */
    public class EncryptedInputStream extends InputStream {
        private final InputStream srcHandle; // handle to the source stream
        private ByteArrayOutputStream bos = null; // Handle to encrypted stream
        private int bosOff = 0; // current position within encrypted stream
        private OutputStream
                pgpBosWrapper; // wrapper around the buffer which will contain the encrypted data.
        private OutputStream encryptedOsWrapper; // handle to the encrypted data
        private PGPCompressedDataGenerator
                compressedDataGenerator; // a means to compress data using PGP
        private String fileName; // TODO: eliminate once debugging is completed.

        public EncryptedInputStream(InputStream is, String fileName, PGPPublicKey pubKey) {
            this.srcHandle = is;
            this.bos = new ByteArrayOutputStream();
            // creates a cipher stream which will have an integrity packet associated with it
            PGPEncryptedDataGenerator encryptedDataGenerator =
                    new PGPEncryptedDataGenerator(
                            PGPEncryptedData.CAST5, true, new SecureRandom(), "BC");
            try {
                // Add a key encryption method to be used to encrypt the session data associated
                // with this encrypted data
                encryptedDataGenerator.addMethod(pubKey);
                // wrapper around the buffer which will contain the encrypted data.
                pgpBosWrapper = encryptedDataGenerator.open(bos, new byte[1 << 15]);
            } catch (Exception e) {
                throw new RuntimeException(
                        "Exception when wrapping PGP around our output stream", e);
            }
            // a means to compress data using PGP
            this.compressedDataGenerator =
                    new PGPCompressedDataGenerator(PGPCompressedData.UNCOMPRESSED);
            /*
             * Open a literal data packet, returning a stream to store the data inside the packet as an indefinite stream.
             * A "literal data packet" in PGP world is the body of a message; data that is not to be further interpreted.
             *
             * The stream is written out as a series of partial packets with a chunk size determine by the size of the passed in buffer.
             * @param outputstream - the stream we want the packet in
             * @param format - the format we are using.
             * @param filename
             * @param the time of last modification we want stored.
             * @param the buffer to use for collecting data to put into chunks.
             */
            try {
                PGPLiteralDataGenerator literalDataGenerator = new PGPLiteralDataGenerator();
                this.encryptedOsWrapper =
                        literalDataGenerator.open(
                                compressedDataGenerator.open(pgpBosWrapper),
                                PGPLiteralData.BINARY,
                                fileName,
                                new Date(),
                                new byte[1 << 15]);
            } catch (Exception e) {
                throw new RuntimeException(
                        "Exception when creating the PGP encrypted wrapper around the output stream.",
                        e);
            }
            this.fileName = fileName; // TODO: eliminate once debugging is completed.
        }

        /*
         * Read a chunk from input stream and perform encryption.
         *
         * NOTE(review): assumes len <= 64KiB (the size of the internal staging buffer "buff");
         * the in-file caller reads in 2KiB chunks so this holds today -- confirm before reusing
         * this class elsewhere, otherwise srcHandle.read(buff, 0, len) can throw
         * IndexOutOfBoundsException.
         *
         * @param buffer for this behavior to store the encrypted behavior
         * @param starting position within buffer to append
         * @param max number of bytes to store in buffer
         */
        @Override
        public synchronized int read(byte b[], int off, int len) throws IOException {
            if (this.bosOff < this.bos.size()) {
                // if here, you still have data in the encrypted stream, lets give it to the client
                return copyToBuff(b, off, len);
            }
            // If here, it's time to read the next chunk from the input and do the encryption.
            this.bos.reset();
            this.bosOff = 0;
            // == read up to "len" or end of file from input stream and encrypt it.
            byte[] buff = new byte[1 << 16];
            int bytesRead = 0; // num of bytes read from the source input stream
            while (this.bos.size() < len && (bytesRead = this.srcHandle.read(buff, 0, len)) > 0) {
                // lets process each chunk from input until we fill our output
                // stream or we reach end of input
                this.encryptedOsWrapper.write(buff, 0, bytesRead);
            }
            if (bytesRead < 0) {
                // we have read everything from the source input, lets perform cleanup on
                // any resources.
                this.encryptedOsWrapper.close();
                this.compressedDataGenerator.close();
                this.pgpBosWrapper.close();
            }
            if (bytesRead < 0 && this.bos.size() == 0) {
                // if here, read all the bytes from the input and there is nothing in the encrypted
                // stream.
                return bytesRead;
            }
            /*
             * If here, one of the following occurred:
             * 1. you read data from the input and encrypted it.
             * 2. there was no more data in the input but you still had some data in the encrypted stream.
             *
             */
            return copyToBuff(b, off, len);
        }

        /*
         *
         * Copy the bytes from the encrypted stream to an output buffer
         *
         * @param output buffer
         * @param starting point within output buffer
         * @param max size of output buffer
         * @return number of bytes copied from the encrypted stream to the output buffer
         */
        private int copyToBuff(byte[] buff, int off, int len) {
            /*
             * num of bytes to copy within encrypted stream = (current size of bytes within encrypted stream - current position within encrypted stream) < size of output buffer,
             * then copy what is in the encrypted stream; otherwise, copy up to the max size of the output buffer.
             */
            int wlen =
                    (this.bos.size() - this.bosOff) < len ? (this.bos.size() - this.bosOff) : len;
            // copy data within encrypted stream to the output buffer
            System.arraycopy(this.bos.toByteArray(), this.bosOff, buff, off, wlen);
            // now update the current position within the encrypted stream
            this.bosOff = this.bosOff + wlen;
            return wlen;
        }

        @Override
        public void close() throws IOException {
            this.encryptedOsWrapper.close();
            this.compressedDataGenerator.close();
            this.pgpBosWrapper.close();
        }

        @Override
        public int read() throws IOException {
            throw new UnsupportedOperationException(
                    "Not supported, invoke read(byte[] bytes, int off, int len) instead.");
        }
    }
}
| 3,235 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/notification/BackupNotificationMgr.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.notification;
import com.amazonaws.services.sns.model.MessageAttributeValue;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.BackupRestoreException;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.identity.config.InstanceInfo;
import java.time.Instant;
import java.util.*;
import javax.inject.Inject;
import org.apache.commons.lang3.StringUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A means to notify interested party(ies) of an uploaded file, success or failed.
 *
 * <p>Created by vinhn on 10/30/16.
 */
public class BackupNotificationMgr {
    private static final Logger logger = LoggerFactory.getLogger(BackupNotificationMgr.class);
    private final IConfiguration config;
    private final IBackupRestoreConfig backupRestoreConfig;
    private final INotificationService notificationService;
    private final InstanceInfo instanceInfo;
    private final InstanceIdentity instanceIdentity;
    // Parsed form of notifiedBackupFileTypes; an empty set means "notify for all types".
    // NOTE(review): this set is mutated in place when the config property changes and the class
    // has no synchronization -- assumed to be used single-threaded; confirm with callers.
    private final Set<AbstractBackupPath.BackupFileType> notifiedBackupFileTypesSet;
    // Raw comma-separated include list last read from config; used to detect config changes.
    private String notifiedBackupFileTypes;

    @Inject
    public BackupNotificationMgr(
            IConfiguration config,
            IBackupRestoreConfig backupRestoreConfig,
            INotificationService notificationService,
            InstanceInfo instanceInfo,
            InstanceIdentity instanceIdentity) {
        this.config = config;
        this.backupRestoreConfig = backupRestoreConfig;
        this.notificationService = notificationService;
        this.instanceInfo = instanceInfo;
        this.instanceIdentity = instanceIdentity;
        this.notifiedBackupFileTypesSet = new HashSet<>();
        this.notifiedBackupFileTypes = "";
    }

    /**
     * Publishes a notification that the snapshot at {@code remotePath} was verified.
     * JSON construction errors are logged and swallowed so backup processing continues.
     */
    public void notify(String remotePath, Instant snapshotInstant) {
        JSONObject jsonObject = new JSONObject();
        try {
            jsonObject.put("s3bucketname", this.config.getBackupPrefix());
            jsonObject.put("s3clustername", config.getAppName());
            jsonObject.put("s3namespace", remotePath);
            jsonObject.put("region", instanceInfo.getRegion());
            jsonObject.put("rack", instanceInfo.getRac());
            jsonObject.put("token", instanceIdentity.getInstance().getToken());
            jsonObject.put(
                    "backuptype", AbstractBackupPath.BackupFileType.SNAPSHOT_VERIFIED.name());
            jsonObject.put("snapshotInstant", snapshotInstant);
            // SNS Attributes for filtering messages. Cluster name and backup file type.
            Map<String, MessageAttributeValue> messageAttributes = getMessageAttributes(jsonObject);
            this.notificationService.notify(jsonObject.toString(), messageAttributes);
        } catch (JSONException exception) {
            // BUGFIX: the previous message mapped remotePath into the "verification:" slot and
            // the exception message into "time:"; placeholders and arguments now line up.
            logger.error(
                    "JSON exception during generation of notification for snapshot verification. path: {}, time: {}, msg: {}",
                    remotePath,
                    snapshotInstant,
                    exception.getLocalizedMessage());
        }
    }

    /**
     * Builds SNS message attributes (used for subscription filtering) from the notification
     * payload: always cluster name and backup type, plus any configured additional attributes
     * that are present in the payload.
     *
     * @throws JSONException if a referenced payload field cannot be read.
     */
    private Map<String, MessageAttributeValue> getMessageAttributes(JSONObject message)
            throws JSONException {
        Map<String, MessageAttributeValue> attributes = new HashMap<>();
        attributes.put("s3clustername", toStringAttribute(message.getString("s3clustername")));
        attributes.put("backuptype", toStringAttribute(message.getString("backuptype")));
        for (String attr : backupRestoreConfig.getBackupNotificationAdditionalMessageAttrs()) {
            if (message.has(attr)) {
                attributes.put(attr, toStringAttribute(String.valueOf(message.get(attr))));
            }
        }
        return attributes;
    }

    // Wraps a value as an SNS String message attribute.
    private MessageAttributeValue toStringAttribute(String value) {
        return new MessageAttributeValue().withDataType("String").withStringValue(value);
    }

    /**
     * Publishes an upload notification for the given backup file, if its type is included in the
     * configured notification include list (empty list means notify for everything). JSON
     * construction errors are logged and swallowed so the backup continues.
     */
    public void notify(AbstractBackupPath abp, UploadStatus uploadStatus) {
        JSONObject jsonObject = new JSONObject();
        try {
            Set<AbstractBackupPath.BackupFileType> updatedNotifiedBackupFileTypeSet =
                    getUpdatedNotifiedBackupFileTypesSet(this.notifiedBackupFileTypes);
            if (updatedNotifiedBackupFileTypeSet.isEmpty()
                    || updatedNotifiedBackupFileTypeSet.contains(abp.getType())) {
                jsonObject.put("s3bucketname", this.config.getBackupPrefix());
                jsonObject.put("s3clustername", abp.getClusterName());
                jsonObject.put("s3namespace", abp.getRemotePath());
                jsonObject.put("keyspace", abp.getKeyspace());
                jsonObject.put("cf", abp.getColumnFamily());
                jsonObject.put("region", abp.getRegion());
                jsonObject.put("rack", instanceInfo.getRac());
                jsonObject.put("token", abp.getToken());
                jsonObject.put("filename", abp.getFileName());
                jsonObject.put("uncompressfilesize", abp.getSize());
                jsonObject.put("compressfilesize", abp.getCompressedFileSize());
                jsonObject.put("backuptype", abp.getType().name());
                jsonObject.put("uploadstatus", uploadStatus.name().toLowerCase());
                jsonObject.put("compression", abp.getCompression().name());
                jsonObject.put("encryption", abp.getEncryption().name());
                jsonObject.put("isincremental", abp.isIncremental());
                // SNS Attributes for filtering messages. Cluster name and backup file type.
                Map<String, MessageAttributeValue> messageAttributes =
                        getMessageAttributes(jsonObject);
                this.notificationService.notify(jsonObject.toString(), messageAttributes);
            } else {
                logger.debug(
                        "BackupFileType {} is not in the list of notified component types {}",
                        abp.getType().name(),
                        StringUtils.join(notifiedBackupFileTypesSet, ", "));
            }
        } catch (JSONException exception) {
            logger.error(
                    "JSON exception during generation of notification for upload {}. Local file {}. Ignoring to continue with rest of backup. Msg: {}",
                    uploadStatus,
                    abp.getFileName(),
                    exception.getLocalizedMessage());
        }
    }

    /**
     * Returns the current set of backup file types to notify for, re-parsing the config property
     * only when its raw value changed since the last call.
     *
     * @param notifiedBackupFileTypes the previously seen raw property value.
     * @return unmodifiable view of the current include set (empty = notify for all types).
     */
    private Set<AbstractBackupPath.BackupFileType> getUpdatedNotifiedBackupFileTypesSet(
            String notifiedBackupFileTypes) {
        String propertyValue = this.backupRestoreConfig.getBackupNotifyComponentIncludeList();
        if (!notifiedBackupFileTypes.equals(propertyValue)) {
            // Parameterized logging instead of String.format: defers formatting cost.
            logger.info(
                    "Notified BackupFileTypes changed from {} to {}",
                    this.notifiedBackupFileTypes,
                    propertyValue);
            this.notifiedBackupFileTypesSet.clear();
            // BUGFIX: reuse the value read above instead of re-reading the config property,
            // which could change between the two reads and leave the raw string and the parsed
            // set inconsistent with each other.
            this.notifiedBackupFileTypes = propertyValue;
            if (!StringUtils.isBlank(this.notifiedBackupFileTypes)) {
                for (String s : this.notifiedBackupFileTypes.split(",")) {
                    try {
                        AbstractBackupPath.BackupFileType backupFileType =
                                AbstractBackupPath.BackupFileType.fromString(s.trim());
                        notifiedBackupFileTypesSet.add(backupFileType);
                    } catch (BackupRestoreException ignored) {
                        // Unknown component names in the include list are skipped on purpose.
                    }
                }
            }
        }
        return Collections.unmodifiableSet(this.notifiedBackupFileTypesSet);
    }
}
| 3,236 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/notification/AWSSnsNotificationService.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.notification;
import com.amazonaws.services.sns.AmazonSNS;
import com.amazonaws.services.sns.AmazonSNSClient;
import com.amazonaws.services.sns.model.MessageAttributeValue;
import com.amazonaws.services.sns.model.PublishRequest;
import com.amazonaws.services.sns.model.PublishResult;
import com.netflix.priam.aws.IAMCredential;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.utils.BoundedExponentialRetryCallable;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link INotificationService} backed by a single, long-lived Amazon SNS client. Publishing is
 * retried with bounded exponential backoff; outcomes are reported via {@link BackupMetrics}.
 */
@Singleton
public class AWSSnsNotificationService implements INotificationService {
    private static final Logger logger = LoggerFactory.getLogger(AWSSnsNotificationService.class);
    private final IConfiguration configuration;
    private final AmazonSNS snsClient;
    private final BackupMetrics backupMetrics;

    @Inject
    public AWSSnsNotificationService(
            IConfiguration config,
            IAMCredential iamCredential,
            BackupMetrics backupMetrics,
            InstanceInfo instanceInfo) {
        this.configuration = config;
        this.backupMetrics = backupMetrics;
        // One SNS client per process, bound to this instance's region and IAM credentials.
        this.snsClient =
                AmazonSNSClient.builder()
                        .withCredentials(iamCredential.getAwsCredentialProvider())
                        .withRegion(instanceInfo.getRegion())
                        .build();
    }

    /**
     * Publishes {@code msg} with the given attributes to the configured SNS topic. A no-op when
     * notifications are disabled or no topic ARN is configured. Failures (after retries are
     * exhausted, or a missing message id) are counted in metrics; nothing is thrown to callers.
     */
    @Override
    public void notify(
            final String msg, final Map<String, MessageAttributeValue> messageAttributes) {
        // e.g. arn:aws:sns:eu-west-1:1234:eu-west-1-cass-sample-backup
        final String topicArn = this.configuration.getBackupNotificationTopicArn();
        if (!configuration.enableBackupNotification() || StringUtils.isEmpty(topicArn)) {
            return;
        }

        final PublishResult publishResult;
        try {
            publishResult = publishWithRetries(topicArn, msg, messageAttributes);
        } catch (Exception e) {
            logger.error(
                    String.format(
                            "Exhausted retries. Publishing notification metric for failure and moving on. Failed msg to publish: %s",
                            msg),
                    e);
            backupMetrics.incrementSnsNotificationFailure();
            return;
        }

        // The publish call returned; as an extra validation, ensure SNS handed back a message id.
        final String publishedMsgId = publishResult.getMessageId();
        if (publishedMsgId == null || publishedMsgId.isEmpty()) {
            backupMetrics.incrementSnsNotificationFailure();
            return;
        }
        backupMetrics.incrementSnsNotificationSuccess();
        if (logger.isTraceEnabled()) {
            logger.trace("Published msg: {} aws sns messageId - {}", msg, publishedMsgId);
        }
    }

    // Publishes to SNS inside a bounded exponential-backoff retry loop; rethrows the last
    // failure once the retries are exhausted.
    private PublishResult publishWithRetries(
            final String topicArn,
            final String msg,
            final Map<String, MessageAttributeValue> messageAttributes)
            throws Exception {
        return new BoundedExponentialRetryCallable<PublishResult>() {
            @Override
            public PublishResult retriableCall() throws Exception {
                PublishRequest request =
                        new PublishRequest(topicArn, msg)
                                .withMessageAttributes(messageAttributes);
                return snsClient.publish(request);
            }
        }.call();
    }
}
| 3,237 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/notification/INotificationService.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.notification;
import com.amazonaws.services.sns.model.MessageAttributeValue;
import com.google.inject.ImplementedBy;
import java.util.Map;
/** Service to notify of a message. Created by vinhn on 11/3/16. */
@ImplementedBy(AWSSnsNotificationService.class)
public interface INotificationService {
    /**
     * Notify the message.
     *
     * @param msg Message that needs to be notified
     * @param messageAttributes SNS message attributes to attach to the published message
     */
    void notify(String msg, Map<String, MessageAttributeValue> messageAttributes);
}
| 3,238 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/notification/UploadStatus.java | package com.netflix.priam.notification;
/** Lifecycle states of a file upload. */
public enum UploadStatus {
    /** Upload has begun but not yet completed. */
    STARTED,
    /** Upload completed successfully. */
    SUCCESS,
    /** Upload terminated with an error. */
    FAILED
}
| 3,239 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/google/GoogleFileIterator.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.google;
import com.google.api.services.storage.Storage;
import com.google.api.services.storage.model.StorageObject;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
/*
 * Represents a list of objects within Google Cloud Storage (GCS). Iterates lazily over the
 * object names under a bucket/prefix, transparently fetching the next page of results when the
 * current page is exhausted.
 */
public class GoogleFileIterator implements Iterator<String> {
    private Iterator<String> iterator;
    private final String bucketName;
    private final String prefix;
    private final Storage.Objects objectsResourceHandle;
    private Storage.Objects.List listObjectsServiceHandle = null;
    // Holds the most recently fetched page; its nextPageToken drives pagination.
    private com.google.api.services.storage.model.Objects objectsContainerHandle = null;

    /**
     * @param gcsStorageHandle authenticated GCS storage handle
     * @param bucket bucket whose objects are listed
     * @param prefix only object names starting with this prefix are returned
     */
    public GoogleFileIterator(Storage gcsStorageHandle, String bucket, String prefix) {
        this.objectsResourceHandle = gcsStorageHandle.objects();
        this.bucketName = bucket;
        this.prefix = prefix;
        try { // == Get the initial page of results
            this.iterator = createIterator();
        } catch (Exception e) {
            throw new RuntimeException(
                    "Exception encountered fetching elements, msg: ." + e.getLocalizedMessage(), e);
        }
    }

    /** Builds the list request handle, scoped to the bucket and prefix. */
    private void initListing() {
        try {
            this.listObjectsServiceHandle =
                    objectsResourceHandle.list(bucketName); // == list objects within bucket
            // fetch elements within bucket that matches this prefix
            this.listObjectsServiceHandle.setPrefix(this.prefix);
        } catch (IOException e) {
            throw new RuntimeException("Unable to get gcslist handle to bucket: " + bucketName, e);
        }
    }

    /*
     * Fetch a page of results and return an iterator over its object names.
     */
    private Iterator<String> createIterator() throws Exception {
        if (listObjectsServiceHandle == null) initListing();
        List<String> names = new ArrayList<>(); // a container of results
        // Sends the metadata request to the server and returns the parsed metadata response.
        this.objectsContainerHandle = listObjectsServiceHandle.execute();
        // The JSON API omits "items" entirely when a page has no objects, in which case
        // getItems() returns null; guard against that rather than NPE-ing.
        List<StorageObject> items = this.objectsContainerHandle.getItems();
        if (items != null) {
            for (StorageObject object : items) {
                // processing a page of results
                names.add(object.getName());
            }
        }
        return names.iterator();
    }

    @Override
    public boolean hasNext() {
        if (this.iterator.hasNext()) {
            return true;
        }
        // Current page is exhausted; keep fetching pages until one yields elements or there
        // are no more pages.
        while (this.objectsContainerHandle.getNextPageToken() != null && !iterator.hasNext())
            try {
                this.listObjectsServiceHandle.setPageToken(
                        objectsContainerHandle.getNextPageToken());
                this.iterator = createIterator();
            } catch (Exception e) {
                throw new RuntimeException(
                        "Exception encountered fetching elements, see previous messages for details.",
                        e);
            }
        return this.iterator.hasNext();
    }

    @Override
    public String next() {
        return iterator.next();
    }
}
| 3,240 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/google/GcsCredential.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.google;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredentialGeneric;
import javax.inject.Inject;
/*
 * A generic implementation of fetch keys as plaintext. The key values are used with Google Cloud
 * Storage (GCS). Users who need stronger protection may provide an implementation that decrypts
 * the key material (e.g. with AES) instead of returning it verbatim from configuration.
 */
public class GcsCredential implements ICredentialGeneric {
    private final IConfiguration config;

    @Inject
    public GcsCredential(IConfiguration config) {
        this.config = config;
    }

    /** Not applicable for GCS; this credential type never vends AWS credentials. */
    @Override
    public AWSCredentialsProvider getAwsCredentialProvider() {
        return null;
    }

    /**
     * Returns the raw bytes of the requested GCS credential property.
     *
     * @param key which credential property to fetch; must be a GCS key
     * @throws NullPointerException if {@code key} is null
     * @throws IllegalArgumentException if {@code key} is not a supported GCS key
     */
    @Override
    public byte[] getValue(KEY key) {
        if (key == null) {
            throw new NullPointerException("Credential key cannot be null.");
        }
        switch (key) {
            case GCS_PRIVATE_KEY_LOC:
                return this.config.getGcsServiceAccountPrivateKeyLoc().getBytes();
            case GCS_SERVICE_ID:
                return this.config.getGcsServiceAccountId().getBytes();
            default:
                throw new IllegalArgumentException("Key value not supported.");
        }
    }
}
| 3,241 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/google/GoogleEncryptedFileSystem.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.google;
import com.google.api.client.auth.oauth2.Credential;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.services.storage.Storage;
import com.google.api.services.storage.StorageScopes;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.AbstractFileSystem;
import com.netflix.priam.backup.BackupRestoreException;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredentialGeneric;
import com.netflix.priam.cred.ICredentialGeneric.KEY;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.notification.BackupNotificationMgr;
import java.io.*;
import java.nio.file.Path;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Read-only backup file system backed by Google Cloud Storage (GCS). Lazily constructs, in
 * order: a service-account {@link Credential}, a {@link Storage} handle, and an objects
 * resource handle; each is built once and cached. Upload is unsupported.
 */
public class GoogleEncryptedFileSystem extends AbstractFileSystem {
    private static final Logger logger = LoggerFactory.getLogger(GoogleEncryptedFileSystem.class);
    private static final String APPLICATION_NAME = "gdl";
    private static final JsonFactory JSON_FACTORY = JacksonFactory.getDefaultInstance();
    private HttpTransport httpTransport;
    // represents our "service account" credentials we will use to access GCS
    private Credential credential;
    // Lazily-built handles; see constructGcsStorageHandle()/constructObjectResourceHandle().
    private Storage gcsStorageHandle;
    private Storage.Objects objectsResoruceHandle = null;
    // Bucket to restore from; derived from getShard() at construction time.
    private String srcBucketName;
    private final IConfiguration config;
    private final ICredentialGeneric gcsCredential;
    private final BackupMetrics backupMetrics;
    @Inject
    public GoogleEncryptedFileSystem(
            Provider<AbstractBackupPath> pathProvider,
            final IConfiguration config,
            @Named("gcscredential") ICredentialGeneric credential,
            BackupMetrics backupMetrics,
            BackupNotificationMgr backupNotificationManager) {
        super(config, backupMetrics, backupNotificationManager, pathProvider);
        this.backupMetrics = backupMetrics;
        this.config = config;
        this.gcsCredential = credential;
        try {
            this.httpTransport = GoogleNetHttpTransport.newTrustedTransport();
        } catch (Exception e) {
            throw new IllegalStateException(
                    "Unable to create a handle to the Google Http tranport", e);
        }
        this.srcBucketName = getShard();
    }
    /** Lazily builds and caches the objects resource handle (forces storage-handle creation). */
    private Storage.Objects constructObjectResourceHandle() {
        if (this.objectsResoruceHandle != null) {
            return this.objectsResoruceHandle;
        }
        constructGcsStorageHandle();
        this.objectsResoruceHandle = this.gcsStorageHandle.objects();
        return this.objectsResoruceHandle;
    }
    /*
     * Get a handle to the GCS api to manage our data within their storage. Code derive from
     * https://code.google.com/p/google-api-java-client/source/browse/storage-cmdline-sample/src/main/java/com/google/api/services/samples/storage/cmdline/StorageSample.java?repo=samples
     *
     * Note: GCS storage will use our credential to do auto-refresh of expired tokens
     */
    private Storage constructGcsStorageHandle() {
        if (this.gcsStorageHandle != null) {
            return this.gcsStorageHandle;
        }
        try {
            constructGcsCredential();
        } catch (Exception e) {
            throw new IllegalStateException("Exception during GCS authorization", e);
        }
        this.gcsStorageHandle =
                new Storage.Builder(this.httpTransport, JSON_FACTORY, this.credential)
                        .setApplicationName(APPLICATION_NAME)
                        .build();
        return this.gcsStorageHandle;
    }
    /**
     * Authorizes the installed application to access user's protected data, code from
     * https://developers.google.com/maps-engine/documentation/oauth/serviceaccount and
     * http://javadoc.google-api-java-client.googlecode.com/hg/1.8.0-beta/com/google/api/client/googleapis/auth/oauth2/GoogleCredential.html
     *
     * <p>Double-checked locking: the credential is built at most once; the plaintext private
     * key is materialized to "&lt;configured loc&gt;.output" on disk so the Google client can
     * read it as a P12 file.
     */
    private Credential constructGcsCredential() throws Exception {
        if (this.credential != null) {
            return this.credential;
        }
        synchronized (this) {
            if (this.credential == null) {
                String service_acct_email =
                        new String(this.gcsCredential.getValue(KEY.GCS_SERVICE_ID));
                if (this.config.getGcsServiceAccountPrivateKeyLoc() == null
                        || this.config.getGcsServiceAccountPrivateKeyLoc().isEmpty()) {
                    throw new NullPointerException(
                            "Fast property for the the GCS private key file is null/empty.");
                }
                // Take the encrypted private key, decrypted into an in-transit file which is passed
                // to GCS
                File gcsPrivateKeyHandle =
                        new File(this.config.getGcsServiceAccountPrivateKeyLoc() + ".output");
                ByteArrayOutputStream byteos = new ByteArrayOutputStream();
                byte[] gcsPrivateKeyPlainText =
                        this.gcsCredential.getValue(KEY.GCS_PRIVATE_KEY_LOC);
                try (BufferedOutputStream bos =
                        new BufferedOutputStream(new FileOutputStream(gcsPrivateKeyHandle))) {
                    byteos.write(gcsPrivateKeyPlainText);
                    byteos.writeTo(bos);
                } catch (IOException e) {
                    throw new IOException(
                            "Exception when writing decrypted gcs private key value to disk.", e);
                }
                // Read-only scope: this file system only downloads (see uploadFileImpl).
                Collection<String> scopes = new ArrayList<>(1);
                scopes.add(StorageScopes.DEVSTORAGE_READ_ONLY);
                // Cryptex decrypted service account key derive from the GCS console
                this.credential =
                        new GoogleCredential.Builder()
                                .setTransport(this.httpTransport)
                                .setJsonFactory(JSON_FACTORY)
                                .setServiceAccountId(service_acct_email)
                                .setServiceAccountScopes(scopes)
                                .setServiceAccountPrivateKeyFromP12File(gcsPrivateKeyHandle)
                                .build();
            }
        }
        return this.credential;
    }
    /**
     * Downloads one remote object to {@code path.newRestoreFile() + suffix}, recording the
     * downloaded byte count as a metric on success.
     *
     * @throws BackupRestoreException on any metadata or streaming failure
     */
    @Override
    protected void downloadFileImpl(AbstractBackupPath path, String suffix)
            throws BackupRestoreException {
        String remotePath = path.getRemotePath();
        File localFile = new File(path.newRestoreFile().getAbsolutePath() + suffix);
        // objectName is only used to build human-readable error messages below.
        String objectName = parseObjectname(getPrefix().toString());
        com.google.api.services.storage.Storage.Objects.Get get;
        try {
            get = constructObjectResourceHandle().get(this.srcBucketName, remotePath);
        } catch (IOException e) {
            throw new BackupRestoreException(
                    "IO error retrieving metadata for: "
                            + objectName
                            + " from bucket: "
                            + this.srcBucketName,
                    e);
        }
        // If you're not using GCS' AppEngine, download the whole thing (instead of chunks) in one
        // request, if possible.
        get.getMediaHttpDownloader().setDirectDownloadEnabled(true);
        try (OutputStream os = new FileOutputStream(localFile);
                InputStream is = get.executeMediaAsInputStream()) {
            IOUtils.copyLarge(is, os);
        } catch (IOException e) {
            throw new BackupRestoreException(
                    "IO error during streaming of object: "
                            + objectName
                            + " from bucket: "
                            + this.srcBucketName,
                    e);
        } catch (Exception ex) {
            throw new BackupRestoreException(
                    "Exception encountered when copying bytes from input to output", ex);
        }
        backupMetrics.recordDownloadRate(get.getLastResponseHeaders().getContentLength());
    }
    @Override
    protected boolean doesRemoteFileExist(Path remotePath) {
        // TODO: Implement based on GCS. Since this is only used for upload, leaving it empty
        return false;
    }
    @Override
    public Iterator<String> listFileSystem(String prefix, String delimiter, String marker) {
        // NOTE(review): GoogleFileIterator's constructor is (storage, bucket, prefix); here
        // "prefix" is passed in the bucket position and the prefix is null. Confirm whether
        // callers pass a bucket name as "prefix", otherwise this looks like swapped arguments.
        return new GoogleFileIterator(constructGcsStorageHandle(), prefix, null);
    }
    @Override
    public void cleanup() {
        // TODO Auto-generated method stub
    }
    @Override
    public void shutdown() {
        // TODO Auto-generated method stub
    }
    /** Uploads are not supported by this read-only file system. */
    @Override
    protected long uploadFileImpl(AbstractBackupPath path, Instant target)
            throws BackupRestoreException {
        throw new UnsupportedOperationException();
    }
    @Override
    public long getFileSize(String remotePath) throws BackupRestoreException {
        return 0;
    }
    @Override
    public void deleteFiles(List<Path> remotePaths) throws BackupRestoreException {
        // TODO: Delete implementation
    }
    /*
     * Returns the final path segment of pathPrefix (everything after the last '/';
     * 0x2f is the code point for '/').
     *
     * @param pathPrefix
     * @return objectName
     */
    static String parseObjectname(String pathPrefix) {
        int offset = pathPrefix.lastIndexOf(0x2f);
        return pathPrefix.substring(offset + 1);
    }
}
| 3,242 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/health/IThriftChecker.java | package com.netflix.priam.health;
import com.google.inject.ImplementedBy;
/** Checks whether Cassandra's thrift (rpc) server is actually listening on its port. */
@ImplementedBy(ThriftChecker.class)
public interface IThriftChecker {
    /**
     * @return true if the thrift server appears to be listening, or if the check is disabled
     *     or inconclusive; false only when the port is confirmed to have no listener.
     */
    boolean isThriftServerListening();
}
| 3,243 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/health/ThriftChecker.java | package com.netflix.priam.health;
import com.netflix.priam.config.IConfiguration;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Verifies that something is actually listening on Cassandra's thrift (rpc) port by counting
 * matching sockets via {@code ss}. Errs on the side of returning true whenever the check is
 * disabled, fails, or is inconclusive.
 */
public class ThriftChecker implements IThriftChecker {
    private static final Logger logger = LoggerFactory.getLogger(ThriftChecker.class);
    protected final IConfiguration config;

    @Inject
    public ThriftChecker(IConfiguration config) {
        this.config = config;
    }

    public boolean isThriftServerListening() {
        if (!config.checkThriftServerIsListening()) {
            return true;
        }
        // The stderr redirect must be part of the single "-c" command string. Passed as a
        // separate argv element (as it was previously) it only sets $0 for the shell and the
        // redirect never takes effect.
        String[] cmd =
                new String[] {
                    "/bin/sh",
                    "-c",
                    "ss -tuln | grep -c " + config.getThriftPort() + " 2>/dev/null"
                };
        Process process = null;
        try {
            process = Runtime.getRuntime().exec(cmd);
            // Bounded wait so a hung command cannot stall the health check.
            process.waitFor(1, TimeUnit.SECONDS);
        } catch (Exception e) {
            logger.warn("Exception while executing the process: ", e);
        }
        if (process != null) {
            try (BufferedReader reader =
                    new BufferedReader(new InputStreamReader(process.getInputStream()))) {
                String line = reader.readLine();
                // A null line means the command produced no output (e.g. it timed out);
                // treat that as inconclusive rather than crashing on parseInt(null).
                if (line != null && Integer.parseInt(line.trim()) == 0) {
                    logger.info(
                            "Could not find anything listening on the rpc port {}!",
                            config.getThriftPort());
                    return false;
                }
            } catch (Exception e) {
                logger.warn("Exception while reading the input stream: ", e);
            }
        }
        // A quiet on-call is our top priority, err on the side of avoiding false positives by
        // default.
        return true;
    }
}
| 3,244 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/health/CassandraMonitor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.health;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.connection.JMXNodeTool;
import com.netflix.priam.defaultimpl.ICassandraProcess;
import com.netflix.priam.merics.CassMonitorMetrics;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.scheduler.TaskTimer;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.cassandra.tools.NodeProbe;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
 * This task checks if the Cassandra process is running. It runs every 10 seconds, updates
 * InstanceState (process/gossip/thrift/native-transport flags, required directories) and,
 * when configured, auto-restarts a Cassandra process that should be alive but is not.
 */
@Singleton
public class CassandraMonitor extends Task {
    public static final String JOBNAME = "CASS_MONITOR_THREAD";
    private static final Logger logger = LoggerFactory.getLogger(CassandraMonitor.class);
    // Process-wide flag consumed via hasCassadraStarted(); updated on every execution.
    private static final AtomicBoolean isCassandraStarted = new AtomicBoolean(false);
    private final InstanceState instanceState;
    private final ICassandraProcess cassProcess;
    private final CassMonitorMetrics cassMonitorMetrics;
    private final IThriftChecker thriftChecker;
    @Inject
    protected CassandraMonitor(
            IConfiguration config,
            InstanceState instanceState,
            ICassandraProcess cassProcess,
            CassMonitorMetrics cassMonitorMetrics,
            IThriftChecker thriftChecker) {
        super(config);
        this.instanceState = instanceState;
        this.cassProcess = cassProcess;
        this.cassMonitorMetrics = cassMonitorMetrics;
        this.thriftChecker = thriftChecker;
    }
    /**
     * One monitoring pass: verify required directories, detect the Cassandra process via
     * {@code ps}, refresh liveness flags from JMX, and optionally remediate a dead process.
     */
    @Override
    public void execute() throws Exception {
        try {
            checkRequiredDirectories();
            instanceState.setIsRequiredDirectoriesExist(true);
        } catch (IllegalStateException e) {
            instanceState.setIsRequiredDirectoriesExist(false);
        }
        Process process = null;
        BufferedReader input = null;
        try {
            // This returns pid for the Cassandra process
            // This needs to be sent as command list as "pipe" of results is not allowed. Also, do
            // not try to change
            // with pgrep as it has limitation of 4K command list (cassandra command can go upto 5-6
            // KB as cassandra lists all the libraries in command.
            final String[] cmd = {
                "/bin/sh",
                "-c",
                "ps -ef |grep -v -P \"\\sgrep\\s\" | grep " + config.getCassProcessName()
            };
            process = Runtime.getRuntime().exec(cmd);
            input = new BufferedReader(new InputStreamReader(process.getInputStream()));
            String line = input.readLine();
            if (line != null) {
                // Setting cassandra flag to true
                instanceState.setCassandraProcessAlive(true);
                isCassandraStarted.set(true);
                // Process is up: query JMX for the state of each server-side service.
                NodeProbe bean = JMXNodeTool.instance(this.config);
                instanceState.setIsGossipActive(bean.isGossipRunning());
                instanceState.setIsNativeTransportActive(bean.isNativeTransportRunning());
                // Thrift is only considered active if JMX says so AND the port has a listener.
                instanceState.setIsThriftActive(
                        bean.isThriftServerRunning() && thriftChecker.isThriftServerListening());
            } else {
                // Setting cassandra flag to false
                instanceState.setCassandraProcessAlive(false);
                isCassandraStarted.set(false);
            }
        } catch (Exception e) {
            // Any failure (ps, JMX, ...) is treated as "process not alive".
            logger.warn("Exception thrown while checking if Cassandra is running or not ", e);
            instanceState.setCassandraProcessAlive(false);
            isCassandraStarted.set(false);
        } finally {
            if (process != null) {
                IOUtils.closeQuietly(process.getInputStream());
                IOUtils.closeQuietly(process.getOutputStream());
                IOUtils.closeQuietly(process.getErrorStream());
            }
            if (input != null) IOUtils.closeQuietly(input);
        }
        try {
            // Remediation: restart Cassandra if it should be alive but is not, rate-limited to
            // at most one attempt per `rate` seconds (rate == 0 means no rate limit).
            int rate = config.getRemediateDeadCassandraRate();
            if (rate >= 0 && !config.doesCassandraStartManually()) {
                if (instanceState.shouldCassandraBeAlive()
                        && !instanceState.isCassandraProcessAlive()) {
                    long msNow = System.currentTimeMillis();
                    if (rate == 0
                            || ((instanceState.getLastAttemptedStartTime() + rate * 1000)
                                    < msNow)) {
                        cassMonitorMetrics.incCassAutoStart();
                        cassProcess.start(true);
                        instanceState.markLastAttemptedStartTime();
                    }
                }
            }
        } catch (IOException e) {
            logger.warn("Failed to remediate dead Cassandra", e);
        }
    }
    /** Throws IllegalStateException if any required Cassandra directory is missing/unusable. */
    private void checkRequiredDirectories() {
        checkDirectory(config.getDataFileLocation());
        checkDirectory(config.getBackupCommitLogLocation());
        checkDirectory(config.getCommitLogLocation());
        checkDirectory(config.getCacheLocation());
    }
    private void checkDirectory(String directory) {
        checkDirectory(new File(directory));
    }
    /** Validates existence and read/write access of one directory. */
    private void checkDirectory(File directory) {
        if (!directory.exists())
            throw new IllegalStateException(
                    String.format("Directory: %s does not exist", directory));
        if (!directory.canRead() || !directory.canWrite())
            throw new IllegalStateException(
                    String.format(
                            "Directory: %s does not have read/write permissions.", directory));
    }
    /** @return a timer that runs this task every 10 seconds. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME, 10L * 1000);
    }
    @Override
    public String getName() {
        return JOBNAME;
    }
    public static Boolean hasCassadraStarted() {
        return isCassandraStarted.get();
    }
    // Added for testing only
    public static void setIsCassadraStarted() {
        // Setting cassandra flag to true
        isCassandraStarted.set(true);
    }
}
| 3,245 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/health/InstanceState.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.health;
import com.netflix.priam.backup.BackupMetadata;
import com.netflix.priam.backup.Status;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.time.LocalDateTime;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* Contains the state of the health of processed managed by Priam, and maintains the isHealthy flag
* used for reporting discovery health check.
*
* <p>Created by aagrawal on 9/19/17.
*/
@Singleton
public class InstanceState {
    // Cassandra process status. All flags are atomics because they are written by monitor
    // tasks and read by the health-check/REST threads concurrently.
    private final AtomicBoolean isCassandraProcessAlive = new AtomicBoolean(false);
    private final AtomicBoolean shouldCassandraBeAlive = new AtomicBoolean(false);
    // Millis timestamp of the last auto-start attempt; MAX_VALUE means "never attempted".
    private final AtomicLong lastAttemptedStartTime = new AtomicLong(Long.MAX_VALUE);
    private final AtomicBoolean isGossipActive = new AtomicBoolean(false);
    private final AtomicBoolean isThriftActive = new AtomicBoolean(false);
    private final AtomicBoolean isNativeTransportActive = new AtomicBoolean(false);
    private final AtomicBoolean isRequiredDirectoriesExist = new AtomicBoolean(false);
    private final AtomicBoolean isYmlWritten = new AtomicBoolean(false);
    // Aggregate health; recomputed by setHealthy() whenever any contributing flag changes.
    private final AtomicBoolean isHealthy = new AtomicBoolean(false);
    // Operator/remediation override: when false, the node is reported unhealthy regardless.
    private final AtomicBoolean isHealthyOverride = new AtomicBoolean(true);
    // This is referenced when this class is serialized to a String
    private BackupMetadata backupStatus;
    // Restore status
    private final RestoreStatus restoreStatus;
    @Inject
    public InstanceState(RestoreStatus restoreStatus) {
        this.restoreStatus = restoreStatus;
    }
    @Override
    public String toString() {
        return GsonJsonSerializer.getGson().toJson(this);
    }
    public boolean isGossipActive() {
        return isGossipActive.get();
    }
    public void setIsGossipActive(boolean isGossipActive) {
        this.isGossipActive.set(isGossipActive);
        setHealthy();
    }
    public boolean isThriftActive() {
        return isThriftActive.get();
    }
    public void setIsThriftActive(boolean isThriftActive) {
        this.isThriftActive.set(isThriftActive);
        setHealthy();
    }
    public boolean isNativeTransportActive() {
        return isNativeTransportActive.get();
    }
    public void setIsNativeTransportActive(boolean isNativeTransportActive) {
        this.isNativeTransportActive.set(isNativeTransportActive);
        setHealthy();
    }
    public boolean isRequiredDirectoriesExist() {
        return isRequiredDirectoriesExist.get();
    }
    public void setIsRequiredDirectoriesExist(boolean isRequiredDirectoriesExist) {
        this.isRequiredDirectoriesExist.set(isRequiredDirectoriesExist);
        setHealthy();
    }
    public boolean isCassandraProcessAlive() {
        return isCassandraProcessAlive.get();
    }
    public void setCassandraProcessAlive(boolean isSideCarProcessAlive) {
        this.isCassandraProcessAlive.set(isSideCarProcessAlive);
        setHealthy();
    }
    public boolean shouldCassandraBeAlive() {
        return shouldCassandraBeAlive.get();
    }
    // Also propagates into the health override so a deliberately-stopped node reports unhealthy.
    public void setShouldCassandraBeAlive(boolean shouldCassandraBeAlive) {
        this.shouldCassandraBeAlive.set(shouldCassandraBeAlive);
        setIsHealthyOverride(shouldCassandraBeAlive);
    }
    public void setIsHealthyOverride(boolean isHealthyOverride) {
        this.isHealthyOverride.set(isHealthyOverride);
    }
    public boolean isHealthyOverride() {
        return this.isHealthyOverride.get();
    }
    public void markLastAttemptedStartTime() {
        this.lastAttemptedStartTime.set(System.currentTimeMillis());
    }
    public long getLastAttemptedStartTime() {
        return this.lastAttemptedStartTime.get();
    }
    /* Backup */
    public void setBackupStatus(BackupMetadata backupMetadata) {
        this.backupStatus = backupMetadata;
    }
    /* Restore */
    public RestoreStatus getRestoreStatus() {
        return restoreStatus;
    }
    // A dirty way to set restore status. This is required as setting restore status implies health
    // could change.
    public void setRestoreStatus(Status status) {
        restoreStatus.status = status;
        setHealthy();
    }
    public boolean isHealthy() {
        return isHealthy.get();
    }
    private boolean isRestoring() {
        return restoreStatus != null
                && restoreStatus.getStatus() != null
                && restoreStatus.getStatus() == Status.STARTED;
    }
    // Health predicate: a node mid-restore is always "healthy"; otherwise every liveness
    // flag must hold, plus the operator override, plus at least one of thrift/native transport.
    private void setHealthy() {
        this.isHealthy.set(
                isRestoring()
                        || (isCassandraProcessAlive()
                                && isRequiredDirectoriesExist()
                                && isGossipActive()
                                && isYmlWritten()
                                && isHealthyOverride()
                                && (isThriftActive() || isNativeTransportActive())));
    }
    public boolean isYmlWritten() {
        return this.isYmlWritten.get();
    }
    public void setYmlWritten(boolean yml) {
        this.isYmlWritten.set(yml);
    }
    /** Mutable record of a restore operation's parameters, progress, and outcome. */
    public static class RestoreStatus {
        private LocalDateTime startDateRange, endDateRange; // Date range to restore from
        // Start and end times of the actual restore execution.
        // Note these are referenced when this class is serialized to a String.
        private LocalDateTime executionStartTime, executionEndTime;
        private String snapshotMetaFile; // Location of the snapshot meta file selected for restore.
        // the state of a restore. Note: this is different than the "status" of a Task.
        private Status status;
        /** Clears all fields back to their pre-restore (null) state. */
        public void resetStatus() {
            this.snapshotMetaFile = null;
            this.status = null;
            this.startDateRange = endDateRange = null;
            this.executionStartTime = this.executionEndTime = null;
        }
        @Override
        public String toString() {
            return GsonJsonSerializer.getGson().toJson(this);
        }
        public Status getStatus() {
            return status;
        }
        public void setStartDateRange(LocalDateTime startDateRange) {
            this.startDateRange = startDateRange;
        }
        public void setEndDateRange(LocalDateTime endDateRange) {
            this.endDateRange = endDateRange;
        }
        public void setExecutionStartTime(LocalDateTime executionStartTime) {
            this.executionStartTime = executionStartTime;
        }
        public void setExecutionEndTime(LocalDateTime executionEndTime) {
            this.executionEndTime = executionEndTime;
        }
        public LocalDateTime getStartDateRange() {
            return startDateRange;
        }
        public LocalDateTime getEndDateRange() {
            return endDateRange;
        }
        public LocalDateTime getExecutionStartTime() {
            return executionStartTime;
        }
        public String getSnapshotMetaFile() {
            return snapshotMetaFile;
        }
        public void setSnapshotMetaFile(String snapshotMetaFile) {
            this.snapshotMetaFile = snapshotMetaFile;
        }
    }
}
| 3,246 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/resources/PriamConfig.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.resources;
import com.netflix.priam.PriamServer;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.util.HashMap;
import java.util.Map;
import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This servlet will provide the configuration API service for use by external scripts and tooling.
 *
 * <p>Exposes read-only access to structured configuration groups and to individual unstructured
 * properties of the running Priam instance.
 */
@Path("/v1/config")
@Produces(MediaType.APPLICATION_JSON)
public class PriamConfig {
    private static final Logger logger = LoggerFactory.getLogger(PriamConfig.class);
    private final PriamServer priamServer;

    @Inject
    public PriamConfig(PriamServer server) {
        this.priamServer = server;
    }

    /**
     * Fetch a structured configuration group, optionally narrowed to one named entry.
     *
     * @param group name of the structured configuration group to read
     * @param name optional entry within the group; when {@code null} the whole group is returned
     * @return 200 with the JSON payload, 404 when {@code name} is not present in the group, or 500
     *     on unexpected errors
     */
    private Response doGetPriamConfig(String group, String name) {
        try {
            final Map<String, Object> result = new HashMap<>();
            final Map<String, Object> value =
                    priamServer.getConfiguration().getStructuredConfiguration(group);
            if (name != null && value.containsKey(name)) {
                result.put(name, value.get(name));
                return Response.ok(GsonJsonSerializer.getGson().toJson(result)).build();
            } else if (name != null) {
                // Build the message once; it is both returned to the caller and logged.
                final String message = String.format("No such structured config: [%s]", name);
                result.put("message", message);
                logger.error(message);
                return Response.status(404)
                        .entity(GsonJsonSerializer.getGson().toJson(result))
                        .type(MediaType.APPLICATION_JSON)
                        .build();
            } else {
                result.putAll(value);
                return Response.ok(GsonJsonSerializer.getGson().toJson(result)).build();
            }
        } catch (Exception e) {
            logger.error("Error while executing getPriamConfig", e);
            return Response.serverError().build();
        }
    }

    /** Return an entire structured configuration group as JSON. */
    @GET
    @Path("/structured/{group}")
    public Response getPriamConfig(@PathParam("group") String group) {
        return doGetPriamConfig(group, null);
    }

    /** Return a single named entry from a structured configuration group. */
    @GET
    @Path("/structured/{group}/{name}")
    public Response getPriamConfigByName(
            @PathParam("group") String group, @PathParam("name") String name) {
        return doGetPriamConfig(group, name);
    }

    /**
     * Return a single unstructured property, falling back to {@code defaultValue} when the
     * property is not set.
     *
     * @param name property name to look up
     * @param defaultValue value returned when the property is unset (may be {@code null})
     * @return 200 with the property value, 404 when neither the property nor a default exists, or
     *     500 on unexpected errors
     */
    @GET
    @Path("/unstructured/{name}")
    public Response getProperty(
            @PathParam("name") String name, @QueryParam("default") String defaultValue) {
        Map<String, Object> result = new HashMap<>();
        try {
            String value = priamServer.getConfiguration().getProperty(name, defaultValue);
            if (value != null) {
                result.put(name, value);
                return Response.ok(GsonJsonSerializer.getGson().toJson(result)).build();
            } else {
                final String message = String.format("No such property: [%s]", name);
                result.put("message", message);
                logger.error(message);
                return Response.status(404)
                        .entity(GsonJsonSerializer.getGson().toJson(result))
                        .type(MediaType.APPLICATION_JSON)
                        .build();
            }
        } catch (Exception e) {
            // Fix: this previously logged "Error while executing getPriamConfig" — a copy-paste
            // from doGetPriamConfig — which made log triage point at the wrong endpoint.
            logger.error("Error while executing getProperty", e);
            return Response.serverError().build();
        }
    }
}
| 3,247 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/resources/CassandraAdmin.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.resources;
import com.google.common.collect.Lists;
import com.netflix.priam.cluster.management.Compaction;
import com.netflix.priam.cluster.management.Flush;
import com.netflix.priam.compress.SnappyCompression;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.connection.CassandraOperations;
import com.netflix.priam.connection.JMXConnectionException;
import com.netflix.priam.connection.JMXNodeTool;
import com.netflix.priam.defaultimpl.ICassandraProcess;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutorMBean;
import org.apache.cassandra.db.ColumnFamilyStoreMBean;
import org.apache.cassandra.db.compaction.CompactionManagerMBean;
import org.apache.cassandra.utils.EstimatedHistogram;
import org.apache.commons.lang3.StringUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Do general operations. Start/Stop and some JMX node tool commands.
 *
 * <p>Most endpoints are thin wrappers over {@link JMXNodeTool}; when the JMX connection cannot be
 * established they uniformly return HTTP 503.
 */
@Path("/v1/cassadmin")
@Produces(MediaType.APPLICATION_JSON)
public class CassandraAdmin {
    private static final String REST_HEADER_KEYSPACES = "keyspaces";
    private static final String REST_HEADER_CFS = "cfnames";
    private static final String REST_HEADER_TOKEN = "token";
    private static final String REST_SUCCESS = "[\"ok\"]";
    private static final Logger logger = LoggerFactory.getLogger(CassandraAdmin.class);
    // All collaborators are injected once and never reassigned.
    private final IConfiguration config;
    private final ICassandraProcess cassProcess;
    private final Flush flush;
    private final Compaction compaction;
    private final CassandraOperations cassandraOperations;

    @Inject
    public CassandraAdmin(
            IConfiguration config,
            ICassandraProcess cassProcess,
            Flush flush,
            Compaction compaction,
            CassandraOperations cassandraOperations) {
        this.config = config;
        this.cassProcess = cassProcess;
        this.flush = flush;
        this.compaction = compaction;
        this.cassandraOperations = cassandraOperations;
    }

    /** Start the Cassandra process (with token join). */
    @GET
    @Path("/start")
    public Response cassStart() throws IOException {
        cassProcess.start(true);
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Stop the Cassandra process; {@code force=true} requests a hard stop. */
    @GET
    @Path("/stop")
    public Response cassStop(@DefaultValue("false") @QueryParam("force") boolean force)
            throws IOException {
        cassProcess.stop(force);
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Run nodetool refresh for the given comma-separated keyspaces. */
    @GET
    @Path("/refresh")
    public Response cassRefresh(@QueryParam(REST_HEADER_KEYSPACES) String keyspaces)
            throws IOException, ExecutionException, InterruptedException {
        logger.debug("node tool refresh is being called");
        if (StringUtils.isBlank(keyspaces))
            return Response.status(400).entity("Missing keyspace in request").build();
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        nodeTool.refresh(Lists.newArrayList(keyspaces.split(",")));
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Return nodetool info output. */
    @GET
    @Path("/info")
    public Response cassInfo() throws JSONException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        logger.debug("node tool info being called");
        return Response.ok(nodeTool.info(), MediaType.APPLICATION_JSON).build();
    }

    /** Return the partitioner configured on the node. */
    @GET
    @Path("/partitioner")
    public Response cassPartitioner() {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        logger.debug("node tool getPartitioner being called");
        return Response.ok(nodeTool.getPartitioner(), MediaType.APPLICATION_JSON).build();
    }

    /** Return nodetool ring output for the given keyspace. */
    @GET
    @Path("/ring/{id}")
    public Response cassRing(@PathParam("id") String keyspace) throws JSONException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        logger.debug("node tool ring being called");
        return Response.ok(nodeTool.ring(keyspace), MediaType.APPLICATION_JSON).build();
    }

    /** Return nodetool status output. */
    @GET
    @Path("/status")
    public Response statusInfo() throws JSONException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        logger.debug("node tool status being called");
        return Response.ok(nodeTool.statusInformation(), MediaType.APPLICATION_JSON).build();
    }

    /** Flush all memtables via the scheduled Flush task; 503 with details on failure. */
    @GET
    @Path("/flush")
    public Response cassFlush() {
        JSONObject rootObj = new JSONObject();
        try {
            flush.execute();
            rootObj.put("Flushed", true);
            return Response.ok().entity(rootObj).build();
        } catch (Exception e) {
            try {
                rootObj.put("status", "ERROR");
                rootObj.put("desc", e.getLocalizedMessage());
            } catch (Exception e1) {
                // Even building the error JSON failed; fall back to a plain-text body.
                return Response.status(503).entity("FlushError").build();
            }
            return Response.status(503).entity(rootObj).build();
        }
    }

    /** Run a compaction via the scheduled Compaction task; 503 with details on failure. */
    @GET
    @Path("/compact")
    public Response cassCompact() {
        JSONObject rootObj = new JSONObject();
        try {
            compaction.execute();
            rootObj.put("Compacted", true);
            return Response.ok().entity(rootObj).build();
        } catch (Exception e) {
            try {
                rootObj.put("status", "ERROR");
                rootObj.put("desc", e.getLocalizedMessage());
            } catch (Exception e1) {
                // Even building the error JSON failed; fall back to a plain-text body.
                return Response.status(503).entity("CompactionError").build();
            }
            return Response.status(503).entity(rootObj).build();
        }
    }

    /** Run nodetool cleanup. */
    @GET
    @Path("/cleanup")
    public Response cassCleanup() throws IOException, ExecutionException, InterruptedException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        logger.debug("node tool cleanup being called");
        nodeTool.cleanup();
        return Response.ok().build();
    }

    /** Run nodetool repair with the requested sequential/localDC/primaryRange options. */
    @GET
    @Path("/repair")
    public Response cassRepair(
            @QueryParam("sequential") boolean isSequential,
            @QueryParam("localDC") boolean localDCOnly,
            @DefaultValue("false") @QueryParam("primaryRange") boolean primaryRange)
            throws IOException, ExecutionException, InterruptedException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        logger.debug("node tool repair being called");
        nodeTool.repair(isSequential, localDCOnly, primaryRange);
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Return the Cassandra release version as a single-element JSON array. */
    @GET
    @Path("/version")
    public Response version() {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        return Response.ok(
                        new JSONArray().put(nodeTool.getReleaseVersion()),
                        MediaType.APPLICATION_JSON)
                .build();
    }

    /** Return thread-pool statistics and dropped-message counts (nodetool tpstats). */
    @GET
    @Path("/tpstats")
    public Response tpstats()
            throws IOException, ExecutionException, InterruptedException, JSONException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            logger.error(
                    "Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e);
            return Response.status(503).entity("JMXConnectionException").build();
        }
        Iterator<Map.Entry<String, JMXEnabledThreadPoolExecutorMBean>> threads =
                nodeTool.getThreadPoolMBeanProxies();
        JSONArray threadPoolArray = new JSONArray();
        while (threads.hasNext()) {
            Entry<String, JMXEnabledThreadPoolExecutorMBean> thread = threads.next();
            JMXEnabledThreadPoolExecutorMBean threadPoolProxy = thread.getValue();
            // Mirrors nodetool tpstats columns: "Pool Name", "Active", "Pending", "Completed",
            // "Blocked", "All time blocked".
            JSONObject tpObj = new JSONObject();
            tpObj.put("pool name", thread.getKey());
            tpObj.put("active", threadPoolProxy.getActiveCount());
            tpObj.put("pending", threadPoolProxy.getPendingTasks());
            tpObj.put("completed", threadPoolProxy.getCompletedTasks());
            tpObj.put("blocked", threadPoolProxy.getCurrentlyBlockedTasks());
            tpObj.put("total blocked", threadPoolProxy.getTotalBlockedTasks());
            threadPoolArray.put(tpObj);
        }
        JSONObject droppedMsgs = new JSONObject();
        for (Entry<String, Integer> entry : nodeTool.getDroppedMessages().entrySet())
            droppedMsgs.put(entry.getKey(), entry.getValue());
        JSONObject rootObj = new JSONObject();
        rootObj.put("thread pool", threadPoolArray);
        rootObj.put("dropped messages", droppedMsgs);
        return Response.ok(rootObj, MediaType.APPLICATION_JSON).build();
    }

    /** Return pending compaction tasks and per-compaction progress (nodetool compactionstats). */
    @GET
    @Path("/compactionstats")
    public Response compactionStats()
            throws IOException, ExecutionException, InterruptedException, JSONException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            logger.error(
                    "Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e);
            return Response.status(503).entity("JMXConnectionException").build();
        }
        JSONObject rootObj = new JSONObject();
        CompactionManagerMBean cm = nodeTool.getCompactionManagerProxy();
        rootObj.put("pending tasks", cm.getPendingTasks());
        JSONArray compStats = new JSONArray();
        for (Map<String, String> c : cm.getCompactions()) {
            JSONObject cObj = new JSONObject();
            cObj.put("id", c.get("id"));
            cObj.put("keyspace", c.get("keyspace"));
            cObj.put("columnfamily", c.get("columnfamily"));
            cObj.put("bytesComplete", c.get("bytesComplete"));
            cObj.put("totalBytes", c.get("totalBytes"));
            cObj.put("taskType", c.get("taskType"));
            // Fix: parse each numeric field once with Long.parseLong instead of the deprecated
            // boxing constructor new Long(String), which also re-parsed the same values.
            long totalBytes = Long.parseLong(c.get("totalBytes"));
            long bytesComplete = Long.parseLong(c.get("bytesComplete"));
            String percentComplete =
                    totalBytes == 0
                            ? "n/a"
                            : new DecimalFormat("0.00")
                                            .format((double) bytesComplete / totalBytes * 100)
                                    + "%";
            cObj.put("progress", percentComplete);
            compStats.put(cObj);
        }
        rootObj.put("compaction stats", compStats);
        return Response.ok(rootObj, MediaType.APPLICATION_JSON).build();
    }

    /** Stop gossiping (take the node out of the ring without stopping the process). */
    @GET
    @Path("/disablegossip")
    public Response disablegossip() {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        nodeTool.stopGossiping();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Resume gossiping. */
    @GET
    @Path("/enablegossip")
    public Response enablegossip() {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        nodeTool.startGossiping();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Stop the Thrift server. */
    @GET
    @Path("/disablethrift")
    public Response disablethrift() {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        nodeTool.stopThriftServer();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Start the Thrift server. */
    @GET
    @Path("/enablethrift")
    public Response enablethrift() {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        nodeTool.startThriftServer();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Report whether the Thrift server is running. */
    @GET
    @Path("/statusthrift")
    public Response statusthrift() throws JSONException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        return Response.ok(
                        new JSONObject()
                                .put(
                                        "status",
                                        (nodeTool.isThriftServerRunning()
                                                ? "running"
                                                : "not running")),
                        MediaType.APPLICATION_JSON)
                .build();
    }

    /** Return parsed gossip info for the cluster. */
    @GET
    @Path("/gossipinfo")
    public Response gossipinfo() throws Exception {
        List<Map<String, String>> parsedInfo = cassandraOperations.gossipInfo();
        return Response.ok(parsedInfo, MediaType.APPLICATION_JSON).build();
    }

    /** Move this node to a new token. */
    @GET
    @Path("/move")
    public Response moveToken(@QueryParam(REST_HEADER_TOKEN) String newToken) throws IOException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        nodeTool.move(newToken);
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Return latency/size histograms for a single column family (nodetool cfhistograms). */
    @GET
    @Path("/cfhistograms")
    public Response cfhistograms(
            @QueryParam(REST_HEADER_KEYSPACES) String keyspace,
            @QueryParam(REST_HEADER_CFS) String cfname)
            throws IOException, ExecutionException, InterruptedException, JSONException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            logger.error(
                    "Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e);
            return Response.status(503).entity("JMXConnectionException").build();
        }
        if (StringUtils.isBlank(keyspace) || StringUtils.isBlank(cfname))
            return Response.status(400).entity("Missing keyspace/cfname in request").build();
        ColumnFamilyStoreMBean store = nodeTool.getCfsProxy(keyspace, cfname);
        // default is 90 offsets
        long[] offsets = new EstimatedHistogram().getBucketOffsets();
        long[] rrlh = store.getRecentReadLatencyHistogramMicros();
        long[] rwlh = store.getRecentWriteLatencyHistogramMicros();
        long[] sprh = store.getRecentSSTablesPerReadHistogram();
        long[] ersh = store.getEstimatedRowSizeHistogram();
        long[] ecch = store.getEstimatedColumnCountHistogram();
        JSONObject rootObj = new JSONObject();
        JSONArray columns = new JSONArray();
        columns.put("offset");
        columns.put("sstables");
        columns.put("write latency");
        columns.put("read latency");
        columns.put("row size");
        columns.put("column count");
        rootObj.put("columns", columns);
        JSONArray values = new JSONArray();
        for (int i = 0; i < offsets.length; i++) {
            // Histograms may be shorter than the offsets table; pad with empty strings.
            JSONArray row = new JSONArray();
            row.put(offsets[i]);
            row.put(i < sprh.length ? sprh[i] : "");
            row.put(i < rwlh.length ? rwlh[i] : "");
            row.put(i < rrlh.length ? rrlh[i] : "");
            row.put(i < ersh.length ? ersh[i] : "");
            row.put(i < ecch.length ? ecch[i] : "");
            values.put(row);
        }
        rootObj.put("values", values);
        return Response.ok(rootObj, MediaType.APPLICATION_JSON).build();
    }

    /** Run nodetool drain (flush memtables and stop accepting writes). */
    @GET
    @Path("/drain")
    public Response cassDrain() throws IOException, ExecutionException, InterruptedException {
        JMXNodeTool nodeTool;
        try {
            nodeTool = JMXNodeTool.instance(config);
        } catch (JMXConnectionException e) {
            return Response.status(503).entity("JMXConnectionException").build();
        }
        logger.debug("node tool drain being called");
        nodeTool.drain();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /*
    @param in - absolute path on disk of compressed file.
    @param out - absolute path on disk for output, decompressed file.
    Compression algorithm is fixed to Snappy; decompressAndClose is responsible for closing
    both streams.
    */
    @GET
    @Path("/decompress")
    public Response decompress(@QueryParam("in") String in, @QueryParam("out") String out)
            throws Exception {
        SnappyCompression compress = new SnappyCompression();
        // NOTE(review): if the FileOutputStream constructor throws, the already-opened input
        // stream is not closed here — confirm whether this best-effort behavior is acceptable.
        compress.decompressAndClose(new FileInputStream(in), new FileOutputStream(out));
        JSONObject object = new JSONObject();
        object.put("Input compressed file", in);
        object.put("Output decompress file", out);
        return Response.ok(object.toString(), MediaType.APPLICATION_JSON).build();
    }
}
| 3,248 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/resources/CassandraConfig.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.resources;
import com.netflix.priam.PriamServer;
import com.netflix.priam.identity.DoubleRing;
import com.netflix.priam.merics.CassMonitorMetrics;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import org.apache.commons.lang3.StringUtils;
import org.json.simple.JSONValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This servlet will provide the configuration API service as and when Cassandra requests for it.
 *
 * <p>Endpoints are consumed by the Cassandra startup scripts to discover seeds, token, and
 * replacement state for this instance.
 */
@Path("/v1/cassconfig")
@Produces(MediaType.TEXT_PLAIN)
public class CassandraConfig {
    private static final Logger logger = LoggerFactory.getLogger(CassandraConfig.class);
    private final PriamServer priamServer;
    private final DoubleRing doubleRing;
    private final CassMonitorMetrics metrics;

    @Inject
    public CassandraConfig(PriamServer server, DoubleRing doubleRing, CassMonitorMetrics metrics) {
        this.priamServer = server;
        this.doubleRing = doubleRing;
        this.metrics = metrics;
    }

    /**
     * Return the comma-separated seed list for this instance, or 500 when no seeds are known.
     */
    @GET
    @Path("/get_seeds")
    public Response getSeeds() {
        try {
            final List<String> seeds = priamServer.getInstanceIdentity().getSeeds();
            if (!seeds.isEmpty()) {
                metrics.incGetSeeds();
                return Response.ok(StringUtils.join(seeds, ',')).build();
            }
            logger.error("Cannot find the Seeds");
        } catch (Exception e) {
            logger.error("Error while executing get_seeds", e);
            return Response.serverError().build();
        }
        return Response.status(500).build();
    }

    /** Return the token assigned to this instance, or 500 when no token is known. */
    @GET
    @Path("/get_token")
    public Response getToken() {
        try {
            String token = priamServer.getInstanceIdentity().getInstance().getToken();
            if (StringUtils.isNotBlank(token)) {
                logger.info("Returning token value \"{}\" for this instance to caller.", token);
                metrics.incGetToken();
                // Fix: reuse the token already fetched above instead of walking
                // priamServer.getInstanceIdentity().getInstance().getToken() a second time.
                return Response.ok(token).build();
            }
            logger.error("Cannot find token for this instance.");
        } catch (Exception e) {
            // TODO: can this ever happen? if so, what conditions would cause an exception here?
            logger.error("Error while executing get_token", e);
            return Response.serverError().build();
        }
        return Response.status(500).build();
    }

    /** Return "true"/"false" indicating whether this instance is replacing a dead node. */
    @GET
    @Path("/is_replace_token")
    public Response isReplaceToken() {
        try {
            return Response.ok(String.valueOf(priamServer.getInstanceIdentity().isReplace()))
                    .build();
        } catch (Exception e) {
            // TODO: can this ever happen? if so, what conditions would cause an exception here?
            logger.error("Error while executing is_replace_token", e);
            return Response.serverError().build();
        }
    }

    /** Return the IP of the node this instance is replacing (may be "null" as text). */
    @GET
    @Path("/get_replaced_ip")
    public Response getReplacedIp() {
        try {
            metrics.incGetReplacedIp();
            return Response.ok(String.valueOf(priamServer.getInstanceIdentity().getReplacedIp()))
                    .build();
        } catch (Exception e) {
            logger.error("Error while executing get_replaced_ip", e);
            return Response.serverError().build();
        }
    }

    /** Override the IP of the node this instance should replace; 400 on empty input. */
    @POST
    @Path("/set_replaced_ip")
    public Response setReplacedIp(@QueryParam("ip") String ip) {
        if (StringUtils.isEmpty(ip)) return Response.status(Status.BAD_REQUEST).build();
        try {
            priamServer.getInstanceIdentity().setReplacedIp(ip);
            return Response.ok().build();
        } catch (Exception e) {
            logger.error("Error while overriding replacement ip", e);
            return Response.serverError().build();
        }
    }

    /** Return the configured extra environment parameters as a JSON object (never null). */
    @GET
    @Path("/get_extra_env_params")
    public Response getExtraEnvParams() {
        try {
            Map<String, String> returnMap;
            returnMap = priamServer.getConfiguration().getExtraEnvParams();
            if (returnMap == null) {
                returnMap = new HashMap<>();
            }
            String extraEnvParamsJson = JSONValue.toJSONString(returnMap);
            return Response.ok(extraEnvParamsJson).build();
        } catch (Exception e) {
            logger.error("Error while executing get_extra_env_params", e);
            return Response.serverError().build();
        }
    }

    /**
     * Double the token ring: back up the current ring, then insert new slots. On any failure the
     * backed-up ring is restored and the error is rethrown.
     */
    @GET
    @Path("/double_ring")
    public Response doubleRing() throws IOException, ClassNotFoundException {
        try {
            metrics.incDoubleRing();
            doubleRing.backup();
            doubleRing.doubleSlots();
        } catch (Throwable th) {
            logger.error("Error in doubling the ring...", th);
            doubleRing.restore();
            // rethrow
            throw new RuntimeException(th);
        }
        return Response.status(200).build();
    }
}
| 3,249 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/resources/BackupServletV2.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.resources;
import com.netflix.priam.backup.*;
import com.netflix.priam.backupv2.BackupTTLTask;
import com.netflix.priam.backupv2.BackupV2Service;
import com.netflix.priam.backupv2.IMetaProxy;
import com.netflix.priam.backupv2.SnapshotMetaTask;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.DateUtil.DateRange;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * REST endpoints for the v2 (snapshot-meta based) backup scheme: trigger snapshots, TTL old
 * backups, inspect backup metadata, and validate/list backup contents.
 *
 * <p>Created by aagrawal on 1/16/19.
 */
@Path("/v2/backup")
@Produces(MediaType.APPLICATION_JSON)
public class BackupServletV2 {
    private static final Logger logger = LoggerFactory.getLogger(BackupServletV2.class);
    private final BackupVerification backupVerification;
    private final IBackupStatusMgr backupStatusMgr;
    private final SnapshotMetaTask snapshotMetaService;
    private final BackupTTLTask backupTTLService;
    private final IBackupFileSystem fs;
    private final IMetaProxy metaProxy;
    private final Provider<AbstractBackupPath> pathProvider;
    private final BackupV2Service backupService;
    private static final String REST_SUCCESS = "[\"ok\"]";

    @Inject
    public BackupServletV2(
            IBackupStatusMgr backupStatusMgr,
            BackupVerification backupVerification,
            SnapshotMetaTask snapshotMetaService,
            BackupTTLTask backupTTLService,
            IConfiguration configuration,
            IFileSystemContext backupFileSystemCtx,
            @Named("v2") IMetaProxy metaV2Proxy,
            Provider<AbstractBackupPath> pathProvider,
            BackupV2Service backupService) {
        this.backupStatusMgr = backupStatusMgr;
        this.backupVerification = backupVerification;
        this.snapshotMetaService = snapshotMetaService;
        this.backupTTLService = backupTTLService;
        this.fs = backupFileSystemCtx.getFileStrategy(configuration);
        this.metaProxy = metaV2Proxy;
        this.pathProvider = pathProvider;
        this.backupService = backupService;
    }

    /** Trigger a v2 snapshot immediately. */
    @GET
    @Path("/do_snapshot")
    public Response backup() throws Exception {
        snapshotMetaService.execute();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Run the backup TTL task (delete expired remote files) immediately. */
    @GET
    @Path("/ttl")
    public Response ttl() throws Exception {
        backupTTLService.execute();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Clear the backup file system's local listing cache. */
    @GET
    @Path("/clearCache")
    public Response clearCache() throws Exception {
        fs.clearCache();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Re-evaluate and (re)schedule the backup service after a configuration change. */
    @GET
    @Path("/updateService")
    public Response updateService() throws Exception {
        backupService.onChangeUpdateService();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /**
     * Return the latest backup metadata for the calendar day containing {@code date}.
     *
     * @param date a date parseable by {@link DateUtil#parseInstant}
     */
    @GET
    @Path("/info/{date}")
    public Response info(@PathParam("date") String date) {
        Instant instant = DateUtil.parseInstant(date);
        List<BackupMetadata> metadataList =
                backupStatusMgr.getLatestBackupMetadata(
                        BackupVersion.SNAPSHOT_META_SERVICE,
                        new DateRange(
                                instant,
                                instant.plus(1, ChronoUnit.DAYS).truncatedTo(ChronoUnit.DAYS)));
        return Response.ok(GsonJsonSerializer.getGson().toJson(metadataList)).build();
    }

    /**
     * Verify the latest v2 snapshot inside the given date range.
     *
     * @param daterange the range to search, in DateRange string format
     * @param force when true, re-verify even if a verification result already exists
     * @return 200 with the verification result, or 204 when no valid meta file exists in range
     */
    @GET
    @Path("/validate/{daterange}")
    public Response validateV2SnapshotByDate(
            @PathParam("daterange") String daterange,
            @DefaultValue("false") @QueryParam("force") boolean force)
            throws Exception {
        DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange);
        Optional<BackupVerificationResult> result =
                backupVerification.verifyLatestBackup(
                        BackupVersion.SNAPSHOT_META_SERVICE, force, dateRange);
        if (!result.isPresent()) {
            // Fix: a 204 No Content response must not carry a body, so the entity previously
            // attached here was never usable by clients. Log the reason and return a bare 204.
            logger.info("No valid meta found for provided time range: {}", daterange);
            return Response.noContent().build();
        }
        return Response.ok(result.get().toString()).build();
    }

    /**
     * List the remote paths that make up the most recent valid backup within the date range:
     * the latest snapshot files plus any incrementals in range.
     */
    @GET
    @Path("/list/{daterange}")
    public Response list(@PathParam("daterange") String daterange) throws Exception {
        DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange);
        // Find latest valid meta file.
        Optional<AbstractBackupPath> latestValidMetaFile =
                BackupRestoreUtil.getLatestValidMetaPath(metaProxy, dateRange);
        if (!latestValidMetaFile.isPresent()) {
            return Response.ok("No valid meta found!").build();
        }
        List<AbstractBackupPath> allFiles =
                BackupRestoreUtil.getMostRecentSnapshotPaths(
                        latestValidMetaFile.get(), metaProxy, pathProvider);
        allFiles.addAll(
                BackupRestoreUtil.getIncrementalPaths(
                        latestValidMetaFile.get(), dateRange, metaProxy));
        return Response.ok(
                        GsonJsonSerializer.getGson()
                                .toJson(
                                        allFiles.stream()
                                                .map(AbstractBackupPath::getRemotePath)
                                                .collect(Collectors.toList())))
                .build();
    }
}
| 3,250 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/resources/BackupServlet.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.resources;
import com.netflix.priam.backup.*;
import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.scheduler.PriamScheduler;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.DateUtil.DateRange;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Named;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Path("/v1/backup")
@Produces(MediaType.APPLICATION_JSON)
public class BackupServlet {
private static final Logger logger = LoggerFactory.getLogger(BackupServlet.class);
private static final String REST_SUCCESS = "[\"ok\"]";
private static final String REST_HEADER_RANGE = "daterange";
private static final String REST_HEADER_FILTER = "filter";
private final IConfiguration config;
private final IBackupRestoreConfig backupRestoreConfig;
private final IBackupFileSystem backupFs;
private final SnapshotBackup snapshotBackup;
private final BackupVerification backupVerification;
@Inject private PriamScheduler scheduler;
private final IBackupStatusMgr completedBkups;
private final BackupService backupService;
@Inject private MetaData metaData;
    /**
     * Construct the v1 backup REST endpoint.
     *
     * <p>All collaborators are injected; {@code backupFs} is the remote file system named
     * "backup" used for listing backed-up files.
     */
    @Inject
    public BackupServlet(
            IConfiguration config,
            IBackupRestoreConfig backupRestoreConfig,
            @Named("backup") IBackupFileSystem backupFs,
            SnapshotBackup snapshotBackup,
            IBackupStatusMgr completedBkups,
            BackupVerification backupVerification,
            BackupService backupService) {
        this.config = config;
        this.backupRestoreConfig = backupRestoreConfig;
        this.backupFs = backupFs;
        this.snapshotBackup = snapshotBackup;
        this.completedBkups = completedBkups;
        this.backupVerification = backupVerification;
        this.backupService = backupService;
    }
    /** Trigger a full snapshot backup immediately (runs synchronously in this request). */
    @GET
    @Path("/do_snapshot")
    public Response backup() throws Exception {
        snapshotBackup.execute();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }
    /**
     * Schedule the incremental backup task with the scheduler using the timer derived from the
     * current configuration.
     */
    @GET
    @Path("/incremental_backup")
    public Response backupIncrementals() throws Exception {
        scheduler.addTask(
                "IncrementalBackup",
                IncrementalBackup.class,
                IncrementalBackup.getTimer(config, backupRestoreConfig));
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }
@GET
@Path("/updateService")
public Response updateService() throws Exception {
backupService.onChangeUpdateService();
return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
@GET
@Path("/list")
/*
* Fetch the list of files for the requested date range.
*
* @param date range
* @param filter. The type of data files fetched. E.g. META will only fetch the daily snapshot meta data file (meta.json).
* @return the list of files in json format as part of the Http response body.
*/
public Response list(
@QueryParam(REST_HEADER_RANGE) String daterange,
@QueryParam(REST_HEADER_FILTER) @DefaultValue("") String filter)
throws Exception {
logger.info(
"Parameters: {backupPrefix: [{}], daterange: [{}], filter: [{}]}",
config.getBackupPrefix(),
daterange,
filter);
DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange);
Iterator<AbstractBackupPath> it =
backupFs.list(
config.getBackupPrefix(),
Date.from(dateRange.getStartTime()),
Date.from(dateRange.getEndTime()));
JSONObject object = new JSONObject();
object = constructJsonResponse(object, it, filter);
return Response.ok(object.toString(2), MediaType.APPLICATION_JSON).build();
}
@GET
@Path("/status")
@Produces(MediaType.APPLICATION_JSON)
public Response status() throws Exception {
JSONObject object = new JSONObject();
object.put("SnapshotStatus", snapshotBackup.state().toString());
return Response.ok(object.toString(), MediaType.APPLICATION_JSON).build();
}
/*
* Determines the status of a snapshot for a date. If there was at least one successful snpashot for the date, snapshot
* for the date is considered completed.
* @param date date of the snapshot. Format of date is yyyymmdd
* @return {"Snapshotstatus":false} or {"Snapshotstatus":true}
*/
@GET
@Path("/status/{date}")
@Produces(MediaType.APPLICATION_JSON)
public Response statusByDate(@PathParam("date") String date) throws Exception {
Instant startTime = DateUtil.parseInstant(date);
Optional<BackupMetadata> backupMetadataOptional =
this.completedBkups
.getLatestBackupMetadata(
BackupVersion.SNAPSHOT_BACKUP,
new DateRange(
startTime.truncatedTo(ChronoUnit.DAYS),
startTime
.plus(1, ChronoUnit.DAYS)
.truncatedTo(ChronoUnit.DAYS)))
.stream()
.findFirst();
JSONObject object = new JSONObject();
if (!backupMetadataOptional.isPresent()) {
object.put("Snapshotstatus", false);
} else {
object.put("Snapshotstatus", true);
object.put("Details", new JSONObject(backupMetadataOptional.get().toString()));
}
return Response.ok(object.toString(), MediaType.APPLICATION_JSON).build();
}
/*
* Determines the status of a snapshot for a date. If there was at least one successful snpashot for the date, snapshot
* for the date is considered completed.
* @param date date of the snapshot. Format of date is yyyymmdd
* @return {"Snapshots":["201606060450","201606060504"]} or "Snapshots":[]}
*/
@GET
@Path("/status/{date}/snapshots")
@Produces(MediaType.APPLICATION_JSON)
public Response snapshotsByDate(@PathParam("date") String date) throws Exception {
List<BackupMetadata> metadata = this.completedBkups.locate(date);
JSONObject object = new JSONObject();
List<String> snapshots = new ArrayList<>();
if (metadata != null && !metadata.isEmpty())
snapshots.addAll(
metadata.stream()
.filter(
backupMetadata ->
backupMetadata
.getBackupVersion()
.equals(BackupVersion.SNAPSHOT_BACKUP))
.map(
backupMetadata ->
DateUtil.formatyyyyMMddHHmm(backupMetadata.getStart()))
.collect(Collectors.toList()));
object.put("Snapshots", snapshots);
return Response.ok(object.toString(), MediaType.APPLICATION_JSON).build();
}
/*
* Determines the validity of the backup by i) Downloading meta.json file ii) Listing of the backup directory
* iii) Find the missing or extra files in backup location.
* This by default takes the latest snapshot of the application. One can provide exact hour and min to check specific backup.
* Input: Daterange in the format of yyyyMMddHHmm,yyyyMMddHHmm OR yyyyMMdd,yyyyMMdd OR default
*/
@GET
@Path("/validate/snapshot/{daterange}")
@Produces(MediaType.APPLICATION_JSON)
public Response validateSnapshotByDate(
@PathParam("daterange") String daterange,
@DefaultValue("false") @QueryParam("force") boolean force)
throws Exception {
DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange);
Optional<BackupVerificationResult> result =
backupVerification.verifyLatestBackup(
BackupVersion.SNAPSHOT_BACKUP, force, dateRange);
if (!result.isPresent()) {
return Response.noContent()
.entity("No valid meta found for provided time range")
.build();
}
return Response.ok(result.get().toString()).build();
}
/*
* A list of files for requested filter. Currently, the only supported filter is META, all others will be ignore.
* For filter of META, ONLY the daily snapshot meta file (meta.json) are accounted for, not the incremental meta file.
* In addition, we do ONLY list the name of the meta data file, not the list of data files within it.
*
* @param handle to the json response
* @param a list of all files (data (*.db), and meta data file (*.json)) from S3 for requested dates.
* @param backup meta data file filter. Currently, the only supported filter is META, all others will be ignore.
* @return a list of files in Json format.
*/
private JSONObject constructJsonResponse(
JSONObject object, Iterator<AbstractBackupPath> it, String filter) throws Exception {
int fileCnt = 0;
filter = filter.contains("?") ? filter.substring(0, filter.indexOf("?")) : filter;
try {
JSONArray jArray = new JSONArray();
while (it.hasNext()) {
AbstractBackupPath p = it.next();
if (!filter.isEmpty() && BackupFileType.valueOf(filter) != p.getType()) continue;
JSONObject backupJSON = new JSONObject();
backupJSON.put("bucket", config.getBackupPrefix());
backupJSON.put("filename", p.getRemotePath());
backupJSON.put("app", p.getClusterName());
backupJSON.put("region", p.getRegion());
backupJSON.put("token", p.getToken());
backupJSON.put("ts", DateUtil.formatyyyyMMddHHmm(p.getTime()));
backupJSON.put(
"instance_id", p.getInstanceIdentity().getInstance().getInstanceId());
backupJSON.put("uploaded_ts", DateUtil.formatyyyyMMddHHmm(p.getUploadedTs()));
if ("meta".equalsIgnoreCase(filter)) { // only check for existence of meta file
p.setFileName(
"meta.json"); // ignore incremental meta files, we are only interested
// in daily snapshot
if (metaData.doesExist(p)) {
// if here, snapshot completed.
fileCnt++;
jArray.put(backupJSON);
backupJSON.put("num_files", "1");
}
} else { // account for every file (data, and meta) .
fileCnt++;
jArray.put(backupJSON);
}
}
object.put("files", jArray);
object.put("num_files", fileCnt);
} catch (JSONException jse) {
logger.info("Caught JSON Exception --> {}", jse.getMessage());
}
return object;
}
}
| 3,251 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/resources/PriamInstanceResource.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.resources;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.IPriamInstanceFactory;
import com.netflix.priam.identity.PriamInstance;
import com.netflix.priam.identity.config.InstanceInfo;
import java.net.URI;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Resource for manipulating priam instances. */
@Path("/v1/instances")
@Produces(MediaType.TEXT_PLAIN)
public class PriamInstanceResource {
    private static final Logger log = LoggerFactory.getLogger(PriamInstanceResource.class);

    private final IConfiguration config;
    private final IPriamInstanceFactory factory;
    private final InstanceInfo instanceInfo;

    @Inject // Note: do not parameterize the generic type variable to an implementation as it
    // confuses
    // Guice in the binding.
    public PriamInstanceResource(
            IConfiguration config, IPriamInstanceFactory factory, InstanceInfo instanceInfo) {
        this.config = config;
        this.factory = factory;
        this.instanceInfo = instanceInfo;
    }

    /**
     * Get the list of all priam instances
     *
     * @return the list of all priam instances, one per line
     */
    @GET
    public String getInstances() {
        return factory.getAllIds(config.getAppName())
                .stream()
                .map(PriamInstance::toString)
                .collect(Collectors.joining("\n", "", "\n"));
    }

    /**
     * Returns an individual priam instance by id or WebApplicationException (404) if not found
     *
     * @param id the node id
     * @return the priam instance
     */
    @GET
    @Path("{id}")
    public String getInstance(@PathParam("id") int id) {
        PriamInstance node = getByIdIfFound(id);
        return node.toString();
    }

    /**
     * Creates a new instance with the given parameters
     *
     * @param id the node id
     * @param instanceID the cloud instance identifier
     * @param hostname the instance hostname
     * @param ip the instance IP address
     * @param rack the rack/zone the instance lives in
     * @param token the Cassandra token assigned to the instance
     * @return Response (201) if the instance was created, with a Location header for it
     */
    @POST
    public Response createInstance(
            @QueryParam("id") int id,
            @QueryParam("instanceID") String instanceID,
            @QueryParam("hostname") String hostname,
            @QueryParam("ip") String ip,
            @QueryParam("rack") String rack,
            @QueryParam("token") String token) {
        // Fixed: the format string was missing its closing ']'.
        log.info(
                "Creating instance [id={}, instanceId={}, hostname={}, ip={}, rack={}, token={}]",
                id,
                instanceID,
                hostname,
                ip,
                rack,
                token);
        PriamInstance instance =
                factory.create(
                        config.getAppName(), id, instanceID, hostname, ip, rack, null, token);
        URI uri = UriBuilder.fromPath("/{id}").build(instance.getId());
        return Response.created(uri).build();
    }

    /**
     * Deletes the instance with the given {@code id}.
     *
     * @param id the node id
     * @return Response (204) if the instance was deleted
     */
    @DELETE
    @Path("{id}")
    public Response deleteInstance(@PathParam("id") int id) {
        PriamInstance instance = getByIdIfFound(id);
        factory.delete(instance);
        return Response.noContent().build();
    }

    /**
     * Returns the PriamInstance with the given {@code id}, or throws a WebApplicationException(404)
     * if none found. (The javadoc previously claimed 400; the code has always built a 404.)
     *
     * @param id the node id
     * @return PriamInstance with the given {@code id}
     */
    private PriamInstance getByIdIfFound(int id) {
        PriamInstance instance =
                factory.getInstance(config.getAppName(), instanceInfo.getRegion(), id);
        if (instance == null) {
            throw notFound(String.format("No priam instance with id %s found", id));
        }
        return instance;
    }

    /** Builds a 404 WebApplicationException carrying the given message as its entity. */
    private static WebApplicationException notFound(String message) {
        return new WebApplicationException(
                Response.status(Response.Status.NOT_FOUND).entity(message).build());
    }
}
| 3,252 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/resources/RestoreServlet.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.resources;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.restore.Restore;
import com.netflix.priam.utils.DateUtil;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Path("/v1")
@Produces(MediaType.APPLICATION_JSON)
public class RestoreServlet {

    private static final Logger logger = LoggerFactory.getLogger(RestoreServlet.class);

    private final Restore restoreObj;
    private final InstanceState instanceState;

    @Inject
    public RestoreServlet(Restore restoreObj, InstanceState instanceState) {
        this.restoreObj = restoreObj;
        this.instanceState = instanceState;
    }

    /**
     * Returns metadata of the current restore; if no restore is in progress, returns the metadata
     * of the most recent restore attempt.
     *
     * <pre>
     * restoreStatus: {
     *   startDateRange: "[yyyymmddhhmm]",
     *   endDateRange: "[yyyymmddhhmm]",
     *   executionStartTime: "[yyyymmddhhmm]",
     *   executionEndTime: "[yyyymmddhhmm]",
     *   snapshotMetaFile: "&lt;meta.json&gt; used for full snapshot",
     *   status: "STARTED|FINISHED|FAILED"
     * }
     * </pre>
     */
    @GET
    @Path("/restore/status")
    public Response status() throws Exception {
        final String restoreStatus = instanceState.getRestoreStatus().toString();
        return Response.ok(restoreStatus).build();
    }

    /**
     * Kicks off a restore covering the supplied date range.
     *
     * @param daterange the range to restore, parsed by {@link DateUtil.DateRange}
     */
    @GET
    @Path("/restore")
    public Response restore(@QueryParam("daterange") String daterange) throws Exception {
        final DateUtil.DateRange range = new DateUtil.DateRange(daterange);
        logger.info(
                "Parameters: {startTime: [{}], endTime: [{}]}",
                range.getStartTime().toString(),
                range.getEndTime().toString());
        restoreObj.restore(range);
        return Response.ok("[\"ok\"]", MediaType.APPLICATION_JSON).build();
    }
}
| 3,253 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/resources/SecurityGroupAdmin.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.resources;
import com.netflix.priam.identity.IMembership;
import java.util.Collections;
import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This http endpoint allows direct updates (adding/removing) (CIDR) IP addresses and port ranges to
* the security group for this app.
*/
@Path("/v1/secgroup")
@Produces(MediaType.TEXT_PLAIN)
public class SecurityGroupAdmin {
    private static final Logger log = LoggerFactory.getLogger(SecurityGroupAdmin.class);
    private static final String CIDR_TAG = "/32";

    private final IMembership membership;

    @Inject
    public SecurityGroupAdmin(IMembership membership) {
        this.membership = membership;
    }

    /**
     * Adds an ACL entry (CIDR IP + port range) to this app's security group.
     *
     * @param ipAddr IP address; a bare address is normalized to /32 CIDR notation
     * @param fromPort start of the port range
     * @param toPort end of the port range
     * @return 200 on success, 500 if the membership update fails
     */
    @POST
    public Response addACL(
            @QueryParam("ip") String ipAddr,
            @QueryParam("fromPort") int fromPort,
            @QueryParam("toPort") int toPort) {
        try {
            membership.addACL(Collections.singletonList(toCidr(ipAddr)), fromPort, toPort);
        } catch (Exception e) {
            log.error("Error while trying to add an ACL to a security group", e);
            return Response.serverError().build();
        }
        return Response.ok().build();
    }

    /**
     * Removes an ACL entry (CIDR IP + port range) from this app's security group.
     *
     * @param ipAddr IP address; a bare address is normalized to /32 CIDR notation
     * @param fromPort start of the port range
     * @param toPort end of the port range
     * @return 200 on success, 500 if the membership update fails
     */
    @DELETE
    public Response removeACL(
            @QueryParam("ip") String ipAddr,
            @QueryParam("fromPort") int fromPort,
            @QueryParam("toPort") int toPort) {
        try {
            membership.removeACL(Collections.singletonList(toCidr(ipAddr)), fromPort, toPort);
        } catch (Exception e) {
            // Fixed wording: previously said "add ... to"; this is a removal.
            log.error("Error while trying to remove an ACL from a security group", e);
            return Response.serverError().build();
        }
        return Response.ok().build();
    }

    /**
     * Appends the /32 CIDR suffix when the address does not already end with it, preserving the
     * original normalization behavior shared by add and remove.
     */
    private static String toCidr(String ipAddr) {
        return ipAddr.endsWith(CIDR_TAG) ? ipAddr : ipAddr + CIDR_TAG;
    }
}
| 3,254 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/configSource/AbstractConfigSource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.configSource;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
/**
 * Base implementations for most methods on {@link IConfigSource}: typed getters are implemented in
 * terms of the abstract {@code get(String)}, falling back to the supplied default when the raw
 * value is missing or unparseable.
 */
public abstract class AbstractConfigSource implements IConfigSource {

    private String asgName;
    private String region;

    @Override
    public void initialize(final String asgName, final String region) {
        this.asgName = checkNotNull(asgName, "ASG name is not defined");
        this.region = checkNotNull(region, "Region is not defined");
    }

    @Override
    public boolean contains(final String key) {
        return get(key) != null;
    }

    @Override
    public boolean isEmpty() {
        return size() == 0;
    }

    @Override
    public String get(final String key, final String defaultValue) {
        final String value = get(key);
        return (value != null) ? value : defaultValue;
    }

    @Override
    public boolean get(final String key, final boolean defaultValue) {
        final String value = get(key);
        // Boolean.parseBoolean never throws: any value other than "true" (ignoring
        // case) parses to false, so the former try/catch here was dead code.
        return (value != null) ? Boolean.parseBoolean(value) : defaultValue;
    }

    @Override
    public Class<?> get(final String key, final Class<?> defaultValue) {
        final String value = get(key);
        if (value != null) {
            try {
                return Class.forName(value);
            } catch (ClassNotFoundException e) {
                // class not on the classpath; fall through to the default
            }
        }
        return defaultValue;
    }

    @Override
    public <T extends Enum<T>> T get(final String key, final T defaultValue) {
        final String value = get(key);
        if (value != null) {
            try {
                return Enum.valueOf(defaultValue.getDeclaringClass(), value);
            } catch (IllegalArgumentException e) {
                // value is not a constant of this enum; fall through to the default
            }
        }
        return defaultValue;
    }

    @Override
    public int get(final String key, final int defaultValue) {
        final String value = get(key);
        if (value != null) {
            try {
                return Integer.parseInt(value);
            } catch (NumberFormatException e) {
                // unparseable; fall through to the default
            }
        }
        return defaultValue;
    }

    @Override
    public long get(final String key, final long defaultValue) {
        final String value = get(key);
        if (value != null) {
            try {
                return Long.parseLong(value);
            } catch (NumberFormatException e) {
                // unparseable; fall through to the default
            }
        }
        return defaultValue;
    }

    @Override
    public float get(final String key, final float defaultValue) {
        final String value = get(key);
        if (value != null) {
            try {
                return Float.parseFloat(value);
            } catch (NumberFormatException e) {
                // unparseable; fall through to the default
            }
        }
        return defaultValue;
    }

    @Override
    public double get(final String key, final double defaultValue) {
        final String value = get(key);
        if (value != null) {
            try {
                return Double.parseDouble(value);
            } catch (NumberFormatException e) {
                // unparseable; fall through to the default
            }
        }
        return defaultValue;
    }

    @Override
    public List<String> getList(String prop) {
        return getList(prop, ImmutableList.of());
    }

    @Override
    public List<String> getList(String prop, List<String> defaultValue) {
        final String value = get(prop);
        if (value != null) {
            // The raw value is a comma-separated string; tokens are whitespace-stripped.
            return getTrimmedStringList(value.split(","));
        }
        return defaultValue;
    }

    protected String getAsgName() {
        return asgName;
    }

    protected String getRegion() {
        return region;
    }

    /** Strips surrounding whitespace from each token and collects them into a mutable list. */
    private List<String> getTrimmedStringList(String[] strings) {
        List<String> list = Lists.newArrayList();
        for (String s : strings) {
            list.add(StringUtils.strip(s));
        }
        return list;
    }
}
| 3,255 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/configSource/MemoryConfigSource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.configSource;
import com.google.common.collect.Maps;
import java.util.Map;
/** In-memory {@link AbstractConfigSource} backed by a concurrent map; useful for tests. */
public final class MemoryConfigSource extends AbstractConfigSource {

    // Concurrent map so reads and writes are safe across threads.
    private final Map<String, String> entries = Maps.newConcurrentMap();

    @Override
    public String get(final String key) {
        return entries.get(key);
    }

    @Override
    public void set(final String key, final String value) {
        entries.put(key, value);
    }

    @Override
    public int size() {
        return entries.size();
    }
}
| 3,256 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/configSource/PriamConfigSource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.configSource;
import javax.inject.Inject;
/**
 * Default {@link IConfigSource} pulling in configs from SimpleDB, local Properties, and System
 * Properties. Because {@code CompositeConfigSource} gives the FIRST source the highest priority,
 * a key found in SimpleDB wins over the local Priam.properties file, which in turn wins over Java
 * system properties.
 */
public class PriamConfigSource extends CompositeConfigSource {
    @Inject
    public PriamConfigSource(
            final SimpleDBConfigSource simpleDBConfigSource,
            final PropertiesConfigSource propertiesConfigSource,
            final SystemPropertiesConfigSource systemPropertiesConfigSource) {
        // this order was based off PriamConfigurations loading. Whatever loaded last could
        // override, but with Composite, first
        // has the highest priority.
        super(simpleDBConfigSource, propertiesConfigSource, systemPropertiesConfigSource);
    }
}
| 3,257 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/configSource/IConfigSource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.configSource;
import com.google.inject.ImplementedBy;
import java.util.List;
/** Defines the configuration source contract for an application. */
@ImplementedBy(PriamConfigSource.class)
public interface IConfigSource {
    /**
     * Must be called before any other method. This method will allow implementations to do any
     * setup that they require before being called.
     *
     * @param asgName: Name of the asg
     * @param region: Name of the region
     */
    void initialize(String asgName, String region);
    /**
     * A non-negative integer indicating a count of elements.
     *
     * @return non-negative integer indicating a count of elements.
     */
    int size();
    /**
     * Returns {@code true} if the size is zero. May be more efficient than calculating size.
     *
     * @return {@code true} if the size is zero otherwise {@code false}.
     */
    boolean isEmpty();
    /**
     * Check if the given key can be found in the config.
     *
     * @param key to look up value.
     * @return if the key is present
     */
    boolean contains(String key);
    /**
     * Get a String associated with the given configuration key.
     *
     * @param key to look up value.
     * @return value from config or null if not present.
     */
    String get(String key);
    /**
     * Get a String associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    String get(String key, String defaultValue);
    /**
     * Get a boolean associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    boolean get(String key, boolean defaultValue);
    /**
     * Get a Class associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    Class<?> get(String key, Class<?> defaultValue);
    /**
     * Get an Enum associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @param <T> enum type.
     * @return value from config or defaultValue if not present.
     */
    <T extends Enum<T>> T get(String key, T defaultValue);
    /**
     * Get an int associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    int get(String key, int defaultValue);
    /**
     * Get a long associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    long get(String key, long defaultValue);
    /**
     * Get a float associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    float get(String key, float defaultValue);
    /**
     * Get a double associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    double get(String key, double defaultValue);
    /**
     * Get a list of strings associated with the given configuration key.
     *
     * @param key to look up value.
     * @return value from config or an empty immutable list if not present.
     */
    List<String> getList(String key);
    /**
     * Get a list of strings associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    List<String> getList(String key, List<String> defaultValue);
    /**
     * Set the value for the given key.
     *
     * @param key to set value for.
     * @param value to set.
     */
    void set(String key, String value);
}
| 3,258 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/configSource/PropertiesConfigSource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.configSource;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Map;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Loads the 'Priam.properties' file as a source. */
public class PropertiesConfigSource extends AbstractConfigSource {
    private static final Logger logger =
            LoggerFactory.getLogger(PropertiesConfigSource.class.getName());

    private static final String DEFAULT_PRIAM_PROPERTIES = "Priam.properties";

    private final Map<String, String> data = Maps.newConcurrentMap();
    private final String priamFile;

    public PropertiesConfigSource() {
        this.priamFile = DEFAULT_PRIAM_PROPERTIES;
    }

    public PropertiesConfigSource(final Properties properties) {
        checkNotNull(properties);
        this.priamFile = DEFAULT_PRIAM_PROPERTIES;
        copyFrom(properties);
    }

    @VisibleForTesting
    PropertiesConfigSource(final String file) {
        this.priamFile = checkNotNull(file);
    }

    @Override
    public void initialize(final String asgName, final String region) {
        super.initialize(asgName, region);
        Properties properties = new Properties();
        URL url = PropertiesConfigSource.class.getClassLoader().getResource(priamFile);
        if (url != null) {
            // try-with-resources: the stream from url.openStream() was previously
            // never closed (resource leak).
            try (InputStream stream = url.openStream()) {
                properties.load(stream);
                copyFrom(properties);
            } catch (IOException e) {
                // Keep the historical message, but surface the cause so an unreadable
                // file is distinguishable (in logs) from a missing one.
                logger.info("No Priam.properties. Ignore!", e);
            }
        } else {
            logger.info("No Priam.properties. Ignore!");
        }
    }

    @Override
    public String get(final String prop) {
        return data.get(prop);
    }

    @Override
    public void set(final String key, final String value) {
        Preconditions.checkNotNull(value, "Value can not be null for configurations.");
        data.put(key, value);
    }

    @Override
    public int size() {
        return data.size();
    }

    @Override
    public boolean contains(final String prop) {
        return data.containsKey(prop);
    }

    /**
     * Copies all values from the given properties into this source; null or empty values are
     * skipped. (Renamed from the confusing {@code clone} to avoid evoking {@link Object#clone}.)
     *
     * @param properties to copy
     */
    private void copyFrom(final Properties properties) {
        if (properties.isEmpty()) return;
        synchronized (properties) {
            for (final String key : properties.stringPropertyNames()) {
                final String value = properties.getProperty(key);
                if (!Strings.isNullOrEmpty(value)) {
                    data.put(key, value);
                }
            }
        }
    }
}
| 3,259 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/configSource/CompositeConfigSource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.configSource;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import java.util.Collection;
/**
* A {@link IConfigSource} that delegates method calls to the underline sources. The order in which
* values are provided depend on the {@link IConfigSource}s provided. If user asks for key 'foo',
* and this composite has three sources, it will first check if the key is found in the first
* source, if not it will check the second and if not, the third, else return null or false if
* {@link #contains(String)} was called.
*
* <p>Implementation note: get methods with a default are implemented in {@link
* AbstractConfigSource}, if the underlying source overrides one of these methods, then that
* implementation will be ignored.
*/
public class CompositeConfigSource extends AbstractConfigSource {

    private final ImmutableCollection<? extends IConfigSource> sources;

    public CompositeConfigSource(final ImmutableCollection<? extends IConfigSource> sources) {
        Preconditions.checkArgument(
                !sources.isEmpty(),
                "Can not create a composite config source without config sources!");
        this.sources = sources;
    }

    public CompositeConfigSource(final Collection<? extends IConfigSource> sources) {
        this(ImmutableList.copyOf(sources));
    }

    public CompositeConfigSource(final Iterable<? extends IConfigSource> sources) {
        this(ImmutableList.copyOf(sources));
    }

    public CompositeConfigSource(final IConfigSource... sources) {
        this(ImmutableList.copyOf(sources));
    }

    /** Initializes each delegate source, in the order they were supplied. */
    @Override
    public void initialize(final String asgName, final String region) {
        // TODO should this catch any potential exceptions?
        for (final IConfigSource source : sources) {
            source.initialize(asgName, region);
        }
    }

    /** @return the combined entry count across all delegate sources. */
    @Override
    public int size() {
        return sources.stream().mapToInt(IConfigSource::size).sum();
    }

    @Override
    public boolean isEmpty() {
        return size() == 0;
    }

    /** @return true when any delegate source holds a value for {@code key}. */
    @Override
    public boolean contains(final String key) {
        return get(key) != null;
    }

    /**
     * Looks the key up in each delegate in order and returns the first non-null
     * value found, or null when no source knows the key.
     */
    @Override
    public String get(final String key) {
        Preconditions.checkNotNull(key);
        for (final IConfigSource source : sources) {
            final String candidate = source.get(key);
            if (candidate != null) {
                return candidate;
            }
        }
        return null;
    }

    /** Writes always go to the first delegate source only. */
    @Override
    public void set(final String key, final String value) {
        Preconditions.checkNotNull(value, "Value can not be null for configurations.");
        final IConfigSource firstSource = Iterables.getFirst(sources, null);
        // firstSource shouldn't be null because the collection is immutable, and the collection is
        // non empty.
        Preconditions.checkState(
                firstSource != null, "There was no IConfigSource found at the first location?");
        firstSource.set(key, value);
    }
}
| 3,260 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/configSource/SimpleDBConfigSource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.configSource;
import com.amazonaws.services.simpledb.AmazonSimpleDB;
import com.amazonaws.services.simpledb.AmazonSimpleDBClient;
import com.amazonaws.services.simpledb.model.Attribute;
import com.amazonaws.services.simpledb.model.Item;
import com.amazonaws.services.simpledb.model.SelectRequest;
import com.amazonaws.services.simpledb.model.SelectResult;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.priam.cred.ICredential;
import java.util.Iterator;
import java.util.Map;
import javax.inject.Inject;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Loads config data from SimpleDB. {@link #initialize(String, String)} will query the SimpleDB
* domain "PriamProperties" for any potential configurations. The domain is set up to support
* multiple different clusters; this is done by using amazon's auto scaling groups (ASG).
*
* <p>Schema
*
* <ul>
* <li>"appId" // ASG up to first instance of '-'. So ASG name priam-test will create appId priam,
* ASG priam_test will create appId priam_test.
* <li>"property" // key to use for configs.
* <li>"value" // value to set for the given property/key.
* <li>"region" // region the config belongs to. If left empty, then applies to all regions.
* </ul>
*
* }
*/
public final class SimpleDBConfigSource extends AbstractConfigSource {
    private static final Logger logger =
            LoggerFactory.getLogger(SimpleDBConfigSource.class.getName());

    private static final String DOMAIN = "PriamProperties";
    // In-memory cache of the properties fetched from SimpleDB.
    private final Map<String, String> data = Maps.newConcurrentMap();
    private final ICredential provider;

    @Inject
    public SimpleDBConfigSource(final ICredential provider) {
        this.provider = provider;
    }

    /**
     * Queries the SimpleDB domain for every property belonging to this cluster's
     * appId and caches the results, paging through result sets via next tokens.
     */
    @Override
    public void initialize(final String asgName, final String region) {
        super.initialize(asgName, region);

        // End point is us-east-1
        AmazonSimpleDB simpleDBClient =
                AmazonSimpleDBClient.builder()
                        .withCredentials(provider.getAwsCredentialProvider())
                        .build();

        String nextToken = null;
        // appId is the ASG name up to the first '-' (see class javadoc). Fix: the
        // guard previously checked lastIndexOf('-') while the substring used
        // indexOf('-'), which produced an empty appId for names like "-a-b".
        String appid =
                asgName.indexOf('-') > 0 ? asgName.substring(0, asgName.indexOf('-')) : asgName;
        logger.info("appid used to fetch properties is: {}", appid);
        // The query never changes between pages; hoisted out of the loop.
        final String ALL_QUERY =
                "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'";
        do {
            SelectRequest request = new SelectRequest(String.format(ALL_QUERY, appid));
            request.setNextToken(nextToken);
            SelectResult result = simpleDBClient.select(request);
            nextToken = result.getNextToken();
            for (Item item : result.getItems()) addProperty(item);
        } while (nextToken != null);
    }

    /** Attribute names used in the SimpleDB schema (see class javadoc). */
    private static class Attributes {
        public static final String APP_ID = "appId"; // ASG
        public static final String PROPERTY = "property";
        public static final String PROPERTY_VALUE = "value";
        public static final String REGION = "region";
    }

    /**
     * Extracts property/value/region attributes from one SimpleDB item and stores
     * the property, honoring the region-scoping rules below.
     */
    private void addProperty(Item item) {
        String prop = "";
        String value = "";
        String dc = "";
        for (Attribute att : item.getAttributes()) {
            if (att.getName().equals(Attributes.PROPERTY)) prop = att.getValue();
            else if (att.getName().equals(Attributes.PROPERTY_VALUE)) value = att.getValue();
            else if (att.getName().equals(Attributes.REGION)) dc = att.getValue();
        }

        // Ignore, if not this region
        if (StringUtils.isNotBlank(dc) && !dc.equals(getRegion())) return;
        // Override only if region is specified
        if (data.containsKey(prop) && StringUtils.isBlank(dc)) return;

        data.put(prop, value);
    }

    @Override
    public int size() {
        return data.size();
    }

    @Override
    public String get(final String key) {
        return data.get(key);
    }

    @Override
    public void set(final String key, final String value) {
        Preconditions.checkNotNull(value, "Value can not be null for configurations.");
        data.put(key, value);
    }
}
| 3,261 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/configSource/SystemPropertiesConfigSource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.configSource;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.priam.config.PriamConfiguration;
import java.util.Map;
import java.util.Properties;
/**
* Loads {@link System#getProperties()} as a source.
*
* <p>Implementation note: {@link #set(String, String)} does not write to system properties, but
* will write to a new map. This means that setting values to this source has no effect on system
* properties or other instances of this class.
*/
public final class SystemPropertiesConfigSource extends AbstractConfigSource {
    private static final String BLANK = "";

    // Local snapshot; writes via set() never touch real system properties.
    private final Map<String, String> data = Maps.newConcurrentMap();

    /** Copies every Priam-prefixed, non-blank system property into this source. */
    @Override
    public void initialize(final String asgName, final String region) {
        super.initialize(asgName, region);

        final Properties systemProps = System.getProperties();
        for (final String key : systemProps.stringPropertyNames()) {
            // Only properties within Priam's namespace are relevant here.
            if (!key.startsWith(PriamConfiguration.PRIAM_PRE)) {
                continue;
            }
            final String value = systemProps.getProperty(key);
            if (value != null && !BLANK.equals(value)) {
                data.put(key, value);
            }
        }
    }

    /** @return number of cached properties. */
    @Override
    public int size() {
        return data.size();
    }

    /** @return the cached value for {@code key}, or null when absent. */
    @Override
    public String get(final String key) {
        return data.get(key);
    }

    /** Stores the value locally only; system properties are left untouched. */
    @Override
    public void set(final String key, final String value) {
        Preconditions.checkNotNull(value, "Value can not be null for configurations.");
        data.put(key, value);
    }
}
| 3,262 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/TaskTimer.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
import java.text.ParseException;
import org.quartz.Trigger;
/** Interface to represent time/interval */
/** Interface to represent time/interval */
public interface TaskTimer {
    /**
     * @return the Quartz trigger that fires this task according to the timer's schedule.
     * @throws ParseException if the underlying schedule expression cannot be parsed.
     */
    Trigger getTrigger() throws ParseException;

    /**
     * @return the cron like expression use to schedule the task. Can return null or empty string.
     */
    String getCronExpression();
}
| 3,263 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/CronTimer.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
import java.text.ParseException;
import org.apache.commons.lang3.StringUtils;
import org.quartz.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Runs jobs at the specified absolute time and frequency */
/** Runs jobs at the specified absolute time and frequency */
public class CronTimer implements TaskTimer {
    private static final Logger logger = LoggerFactory.getLogger(CronTimer.class);
    private final String cronExpression;
    // Trigger identity used by getTrigger(); stays null when the expression-only
    // constructor is used, so getTrigger() should not be called in that case.
    private String name;

    public enum DayOfWeek {
        SUN,
        MON,
        TUE,
        WED,
        THU,
        FRI,
        SAT
    }

    /*
     * interval in terms of minutes
     */
    // NOTE(review): the seconds field here is "*", so this expression fires every
    // second of every matching minute rather than once per interval — confirm
    // this is the intended behavior before relying on it.
    public CronTimer(String name, int min) {
        this.name = name;
        cronExpression = "*" + " " + "0/" + min + " " + "* * * ?";
    }

    /** Hourly cron. */
    public CronTimer(String name, int minute, int sec) {
        this.name = name;
        cronExpression = sec + " " + minute + " 0/1 * * ?";
    }

    /** Daily Cron */
    public CronTimer(String name, int hour, int minute, int sec) {
        this.name = name;
        cronExpression = sec + " " + minute + " " + hour + " * * ?";
    }

    /** Weekly cron jobs */
    public CronTimer(String name, DayOfWeek dayofweek, int hour, int minute, int sec) {
        this.name = name;
        cronExpression = sec + " " + minute + " " + hour + " * * " + dayofweek;
    }

    /** Cron Expression. */
    public CronTimer(String expression) {
        this.cronExpression = expression;
    }

    public CronTimer(String name, String expression) {
        this.name = name;
        this.cronExpression = expression;
    }

    /** Builds a Quartz trigger in the default group from the stored expression. */
    public Trigger getTrigger() throws ParseException {
        return TriggerBuilder.newTrigger()
                .withIdentity(name, Scheduler.DEFAULT_GROUP)
                .withSchedule(CronScheduleBuilder.cronSchedule(cronExpression))
                .build();
    }

    @Override
    public String getCronExpression() {
        return this.cronExpression;
    }

    /**
     * Validating factory: the literal "-1" disables the job (returns null); an
     * empty or invalid expression raises IllegalArgumentException.
     *
     * @param jobName name used for the trigger identity and in log messages.
     * @param cronExpression a Quartz cron expression, or "-1" to disable the job.
     * @return a CronTimer, or null when the job is disabled.
     * @throws IllegalArgumentException when the expression is empty or invalid.
     */
    public static CronTimer getCronTimer(final String jobName, final String cronExpression)
            throws IllegalArgumentException {
        CronTimer cronTimer = null;
        if (!StringUtils.isEmpty(cronExpression) && cronExpression.equalsIgnoreCase("-1")) {
            logger.info(
                    "Skipping {} as it is disabled via setting {} cron to -1.", jobName, jobName);
        } else {
            if (StringUtils.isEmpty(cronExpression)
                    || !CronExpression.isValidExpression(cronExpression))
                throw new IllegalArgumentException(
                        "Invalid CRON expression: "
                                + cronExpression
                                + ". Please use -1, if you wish to disable "
                                + jobName
                                + " else fix the CRON expression and try again!");
            cronTimer = new CronTimer(jobName, cronExpression);
            logger.info(
                    "Starting {} with CRON expression {}", jobName, cronTimer.getCronExpression());
        }
        return cronTimer;
    }
}
| 3,264 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/GuiceJobFactory.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
import com.google.inject.Injector;
import javax.inject.Inject;
import org.quartz.Job;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.spi.JobFactory;
import org.quartz.spi.TriggerFiredBundle;
/** Quartz {@link JobFactory} that builds job instances through Guice injection. */
public class GuiceJobFactory implements JobFactory {
    public final Injector guice;

    @Inject
    public GuiceJobFactory(Injector guice) {
        this.guice = guice;
    }

    /** Instantiates the job class named in the trigger bundle via the injector. */
    @Override
    public Job newJob(TriggerFiredBundle bundle, Scheduler scheduler) throws SchedulerException {
        final Class<?> jobClass = bundle.getJobDetail().getJobClass();
        final Job job = (Job) guice.getInstance(jobClass);
        guice.injectMembers(job);
        return job;
    }
}
| 3,265 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/UnsupportedTypeException.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.scheduler;
/** Created by aagrawal on 3/14/17. */
/**
 * Thrown when a configured value cannot be mapped to a supported type.
 *
 * <p>Created by aagrawal on 3/14/17.
 */
public class UnsupportedTypeException extends Exception {
    // Added for consistency with the sibling ExecutionException, which declares
    // an explicit serialVersionUID.
    private static final long serialVersionUID = 1L;

    public UnsupportedTypeException(String msg, Throwable th) {
        super(msg, th);
    }

    public UnsupportedTypeException(String msg) {
        super(msg);
    }

    public UnsupportedTypeException(Exception ex) {
        super(ex);
    }

    public UnsupportedTypeException(Throwable th) {
        super(th);
    }
}
| 3,266 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/BlockingSubmitThreadPoolExecutor.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* {@link ThreadPoolExecutor} that will block in the {@code submit()} method until the task can be
* successfully added to the queue.
*/
public class BlockingSubmitThreadPoolExecutor extends ThreadPoolExecutor {
    private static final long DEFAULT_SLEEP = 100;
    private static final long DEFAULT_KEEP_ALIVE = 100;
    private static final Logger logger =
            LoggerFactory.getLogger(BlockingSubmitThreadPoolExecutor.class);
    private final BlockingQueue<Runnable> queue;
    // Maximum total milliseconds to wait for queue capacity before giving up.
    private final long giveupTime;
    // Tasks submitted but not yet finished executing; used by sleepTillEmpty()
    // to detect quiescence.
    private final AtomicInteger active;

    public BlockingSubmitThreadPoolExecutor(
            int maximumPoolSize, BlockingQueue<Runnable> workQueue, long timeoutAdding) {
        super(maximumPoolSize, maximumPoolSize, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, workQueue);
        this.queue = workQueue;
        this.giveupTime = timeoutAdding;
        this.active = new AtomicInteger(0);
    }

    /**
     * This is a thread safe way to avoid rejection exception... this is implemented because we
     * might want to hold the incoming requests till there is a free thread.
     *
     * @throws RuntimeException if the queue stays full for longer than the configured give-up
     *     time, or if the calling thread is interrupted while waiting.
     */
    @Override
    public <T> Future<T> submit(Callable<T> task) {
        synchronized (this) {
            active.incrementAndGet();
            boolean submitted = false;
            try {
                long timeout = 0;
                while (queue.remainingCapacity() == 0) {
                    try {
                        if (timeout <= giveupTime) {
                            Thread.sleep(DEFAULT_SLEEP);
                            timeout += DEFAULT_SLEEP;
                        } else {
                            throw new RuntimeException("Timed out because TPE is too busy...");
                        }
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag for callers up the stack.
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    }
                }
                Future<T> future = super.submit(task);
                submitted = true;
                return future;
            } finally {
                // Fix: if the task never made it into the pool (timeout, interrupt,
                // or rejection), roll back the counter; previously it leaked and
                // sleepTillEmpty() would wait forever for a task that never ran.
                if (!submitted) active.decrementAndGet();
            }
        }
    }

    @Override
    protected void afterExecute(Runnable r, Throwable t) {
        super.afterExecute(r, t);
        active.decrementAndGet();
    }

    /** blocking call to test if the threads are done or not. */
    public void sleepTillEmpty() {
        long timeout = 0;
        while (!queue.isEmpty() || (active.get() > 0)) {
            try {
                if (timeout <= giveupTime) {
                    Thread.sleep(DEFAULT_SLEEP);
                    timeout += DEFAULT_SLEEP;
                    logger.debug(
                            "After Sleeping for empty: {}, Count: {}", queue.size(), active.get());
                } else {
                    throw new RuntimeException("Timed out because TPE is too busy...");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }
}
| 3,267 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/ExecutionException.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
/**
 * Checked exception used by Priam's scheduler tasks (e.g. thrown by {@link Task#initialize()}).
 *
 * <p>Note: this is Priam's own type, unrelated to
 * {@code java.util.concurrent.ExecutionException}.
 */
public class ExecutionException extends Exception {
    private static final long serialVersionUID = 1L;

    public ExecutionException(String msg, Throwable th) {
        super(msg, th);
    }

    public ExecutionException(String msg) {
        super(msg);
    }

    public ExecutionException(Exception ex) {
        super(ex);
    }

    public ExecutionException(Throwable th) {
        super(th);
    }
}
| 3,268 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/NamedThreadPoolExecutor.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.*;
/**
 * Fixed-size {@link ThreadPoolExecutor} whose daemon worker threads carry the given pool name,
 * and whose rejection handler blocks the submitter until the queue has room.
 */
public class NamedThreadPoolExecutor extends ThreadPoolExecutor {
    public NamedThreadPoolExecutor(int poolSize, String poolName) {
        this(poolSize, poolName, new LinkedBlockingQueue<>());
    }

    public NamedThreadPoolExecutor(int poolSize, String poolName, BlockingQueue<Runnable> queue) {
        super(
                poolSize,
                poolSize,
                1000,
                TimeUnit.MILLISECONDS,
                queue,
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat(poolName + "-%d").build(),
                new LocalRejectedExecutionHandler(queue));
    }

    /**
     * Rejection handler that blocks the submitting thread, retrying the enqueue
     * until space frees up or the executor shuts down.
     */
    private static class LocalRejectedExecutionHandler implements RejectedExecutionHandler {
        private final BlockingQueue<Runnable> queue;

        LocalRejectedExecutionHandler(BlockingQueue<Runnable> queue) {
            this.queue = queue;
        }

        public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
            boolean interrupted = false;
            try {
                while (true) {
                    if (executor.isShutdown())
                        throw new RejectedExecutionException("ThreadPoolExecutor has shut down");
                    try {
                        if (queue.offer(task, 1000, TimeUnit.MILLISECONDS)) break;
                    } catch (InterruptedException e) {
                        // Fix: the interrupt used to be silently dropped; keep
                        // retrying as before but remember it so the flag can be
                        // restored for the caller once we are done.
                        interrupted = true;
                    }
                }
            } finally {
                if (interrupted) Thread.currentThread().interrupt();
            }
        }
    }
}
| 3,269 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/Task.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
import com.netflix.priam.config.IConfiguration;
import java.util.concurrent.atomic.AtomicInteger;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Task class that should be implemented by all cron tasks. Jobconf will contain any instance
* specific data
*
* <p>NOTE: Constructor must not throw any exception. This will cause Quartz to set the job to
* failure
*/
public abstract class Task implements Job {
    public STATE status = STATE.DONE;

    public enum STATE {
        ERROR,
        RUNNING,
        DONE,
        NOT_APPLICABLE
    }

    protected final IConfiguration config;

    private static final Logger logger = LoggerFactory.getLogger(Task.class);

    // Lifetime counters exposed via getErrorCount()/getExecutionCount().
    private final AtomicInteger errors = new AtomicInteger();
    private final AtomicInteger executions = new AtomicInteger();

    protected Task(IConfiguration config) {
        this.config = config;
    }

    /** This method has to be implemented and cannot throw any exception. */
    public void initialize() throws ExecutionException {
        // nothing to initialize
    }

    public abstract void execute() throws Exception;

    /** Main method to execute a task */
    public void execute(JobExecutionContext context) throws JobExecutionException {
        executions.incrementAndGet();
        try {
            // Skip overlapping runs; note the execution counter above still
            // increments in that case.
            if (status == STATE.RUNNING) return;
            status = STATE.RUNNING;
            execute();
        } catch (Throwable e) {
            status = STATE.ERROR;
            // Fix: pass the throwable to the logger (stack trace included in the
            // log) instead of the former e.printStackTrace() to stderr.
            logger.error("Could not execute the task: {}", getName(), e);
            errors.incrementAndGet();
        }
        if (status != STATE.ERROR) status = STATE.DONE;
    }

    public STATE state() {
        return status;
    }

    public int getErrorCount() {
        return errors.get();
    }

    public int getExecutionCount() {
        return executions.get();
    }

    public abstract String getName();
}
| 3,270 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/SimpleTimer.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
import com.google.common.base.Preconditions;
import java.text.ParseException;
import java.time.Instant;
import java.util.Date;
import org.quartz.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* SimpleTimer allows jobs to run starting from specified time occurring at regular frequency's.
* Frequency of the execution timestamp since epoch.
*/
public class SimpleTimer implements TaskTimer {
    private static final Logger logger = LoggerFactory.getLogger(SimpleTimer.class);
    private final Trigger trigger;

    /** Repeats forever every {@code interval} milliseconds; misfires fire immediately. */
    public SimpleTimer(String name, long interval) {
        this.trigger =
                TriggerBuilder.newTrigger()
                        .withIdentity(name)
                        .withSchedule(
                                SimpleScheduleBuilder.simpleSchedule()
                                        .withIntervalInMilliseconds(interval)
                                        .repeatForever()
                                        .withMisfireHandlingInstructionFireNow())
                        .build();
    }

    /** Run forever every @period seconds starting at @start */
    public SimpleTimer(String name, int period, Instant start) {
        Preconditions.checkArgument(period > 0);
        // Start time must be at or after the epoch.
        Preconditions.checkArgument(start.compareTo(Instant.EPOCH) >= 0);
        this.trigger =
                TriggerBuilder.newTrigger()
                        .withIdentity(name)
                        .withSchedule(
                                CalendarIntervalScheduleBuilder.calendarIntervalSchedule()
                                        .withMisfireHandlingInstructionFireAndProceed()
                                        .withIntervalInSeconds(period))
                        .startAt(Date.from(start))
                        .build();
    }

    /** Run once at given time (epoch milliseconds), in the given trigger group. */
    public SimpleTimer(String name, String group, long startTime) {
        this.trigger =
                TriggerBuilder.newTrigger()
                        .withIdentity(name, group)
                        .withSchedule(
                                SimpleScheduleBuilder.simpleSchedule()
                                        .withMisfireHandlingInstructionFireNow())
                        .startAt(new Date(startTime))
                        .build();
    }

    /** Run immediately and don't run again. */
    public SimpleTimer(String name) {
        this.trigger =
                TriggerBuilder.newTrigger()
                        .withIdentity(name, Scheduler.DEFAULT_GROUP)
                        .withSchedule(
                                SimpleScheduleBuilder.simpleSchedule()
                                        .withMisfireHandlingInstructionFireNow())
                        .startNow()
                        .build();
    }

    /**
     * Factory: an interval <= 0 disables the job and yields null; otherwise a
     * forever-repeating timer with the given interval (milliseconds) is returned.
     */
    public static SimpleTimer getSimpleTimer(final String jobName, final long interval)
            throws IllegalArgumentException {
        SimpleTimer simpleTimer = null;
        if (interval <= 0) {
            logger.info(
                    "Skipping {} as it is disabled via setting {} to {}.",
                    jobName,
                    jobName,
                    interval);
        } else {
            simpleTimer = new SimpleTimer(jobName, interval);
            logger.info("Starting {} with interval of {}", jobName, interval);
        }
        return simpleTimer;
    }

    public Trigger getTrigger() throws ParseException {
        return trigger;
    }

    /** @return null: SimpleTimer schedules by interval, not by cron expression. */
    @Override
    public String getCronExpression() {
        return null;
    }
}
| 3,271 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/scheduler/PriamScheduler.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.scheduler;
import com.netflix.priam.utils.Sleeper;
import java.text.ParseException;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.quartz.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Scheduling class to schedule Priam tasks. Uses Quartz scheduler */
@Singleton
public class PriamScheduler {
    private static final Logger logger = LoggerFactory.getLogger(PriamScheduler.class);
    // Underlying Quartz scheduler; all jobs/triggers live in Scheduler.DEFAULT_GROUP.
    private final Scheduler scheduler;
    // Creates job instances through Guice so tasks receive injected dependencies.
    private final GuiceJobFactory jobFactory;
    private final Sleeper sleeper;

    @Inject
    public PriamScheduler(SchedulerFactory factory, GuiceJobFactory jobFactory, Sleeper sleeper) {
        try {
            this.scheduler = factory.getScheduler();
            this.scheduler.setJobFactory(jobFactory);
            this.jobFactory = jobFactory;
        } catch (SchedulerException e) {
            throw new RuntimeException(e);
        }
        this.sleeper = sleeper;
    }

    /** Add a task to the scheduler */
    public void addTask(String name, Class<? extends Task> taskclass, TaskTimer timer)
            throws SchedulerException, ParseException {
        assert timer != null : "Cannot add scheduler task " + name + " as no timer is set";
        JobDetail job =
                JobBuilder.newJob()
                        .withIdentity(name, Scheduler.DEFAULT_GROUP)
                        .ofType(taskclass)
                        .build();
        // Cron-based timers report their expression; interval timers return
        // null/empty from getCronExpression().
        if (timer.getCronExpression() != null && !timer.getCronExpression().isEmpty()) {
            logger.info(
                    "Scheduled task metadata. Task name: {}" + ", cron expression: {}",
                    taskclass.getName(),
                    timer.getCronExpression());
        } else {
            logger.info("Scheduled task metadata. Task name: {}", taskclass.getName());
        }
        scheduler.scheduleJob(job, timer.getTrigger());
    }

    /** Add a delayed task to the scheduler */
    // NOTE(review): scheduling failures in the background thread are logged but
    // never surfaced to the caller.
    public void addTaskWithDelay(
            final String name,
            Class<? extends Task> taskclass,
            final TaskTimer timer,
            final int delayInSeconds) {
        assert timer != null : "Cannot add scheduler task " + name + " as no timer is set";
        final JobDetail job =
                JobBuilder.newJob()
                        .withIdentity(name, Scheduler.DEFAULT_GROUP)
                        .ofType(taskclass)
                        .build();

        // we know Priam doesn't do too many new tasks, so this is probably easy/safe/simple
        new Thread(
                        () -> {
                            try {
                                sleeper.sleepQuietly(delayInSeconds * 1000L);
                                scheduler.scheduleJob(job, timer.getTrigger());
                            } catch (SchedulerException e) {
                                logger.warn(
                                        "problem occurred while scheduling a job with name {}",
                                        name,
                                        e);
                            } catch (ParseException e) {
                                logger.warn(
                                        "problem occurred while parsing a job with name {}",
                                        name,
                                        e);
                            }
                        })
                .start();
    }

    /** Runs the task synchronously on the calling thread, bypassing Quartz scheduling. */
    public void runTaskNow(Class<? extends Task> taskclass) throws Exception {
        jobFactory.guice.getInstance(taskclass).execute(null);
    }

    /** Removes the named job and any triggers associated with it. */
    public void deleteTask(String name) throws SchedulerException {
        TriggerKey triggerKey = TriggerKey.triggerKey(name, Scheduler.DEFAULT_GROUP);
        // Check if trigger exists for the job. If there is a trigger, we want to remove those
        // trigger.
        if (scheduler.checkExists(triggerKey)) {
            logger.info("Removing triggers for the job: {}", name);
            scheduler.pauseTrigger(triggerKey);
            scheduler.unscheduleJob(triggerKey);
        }

        // Check if any job exists for the key provided. If yes, we want to delete the job.
        JobKey jobKey = JobKey.jobKey(name, Scheduler.DEFAULT_GROUP);
        if (scheduler.checkExists(jobKey)) {
            logger.info("Removing job from scheduler: {}", name);
            scheduler.deleteJob(jobKey);
        }
    }

    public final Scheduler getScheduler() {
        return scheduler;
    }

    public void shutdown() {
        try {
            scheduler.shutdown();
        } catch (SchedulerException e) {
            throw new RuntimeException(e);
        }
    }

    public void start() {
        try {
            scheduler.start();
        } catch (SchedulerException ex) {
            throw new RuntimeException(ex);
        }
    }
}
| 3,272 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/SystemUtils.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import com.google.common.base.Charsets;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import java.io.*;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.MessageDigest;
import java.util.List;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SystemUtils {
    private static final Logger logger = LoggerFactory.getLogger(SystemUtils.class);

    /**
     * Fetches the body of the given URL with an HTTP GET (1s connect/read timeouts).
     *
     * @param url URL to fetch
     * @return response body decoded as UTF-8
     * @throws RuntimeException if the request fails or a non-200 status is returned
     */
    public static String getDataFromUrl(String url) {
        try {
            HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
            conn.setConnectTimeout(1000);
            conn.setReadTimeout(1000);
            conn.setRequestMethod("GET");
            try {
                if (conn.getResponseCode() != 200) {
                    throw new RuntimeException("Unable to get data for URL " + url);
                }
                ByteArrayOutputStream bos = new ByteArrayOutputStream();
                // Read the raw response stream directly; the previous cast of
                // getContent() to FilterInputStream could fail for some content types.
                try (InputStream in = conn.getInputStream()) {
                    byte[] buffer = new byte[2048];
                    int read;
                    while ((read = in.read(buffer, 0, buffer.length)) != -1)
                        bos.write(buffer, 0, read);
                }
                String body = new String(bos.toByteArray(), Charsets.UTF_8);
                logger.info("Calling URL API: {} returns: {}", url, body);
                return body;
            } finally {
                // Release the connection on all paths, not just on success.
                conn.disconnect();
            }
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

    /**
     * delete all the files/dirs in the given Directory but do not delete the dir itself.
     *
     * @param dirPath The directory path where all the child directories exist.
     * @param childdirs List of child directories to be cleaned up in the dirPath. When null or
     *     empty, the whole of {@code dirPath} is cleaned instead.
     * @throws IOException If there is any error encountered during cleanup.
     */
    public static void cleanupDir(String dirPath, List<String> childdirs) throws IOException {
        if (childdirs == null || childdirs.isEmpty()) {
            FileUtils.cleanDirectory(new File(dirPath));
        } else {
            // File(parent, child) avoids manual path-separator handling.
            for (String cdir : childdirs) FileUtils.cleanDirectory(new File(dirPath, cdir));
        }
    }

    /**
     * Creates the directory (and any missing parents) at the given location.
     *
     * @param location directory path to create
     * @throws IOException if the directory cannot be created
     */
    public static void createDirs(String location) throws IOException {
        FileUtils.forceMkdir(new File(location));
    }

    /**
     * Computes the raw MD5 digest of the given bytes.
     *
     * @param buf bytes to digest
     * @return 16-byte MD5 digest
     */
    public static byte[] md5(byte[] buf) {
        try {
            MessageDigest mdigest = MessageDigest.getInstance("MD5");
            mdigest.update(buf, 0, buf.length);
            return mdigest.digest();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Calculate the MD5 hashsum of the given file.
     *
     * @param file File for which md5 checksum should be calculated.
     * @return Get a Md5 string which is similar to OS Md5sum
     */
    public static String md5(File file) {
        try {
            HashCode hc = Files.hash(file, Hashing.md5());
            return toHex(hc.asBytes());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Renders a digest as lowercase hex, two characters per byte (same output format as the
     * OS md5sum tool).
     *
     * @param digest bytes to render
     * @return lowercase hex string of length {@code digest.length * 2}
     */
    public static String toHex(byte[] digest) {
        StringBuilder sb = new StringBuilder(digest.length * 2);
        for (byte aDigest : digest) {
            // Mask to an unsigned value so negative bytes do not widen to 8 hex chars;
            // Integer.toHexString already produces lowercase output.
            int v = aDigest & 0xff;
            if (v < 0x10) sb.append('0');
            sb.append(Integer.toHexString(v));
        }
        return sb.toString();
    }

    /**
     * Encodes the given bytes as (non-chunked) Base64.
     *
     * @param md5 bytes to encode
     * @return Base64 string
     */
    public static String toBase64(byte[] md5) {
        byte[] encoded = Base64.encodeBase64(md5, false);
        return new String(encoded);
    }
}
| 3,273 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/ITokenManager.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import com.google.inject.ImplementedBy;
import java.math.BigInteger;
import java.util.List;
@ImplementedBy(TokenManager.class)
public interface ITokenManager {
    /**
     * Creates the initial token for the node occupying the given slot, offset by a value
     * derived from the region name.
     *
     * @param mySlot position of this node among {@code totalCount} slots
     * @param totalCount total number of slots the token space is divided into
     * @param region region name used to derive the per-region offset
     * @return the token as a decimal string
     */
    String createToken(int mySlot, int totalCount, String region);

    /**
     * Finds the token in {@code tokenList} closest to {@code tokenToSearch}.
     *
     * @param tokenToSearch token to locate
     * @param tokenList candidate tokens; must not be empty
     * @return the closest token from the list
     */
    BigInteger findClosestToken(BigInteger tokenToSearch, List<BigInteger> tokenList);

    /**
     * Derives a numeric offset from the region name, added to generated tokens.
     *
     * @param region region name
     * @return offset for the region
     */
    int regionOffset(String region);
}
| 3,274 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/FifoQueue.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import java.util.Comparator;
import java.util.TreeSet;
public class FifoQueue<E extends Comparable<E>> extends TreeSet<E> {
private static final long serialVersionUID = -7388604551920505669L;
private final int capacity;
public FifoQueue(int capacity) {
super(Comparator.naturalOrder());
this.capacity = capacity;
}
public FifoQueue(int capacity, Comparator<E> comparator) {
super(comparator);
this.capacity = capacity;
}
public synchronized void adjustAndAdd(E e) {
add(e);
if (capacity < size()) pollFirst();
}
}
| 3,275 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/TokenManager.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Ordering;
import com.netflix.priam.config.IConfiguration;
import java.math.BigInteger;
import java.util.Collections;
import java.util.List;
import javax.inject.Inject;
public class TokenManager implements ITokenManager {
    // Token-space bounds for RandomPartitioner: [0, 2^127].
    public static final BigInteger MINIMUM_TOKEN_RANDOM = BigInteger.ZERO;
    public static final BigInteger MAXIMUM_TOKEN_RANDOM = new BigInteger("2").pow(127);
    // Token-space bounds for Murmur3Partitioner: [-2^63, 2^63].
    public static final BigInteger MINIMUM_TOKEN_MURMUR3 = new BigInteger("-2").pow(63);
    public static final BigInteger MAXIMUM_TOKEN_MURMUR3 = new BigInteger("2").pow(63);
    private final BigInteger minimumToken;
    private final BigInteger maximumToken;
    // Width of the token space (maximum - minimum) for the configured partitioner.
    private final BigInteger tokenRangeSize;
    private final IConfiguration config;
    @Inject
    public TokenManager(IConfiguration config) {
        this.config = config;
        // Select the range matching the configured partitioner; anything other than
        // Murmur3 falls back to the RandomPartitioner range.
        if ("org.apache.cassandra.dht.Murmur3Partitioner".equals(this.config.getPartitioner())) {
            minimumToken = MINIMUM_TOKEN_MURMUR3;
            maximumToken = MAXIMUM_TOKEN_MURMUR3;
        } else {
            minimumToken = MINIMUM_TOKEN_RANDOM;
            maximumToken = MAXIMUM_TOKEN_RANDOM;
        }
        tokenRangeSize = maximumToken.subtract(minimumToken);
    }
    /**
     * Calculate a token for the given position, evenly spaced from other size-1 nodes. See
     * http://wiki.apache.org/cassandra/Operations.
     *
     * @param size number of slots by which the token space will be divided
     * @param position slot number, multiplier
     * @param offset added to token
     * @return MAXIMUM_TOKEN / size * position + offset, if <= MAXIMUM_TOKEN, otherwise wrap around
     *     the MINIMUM_TOKEN
     */
    @VisibleForTesting
    BigInteger initialToken(int size, int position, int offset) {
        Preconditions.checkArgument(size > 0, "size must be > 0");
        Preconditions.checkArgument(offset >= 0, "offset must be >= 0");
        /*
         * TODO: Is this it valid to add "&& position < size" to the following precondition? This currently causes
         * unit test failures.
         */
        Preconditions.checkArgument(position >= 0, "position must be >= 0");
        return tokenRangeSize
                .divide(BigInteger.valueOf(size))
                .multiply(BigInteger.valueOf(position))
                .add(BigInteger.valueOf(offset))
                .add(minimumToken);
    }
    /**
     * @return the token for slot {@code my_slot} of {@code totalCount}, offset by a hash of
     *     the region name, rendered as a decimal string.
     */
    @Override
    public String createToken(int my_slot, int totalCount, String region) {
        return initialToken(totalCount, my_slot, regionOffset(region)).toString();
    }
    /**
     * Finds the element of {@code tokenList} whose value is numerically closest to
     * {@code tokenToSearch}; on an exact tie the predecessor wins.
     *
     * @param tokenToSearch token to locate
     * @param tokenList candidate tokens; must not be empty
     * @return the closest token from the list
     */
    @Override
    public BigInteger findClosestToken(BigInteger tokenToSearch, List<BigInteger> tokenList) {
        Preconditions.checkArgument(!tokenList.isEmpty(), "token list must not be empty");
        List<BigInteger> sortedTokens = Ordering.natural().sortedCopy(tokenList);
        int index = Collections.binarySearch(sortedTokens, tokenToSearch, Ordering.natural());
        if (index < 0) {
            // Not found: binarySearch returned -(insertionPoint) - 1, so this recovers
            // the insertion point (index of the first element greater than the key).
            int i = Math.abs(index) - 1;
            // Step back to the predecessor when the insertion point is past the end of
            // the list, or when the predecessor is strictly closer than the successor.
            if ((i >= sortedTokens.size())
                    || (i > 0
                            && sortedTokens
                                            .get(i)
                                            .subtract(tokenToSearch)
                                            .compareTo(
                                                    tokenToSearch.subtract(sortedTokens.get(i - 1)))
                                    > 0)) --i;
            return sortedTokens.get(i);
        }
        return sortedTokens.get(index);
    }
    /** Create an offset to add to token values by hashing the region name. */
    @Override
    public int regionOffset(String region) {
        // NOTE(review): Math.abs(Integer.MIN_VALUE) is still negative, so a region whose
        // hashCode is exactly Integer.MIN_VALUE would produce a negative offset. Left
        // unchanged because altering the formula would shift tokens on existing rings.
        return Math.abs(region.hashCode());
    }
}
| 3,276 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/Sleeper.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import com.google.inject.ImplementedBy;
/** An abstraction to {@link Thread#sleep(long)} so we can mock it in tests. */
@ImplementedBy(ThreadSleeper.class)
public interface Sleeper {
    /**
     * Sleeps for the given number of milliseconds.
     *
     * @param waitTimeMs time to sleep, in milliseconds
     * @throws InterruptedException if the sleeping thread is interrupted
     */
    void sleep(long waitTimeMs) throws InterruptedException;

    /**
     * Sleeps for the given number of milliseconds without propagating
     * {@link InterruptedException}.
     *
     * @param waitTimeMs time to sleep, in milliseconds
     */
    void sleepQuietly(long waitTimeMs);
}
| 3,277 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/DateUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.time.temporal.ChronoUnit;
import java.util.Date;
import javax.inject.Singleton;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.client.utils.DateUtils;
/** Utility functions for date. Created by aagrawal on 7/10/17. */
@Singleton
public class DateUtil {
    public static final String yyyyMMdd = "yyyyMMdd";
    public static final String yyyyMMddHHmm = "yyyyMMddHHmm";

    // Parse order matters: try the more specific date-time pattern before date-only.
    private static final String[] patterns = {yyyyMMddHHmm, yyyyMMdd};
    private static final ZoneId defaultZoneId = ZoneId.systemDefault();
    private static final ZoneId utcZoneId = ZoneId.of("UTC");

    /**
     * Format the given date in format yyyyMMdd
     *
     * @param date to format
     * @return date formatted in yyyyMMdd, or null when the input is null
     */
    public static String formatyyyyMMdd(Date date) {
        if (date == null) return null;
        return DateUtils.formatDate(date, yyyyMMdd);
    }

    /**
     * Format the given date in format yyyyMMddHHmm
     *
     * @param date to format
     * @return date formatted in yyyyMMddHHmm, or null when the input is null
     */
    public static String formatyyyyMMddHHmm(Date date) {
        if (date == null) return null;
        return DateUtils.formatDate(date, yyyyMMddHHmm);
    }

    /**
     * Format the given date in given format
     *
     * @param date to format
     * @param pattern e.g. yyyyMMddHHmm
     * @return formatted date
     */
    public static String formatDate(Date date, String pattern) {
        return DateUtils.formatDate(date, pattern);
    }

    /**
     * Parse the string to date
     *
     * @param date to parse. Accepted formats are yyyyMMddHHmm and yyyyMMdd
     * @return the parsed date or null if input could not be parsed
     */
    public static Date getDate(String date) {
        if (StringUtils.isEmpty(date)) return null;
        return DateUtils.parseDate(date, patterns);
    }

    /**
     * Convert date to LocalDateTime using system default zone.
     *
     * @param date Date to be transformed
     * @return converted date to LocalDateTime, or null when the input is null
     */
    public static LocalDateTime convert(Date date) {
        if (date == null) return null;
        return date.toInstant().atZone(defaultZoneId).toLocalDateTime();
    }

    /**
     * Format the given date in format yyyyMMdd
     *
     * @param date to format
     * @return date formatted in yyyyMMdd, or null when the input is null
     */
    public static String formatyyyyMMdd(LocalDateTime date) {
        if (date == null) return null;
        return date.format(DateTimeFormatter.ofPattern(yyyyMMdd));
    }

    /**
     * Format the given date in format yyyyMMddHHmm
     *
     * @param date to format
     * @return date formatted in yyyyMMddHHmm, or null when the input is null
     */
    public static String formatyyyyMMddHHmm(LocalDateTime date) {
        if (date == null) return null;
        return date.format(DateTimeFormatter.ofPattern(yyyyMMddHHmm));
    }

    /**
     * Parse the string to LocalDateTime
     *
     * @param date to parse. Accepted formats are yyyyMMddHHmm and yyyyMMdd
     * @return the parsed LocalDateTime or null if input could not be parsed
     */
    public static LocalDateTime getLocalDateTime(String date) {
        if (StringUtils.isEmpty(date)) return null;
        try {
            return LocalDateTime.parse(date, DateTimeFormatter.ofPattern(yyyyMMddHHmm));
        } catch (DateTimeParseException e) {
            // Fall back to the date-only pattern, interpreted as midnight.
            try {
                return LocalDate.parse(date, DateTimeFormatter.ofPattern(yyyyMMdd)).atTime(0, 0);
            } catch (DateTimeParseException ex) {
                return null;
            }
        }
    }

    /**
     * Return the current instant
     *
     * @return the instant
     */
    public static Instant getInstant() {
        return Instant.now();
    }

    /**
     * Format the instant based on the pattern passed, rendering it in UTC. If instant or
     * pattern is null, null is returned.
     *
     * @param pattern pattern used to format the instant
     * @param instant Instant in time
     * @return The formatted instant based on the pattern. Null, if pattern or instant is null.
     */
    public static String formatInstant(String pattern, Instant instant) {
        if (instant == null || StringUtils.isEmpty(pattern)) return null;
        DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern).withZone(utcZoneId);
        return formatter.format(instant);
    }

    /**
     * Parse the dateTime string to Instant based on the predefined set of patterns
     * (yyyyMMddHHmm, then yyyyMMdd), interpreting the result as UTC.
     *
     * @param dateTime DateTime string that needs to be parsed.
     * @return Instant object depicting the date/time, or null if input could not be parsed.
     */
    public static Instant parseInstant(String dateTime) {
        LocalDateTime localDateTime = getLocalDateTime(dateTime);
        if (localDateTime == null) return null;
        return localDateTime.atZone(utcZoneId).toInstant();
    }

    /** A time range bounded by two instants, parseable from "start,end" strings. */
    public static class DateRange {
        Instant startTime;
        Instant endTime;

        public DateRange(Instant startTime, Instant endTime) {
            this.startTime = startTime;
            this.endTime = endTime;
        }

        /**
         * Parses a range from a "start,end" string (patterns yyyyMMddHHmm or yyyyMMdd). A
         * blank input or the literal "default" yields the 24 hours ending now.
         * NOTE(review): an input without a comma still fails with
         * ArrayIndexOutOfBoundsException, matching historical behavior.
         *
         * @param daterange range specification, e.g. "202001010000,202001020000"
         */
        public DateRange(String daterange) {
            if (StringUtils.isBlank(daterange) || daterange.equalsIgnoreCase("default")) {
                endTime = getInstant();
                startTime = endTime.minus(1, ChronoUnit.DAYS);
            } else {
                String[] dates = daterange.split(",");
                startTime = parseInstant(dates[0]);
                endTime = parseInstant(dates[1]);
            }
        }

        /**
         * Returns the longest common prefix of the start and end times rendered as
         * epoch-millisecond strings, or the empty string when either bound is null.
         */
        public String match() {
            if (startTime == null || endTime == null) return StringUtils.EMPTY;
            String sString = startTime.toEpochMilli() + "";
            String eString = endTime.toEpochMilli() + "";
            int diff = StringUtils.indexOfDifference(sString, eString);
            // indexOfDifference returns -1 when the two strings are identical.
            if (diff < 0) return sString;
            return sString.substring(0, diff);
        }

        public Instant getStartTime() {
            return startTime;
        }

        public Instant getEndTime() {
            return endTime;
        }

        public String toString() {
            return GsonJsonSerializer.getGson().toJson(this);
        }

        /**
         * Equality is based on the epoch-millisecond value of both bounds. Unlike the
         * previous version, a null argument now returns false instead of throwing
         * NullPointerException, per the equals contract.
         */
        @Override
        public boolean equals(Object obj) {
            if (this == obj) return true;
            if (obj == null || !obj.getClass().equals(this.getClass())) return false;
            DateRange other = (DateRange) obj;
            return startTime.toEpochMilli() == other.startTime.toEpochMilli()
                    && endTime.toEpochMilli() == other.endTime.toEpochMilli();
        }

        /** Added to honor the equals/hashCode contract (previously missing). */
        @Override
        public int hashCode() {
            return 31 * Long.hashCode(startTime.toEpochMilli())
                    + Long.hashCode(endTime.toEpochMilli());
        }
    }
}
| 3,278 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/ThreadSleeper.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
/** Sleeper impl that delegates to Thread.sleep */
public class ThreadSleeper implements Sleeper {
    /** Sleeps for the requested duration via {@link Thread#sleep(long)}. */
    @Override
    public void sleep(long waitTimeMs) throws InterruptedException {
        Thread.sleep(waitTimeMs);
    }

    /**
     * Sleeps without propagating {@link InterruptedException}. If the sleep is interrupted,
     * the thread's interrupt status is restored so callers further up the stack can still
     * observe the interruption (previously the flag was silently swallowed).
     */
    @Override
    public void sleepQuietly(long waitTimeMs) {
        try {
            sleep(waitTimeMs);
        } catch (InterruptedException e) {
            // Re-assert the interrupt flag instead of discarding it.
            Thread.currentThread().interrupt();
        }
    }
}
| 3,279 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/RetryableCallable.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class RetryableCallable<T> implements Callable<T> {
    private static final Logger logger = LoggerFactory.getLogger(RetryableCallable.class);
    private static final int DEFAULT_NUMBER_OF_RETRIES = 15;
    private static final long DEFAULT_WAIT_TIME = 100;

    // Retry policy: maximum number of attempts and pause between them (ms).
    private int retrys;
    private long waitTime;

    /** Creates a callable retried up to 15 times with a 100 ms pause between attempts. */
    public RetryableCallable() {
        this(DEFAULT_NUMBER_OF_RETRIES, DEFAULT_WAIT_TIME);
    }

    /**
     * Creates a callable with a custom retry policy.
     *
     * @param retrys number of attempts before the last exception is rethrown
     * @param waitTime pause between attempts, in milliseconds
     */
    public RetryableCallable(int retrys, long waitTime) {
        set(retrys, waitTime);
    }

    /**
     * Reconfigures the retry policy.
     *
     * @param retrys number of attempts before the last exception is rethrown
     * @param waitTime pause between attempts, in milliseconds
     */
    public void set(int retrys, long waitTime) {
        this.retrys = retrys;
        this.waitTime = waitTime;
    }

    /** The single attempt; invoked repeatedly until it succeeds or retries are exhausted. */
    protected abstract T retriableCall() throws Exception;

    /**
     * Invokes {@link #retriableCall()} until it returns, the retry budget is exhausted, or
     * a {@link CancellationException} is thrown (which is never retried).
     */
    public T call() throws Exception {
        boolean stackTraceLogged = false;
        for (int attempt = 1; ; attempt++) {
            try {
                return retriableCall();
            } catch (CancellationException e) {
                // Cancellation is deliberate; propagate immediately.
                throw e;
            } catch (Exception e) {
                if (attempt == retrys) {
                    throw e;
                }
                logger.error("Retry #{} for: {}", attempt, e.getMessage());
                // Log the full stack trace only for the first failure to limit log spam.
                if (!stackTraceLogged && logger.isErrorEnabled()) {
                    stackTraceLogged = true;
                    logger.error("Exception --> " + ExceptionUtils.getStackTrace(e));
                }
                Thread.sleep(waitTime);
            } finally {
                forEachExecution();
            }
        }
    }

    /** Hook invoked after every attempt, including the final failing one. */
    protected void forEachExecution() {
        // do nothing by default.
    }
}
| 3,280 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/MaxSizeHashMap.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import java.util.LinkedHashMap;
import java.util.Map;
/** Created by aagrawal on 7/11/17. */
/*
Limit the size of the hashmap using FIFO algorithm.
*/
public class MaxSizeHashMap<K, V> extends LinkedHashMap<K, V> {

    /** Upper bound on the number of entries; the eldest entry is dropped beyond this. */
    private final int capacity;

    /**
     * Creates a map that evicts its eldest (first-inserted) entry once more than
     * {@code maxSize} entries are present — i.e. FIFO eviction.
     *
     * @param maxSize maximum number of entries to retain
     */
    public MaxSizeHashMap(int maxSize) {
        this.capacity = maxSize;
    }

    /** Evicts the eldest entry whenever the map has grown past its bound. */
    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > capacity;
    }
}
}
| 3,281 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/ExponentialRetryCallable.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import java.util.concurrent.CancellationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class ExponentialRetryCallable<T> extends RetryableCallable<T> {
public static final long MAX_SLEEP = 240000;
public static final long MIN_SLEEP = 200;
private static final Logger logger = LoggerFactory.getLogger(ExponentialRetryCallable.class);
private final long max;
private final long min;
public ExponentialRetryCallable() {
this.max = MAX_SLEEP;
this.min = MIN_SLEEP;
}
public ExponentialRetryCallable(long minSleep, long maxSleep) {
this.max = maxSleep;
this.min = minSleep;
}
public T call() throws Exception {
long delay = min; // ms
while (true) {
try {
return retriableCall();
} catch (CancellationException e) {
throw e;
} catch (Exception e) {
delay *= 2;
if (delay > max) {
throw e;
}
logger.error(e.getMessage());
Thread.sleep(delay);
} finally {
forEachExecution();
}
}
}
}
| 3,282 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/BoundedExponentialRetryCallable.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import java.util.concurrent.CancellationException;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class BoundedExponentialRetryCallable<T> extends RetryableCallable<T> {
    // Defaults: backoff starts at 1s, is capped at 10s, with at most 10 retries.
    protected static final long MAX_SLEEP = 10000;
    protected static final long MIN_SLEEP = 1000;
    protected static final int MAX_RETRIES = 10;
    private static final Logger logger =
            LoggerFactory.getLogger(BoundedExponentialRetryCallable.class);
    private final long max;
    private final long min;
    private final int maxRetries;
    private final ThreadSleeper sleeper = new ThreadSleeper();
    /** Retries with the default bounds: 1s initial backoff, 10s cap, at most 10 retries. */
    public BoundedExponentialRetryCallable() {
        this.max = MAX_SLEEP;
        this.min = MIN_SLEEP;
        this.maxRetries = MAX_RETRIES;
    }
    /**
     * Retries with custom bounds.
     *
     * @param minSleep initial backoff in milliseconds (doubled after each early failure)
     * @param maxSleep ceiling for the backoff in milliseconds
     * @param maxNumRetries number of retries after which the last exception is rethrown
     */
    public BoundedExponentialRetryCallable(long minSleep, long maxSleep, int maxNumRetries) {
        this.max = maxSleep;
        this.min = minSleep;
        this.maxRetries = maxNumRetries;
    }
    /**
     * Invokes {@link #retriableCall()} with exponential backoff bounded by {@code max},
     * retrying at most {@code maxRetries} times before rethrowing the last exception.
     */
    public T call() throws Exception {
        long delay = min; // ms
        int retry = 0;
        int logCounter = 0;
        while (true) {
            try {
                return retriableCall();
            } catch (CancellationException e) {
                // Cancellation is deliberate; propagate immediately, never retry.
                throw e;
            } catch (Exception e) {
                retry++;
                if (delay < max && retry <= maxRetries) {
                    // Below the backoff ceiling: double the delay and retry.
                    delay *= 2;
                    logger.error("Retry #{} for: {}", retry, e.getMessage());
                    // Full stack trace is logged only for the first failure to limit spam.
                    if (++logCounter == 1 && logger.isInfoEnabled())
                        logger.info("Exception --> " + ExceptionUtils.getStackTrace(e));
                    sleeper.sleep(delay);
                } else if (delay >= max && retry <= maxRetries) {
                    // Backoff has saturated: keep retrying at the ceiling, logging each
                    // failure with its stack trace.
                    if (logger.isErrorEnabled()) {
                        logger.error(
                                String.format(
                                        "Retry #%d for: %s",
                                        retry, ExceptionUtils.getStackTrace(e)));
                    }
                    sleeper.sleep(max);
                } else {
                    // Retry budget exhausted: propagate the last failure.
                    throw e;
                }
            } finally {
                forEachExecution();
            }
        }
    }
}
| 3,283 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/utils/GsonJsonSerializer.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.utils;
import com.google.gson.*;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonToken;
import com.google.gson.stream.JsonWriter;
import java.io.IOException;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.time.LocalDateTime;
import java.util.Date;
/** Created by aagrawal on 10/12/17. */
public class GsonJsonSerializer {
    // Shared, pre-configured instance; Gson objects are thread-safe once built.
    private static final Gson gson =
            new GsonBuilder()
                    .serializeSpecialFloatingPointValues()
                    .setPrettyPrinting()
                    .disableHtmlEscaping()
                    .registerTypeAdapter(Date.class, new DateTypeAdapter())
                    .registerTypeAdapter(LocalDateTime.class, new LocalDateTimeTypeAdapter())
                    .registerTypeAdapter(Instant.class, new InstantTypeAdapter())
                    .registerTypeAdapter(Path.class, new PathTypeAdapter())
                    .setExclusionStrategies(new PriamAnnotationExclusionStrategy())
                    .create();

    /** @return the shared Gson instance configured with Priam's type adapters. */
    public static Gson getGson() {
        return gson;
    }

    /** Excludes any field (or class) that is tagged with {@code @GsonIgnore}. */
    public static class PriamAnnotationExclusionStrategy implements ExclusionStrategy {
        public boolean shouldSkipClass(Class<?> clazz) {
            return clazz.getAnnotation(PriamAnnotation.GsonIgnore.class) != null;
        }

        public boolean shouldSkipField(FieldAttributes f) {
            return f.getAnnotation(PriamAnnotation.GsonIgnore.class) != null;
        }
    }

    /** Holder for Priam-specific Gson annotations. */
    public static class PriamAnnotation {
        /** Marks a field or class to be skipped during Gson (de)serialization. */
        @Retention(RetentionPolicy.RUNTIME)
        public @interface GsonIgnore {
            // Field tag only annotation
        }
    }

    /** Serializes {@link Date} as a yyyyMMddHHmm string; tolerates null/empty on read. */
    static class DateTypeAdapter extends TypeAdapter<Date> {
        @Override
        public void write(JsonWriter out, Date value) throws IOException {
            // formatyyyyMMddHHmm returns null for a null date; JsonWriter then emits null.
            out.value(DateUtil.formatyyyyMMddHHmm(value));
        }

        @Override
        public Date read(JsonReader in) throws IOException {
            if (in.peek() == JsonToken.NULL) {
                in.nextNull();
                return null;
            }
            String result = in.nextString();
            if ("".equals(result)) {
                return null;
            }
            return DateUtil.getDate(result);
        }
    }

    /** Serializes {@link LocalDateTime} as a yyyyMMddHHmm string; tolerates null/empty on read. */
    static class LocalDateTimeTypeAdapter extends TypeAdapter<LocalDateTime> {
        @Override
        public void write(JsonWriter out, LocalDateTime value) throws IOException {
            out.value(DateUtil.formatyyyyMMddHHmm(value));
        }

        @Override
        public LocalDateTime read(JsonReader in) throws IOException {
            if (in.peek() == JsonToken.NULL) {
                in.nextNull();
                return null;
            }
            String result = in.nextString();
            if ("".equals(result)) {
                return null;
            }
            return DateUtil.getLocalDateTime(result);
        }
    }

    /** Serializes {@link Instant} as epoch milliseconds; tolerates null/empty on read. */
    static class InstantTypeAdapter extends TypeAdapter<Instant> {
        @Override
        public void write(JsonWriter out, Instant value) throws IOException {
            // JsonWriter#value(Number) writes a JSON null when given null.
            out.value(getEpoch(value));
        }

        /**
         * @return epoch milliseconds, or null for a null instant. The return type is boxed:
         *     the previous primitive {@code long} version threw NullPointerException when
         *     unboxing the null branch of the ternary for a null Instant.
         */
        private Long getEpoch(Instant value) {
            return (value == null) ? null : value.toEpochMilli();
        }

        @Override
        public Instant read(JsonReader in) throws IOException {
            if (in.peek() == JsonToken.NULL) {
                in.nextNull();
                return null;
            }
            String result = in.nextString();
            if ("".equals(result)) {
                return null;
            }
            return Instant.ofEpochMilli(Long.parseLong(result));
        }
    }

    /** Serializes {@link Path} as its file name only; reads a string back into a Path. */
    static class PathTypeAdapter extends TypeAdapter<Path> {
        @Override
        public void write(JsonWriter out, Path value) throws IOException {
            String fileName = (value != null) ? value.toFile().getName() : null;
            out.value(fileName);
        }

        @Override
        public Path read(JsonReader in) throws IOException {
            if (in.peek() == JsonToken.NULL) {
                in.nextNull();
                return null;
            }
            String result = in.nextString();
            if ("".equals(result)) {
                return null;
            }
            return Paths.get(result);
        }
    }
}
| 3,284 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cli/Application.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cli;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.config.IConfiguration;
public class Application {
    // Lazily-built shared Guice injector (unsynchronized lazy init, as before).
    private static Injector injector;

    /** @return the shared injector, building it on first use. */
    static Injector getInjector() {
        if (injector != null) {
            return injector;
        }
        injector = Guice.createInjector(new LightGuiceModule());
        return injector;
    }

    /** Loads and initializes the Priam configuration. */
    static void initialize() {
        getInjector().getInstance(IConfiguration.class).initialize();
    }

    /** Shuts down the backup file-system's worker threads so the JVM can exit. */
    static void shutdownAdditionalThreads() {
        getInjector().getInstance(IBackupFileSystem.class).shutdown();
    }
}
| 3,285 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cli/Backuper.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cli;
import com.netflix.priam.backup.SnapshotBackup;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Backuper {
    private static final Logger logger = LoggerFactory.getLogger(Backuper.class);

    /**
     * CLI entry point: runs a single snapshot backup, then shuts down the backup
     * file-system threads so the JVM can exit cleanly.
     */
    public static void main(String[] args) {
        try {
            Application.initialize();
            SnapshotBackup task = Application.getInjector().getInstance(SnapshotBackup.class);
            try {
                task.execute();
            } catch (Exception e) {
                // Log and fall through so the cleanup in the finally block still runs.
                logger.error("Unable to backup: ", e);
            }
        } finally {
            Application.shutdownAdditionalThreads();
        }
    }
}
| 3,286 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cli/IncrementalBackuper.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cli;
import com.netflix.priam.backup.IncrementalBackup;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class IncrementalBackuper {
    private static final Logger logger = LoggerFactory.getLogger(IncrementalBackuper.class);

    /** CLI entry point: runs the incremental backup task, then shuts down helper threads. */
    public static void main(String[] args) {
        try {
            Application.initialize();
            final IncrementalBackup backup =
                    Application.getInjector().getInstance(IncrementalBackup.class);
            try {
                backup.execute();
            } catch (Exception e) {
                logger.error("Unable to backup: ", e);
            }
        } finally {
            Application.shutdownAdditionalThreads();
        }
    }
}
| 3,287 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cli/LightGuiceModule.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cli;
import com.google.inject.AbstractModule;
import com.netflix.priam.aws.S3FileSystem;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.IMembership;
// Minimal Guice wiring for the CLI tools: just enough bindings to run
// backup/restore commands without starting the full Priam server stack.
class LightGuiceModule extends AbstractModule {
    @Override
    protected void configure() {
        // One shared configuration instance, constructed as soon as the injector exists.
        bind(IConfiguration.class).asEagerSingleton();
        // CLI tools read ring membership from a static properties file, not AWS.
        bind(IMembership.class).to(StaticMembership.class);
        // Backups go to plain (unencrypted) S3.
        bind(IBackupFileSystem.class).to(S3FileSystem.class);
    }
}
| 3,288 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cli/StaticMembership.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cli;
import com.google.common.collect.ImmutableSet;
import com.netflix.priam.identity.IMembership;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Collection;
import java.util.Properties;
import org.apache.cassandra.io.util.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class StaticMembership implements IMembership {
    // Property-file keys: "membership.racname" names this node's rack, and
    // "membership.instances.<rack>" lists that rack's members, comma-separated.
    private static final String MEMBERSHIP_PRE = "membership.";
    private static final String INSTANCES_PRE = MEMBERSHIP_PRE + "instances.";
    private static final String RAC_NAME = MEMBERSHIP_PRE + "racname";
    private static final String DEFAULT_PROP_PATH = "/etc/priam/membership.properties";
    private static final Logger logger = LoggerFactory.getLogger(StaticMembership.class);
    // Members of this node's own rack; remains null when the file has no
    // "membership.instances.<racname>" entry (callers see null / size 0).
    private ImmutableSet<String> racMembership;
    private int racCount;

    /**
     * Loads static ring membership from {@value #DEFAULT_PROP_PATH}.
     *
     * @throws RuntimeException if the membership file cannot be read; Priam cannot start
     *     without it.
     */
    public StaticMembership() throws IOException {
        Properties config = new Properties();
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(DEFAULT_PROP_PATH);
            config.load(fis);
        } catch (Exception e) {
            logger.error("Exception with static membership file ", e);
            throw new RuntimeException("Problem reading static membership file. Cannot start.", e);
        } finally {
            FileUtils.closeQuietly(fis);
        }
        String racName = config.getProperty(RAC_NAME);
        racCount = 0;
        // Every "membership.instances.*" key counts as one rack; the entry matching
        // our own rack name additionally supplies this node's rack membership.
        for (String name : config.stringPropertyNames()) {
            if (name.startsWith(INSTANCES_PRE)) {
                racCount += 1;
                if (name.equals(INSTANCES_PRE + racName))
                    racMembership = ImmutableSet.copyOf(config.getProperty(name).split(","));
            }
        }
    }

    /** @return members of this node's rack, or null if the file had no entry for it. */
    @Override
    public ImmutableSet<String> getRacMembership() {
        return racMembership;
    }

    /** Cross-account membership is not supported by the static file format. */
    @Override
    public ImmutableSet<String> getCrossAccountRacMembership() {
        return null;
    }

    @Override
    public int getRacMembershipSize() {
        if (racMembership == null) return 0;
        return racMembership.size();
    }

    @Override
    public int getRacCount() {
        return racCount;
    }

    // ACL management and rack expansion are no-ops for static membership.
    @Override
    public void addACL(Collection<String> listIPs, int from, int to) {}

    @Override
    public void removeACL(Collection<String> listIPs, int from, int to) {}

    @Override
    public ImmutableSet<String> listACL(int from, int to) {
        return null;
    }

    @Override
    public void expandRacMembership(int count) {}
}
| 3,289 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cli/Restorer.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cli;
import com.netflix.priam.restore.Restore;
import com.netflix.priam.utils.DateUtil;
import java.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Restorer {
    private static final Logger logger = LoggerFactory.getLogger(Restorer.class);

    /** Prints CLI usage to stdout. */
    static void displayHelp() {
        System.out.println("Usage: command_name FROM_DATE TO_DATE");
    }

    /** CLI entry point: restores backed-up data for the given [FROM_DATE, TO_DATE] range. */
    public static void main(String[] args) {
        try {
            Application.initialize();
            if (args.length < 2) {
                displayHelp();
                return;
            }
            final Instant start = DateUtil.parseInstant(args[0]);
            final Instant end = DateUtil.parseInstant(args[1]);
            final Restore restorer = Application.getInjector().getInstance(Restore.class);
            try {
                restorer.restore(new DateUtil.DateRange(start, end));
            } catch (Exception e) {
                logger.error("Unable to restore: ", e);
            }
        } finally {
            Application.shutdownAdditionalThreads();
        }
    }
}
| 3,290 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cred/ClearCredential.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cred;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import java.io.FileInputStream;
import java.util.Properties;
import org.apache.cassandra.io.util.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is a basic implementation of ICredentials. User should prefer to implement their own
* versions for more secured access. This class requires clear AWS key and access.
*
* <p>Set the following properties in "conf/awscredntial.properties"
*/
public class ClearCredential implements ICredential {
    private static final Logger logger = LoggerFactory.getLogger(ClearCredential.class);
    private static final String CRED_FILE = "/etc/awscredential.properties";
    private final String AWS_ACCESS_ID;
    private final String AWS_KEY;

    /**
     * Reads the clear-text AWS access id and secret key from {@value #CRED_FILE}.
     * Missing properties default to the empty string.
     *
     * @throws RuntimeException if the credential file cannot be read; Priam cannot start
     *     without credentials.
     */
    public ClearCredential() {
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(CRED_FILE);
            final Properties props = new Properties();
            props.load(fis);
            // getProperty with a default replaces the previous double lookup + null check.
            AWS_ACCESS_ID = props.getProperty("AWSACCESSID", "").trim();
            AWS_KEY = props.getProperty("AWSKEY", "").trim();
        } catch (Exception e) {
            logger.error("Exception with credential file ", e);
            throw new RuntimeException("Problem reading credential file. Cannot start.", e);
        } finally {
            FileUtils.closeQuietly(fis);
        }
    }

    /** @return a provider that always vends the credentials read at construction time. */
    public AWSCredentialsProvider getAwsCredentialProvider() {
        return new AWSCredentialsProvider() {
            public AWSCredentials getCredentials() {
                return new BasicAWSCredentials(AWS_ACCESS_ID, AWS_KEY);
            }

            public void refresh() {
                // NOP: credentials are immutable once loaded.
            }
        };
    }
}
| 3,291 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cred/ICredentialGeneric.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cred;
/**
* Credential file interface for services supporting Access ID and key authentication for non-AWS
*/
public interface ICredentialGeneric extends ICredential {
    /**
     * Returns the raw credential material for the given key.
     *
     * @param key which credential element to fetch.
     * @return the credential value as bytes.
     */
    byte[] getValue(KEY key);

    /** Credential elements used by Priam's non-AWS integrations (PGP and GCS). */
    enum KEY {
        PGP_PUBLIC_KEY_LOC,
        PGP_PASSWORD,
        GCS_SERVICE_ID,
        GCS_PRIVATE_KEY_LOC
    }
}
| 3,292 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/cred/ICredential.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cred;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.google.inject.ImplementedBy;
/** Credential file interface for services supporting Access ID and key authentication */
@ImplementedBy(ClearCredential.class)
public interface ICredential {
    /**
     * @return provider vending the AWS credentials Priam uses for AWS API calls; defaults
     *     to {@link ClearCredential} unless another binding is configured.
     */
    AWSCredentialsProvider getAwsCredentialProvider();
}
| 3,293 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/defaultimpl/ICassandraProcess.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.defaultimpl;
import com.google.inject.ImplementedBy;
import java.io.IOException;
/**
* Interface to aid in starting and stopping cassandra.
*
* @author jason brown
*/
@ImplementedBy(CassandraProcessManager.class)
public interface ICassandraProcess {
    /**
     * Starts the Cassandra process.
     *
     * @param join_ring whether the node should join the ring on startup.
     * @throws IOException if the start script cannot be executed.
     */
    void start(boolean join_ring) throws IOException;

    /**
     * Stops the Cassandra process.
     *
     * @param force when true, skip any graceful pre-stop steps (e.g. drain) and stop
     *     immediately.
     * @throws IOException if the stop script cannot be executed.
     */
    void stop(boolean force) throws IOException;
}
| 3,294 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/defaultimpl/CassandraProcessManager.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.defaultimpl;
import com.google.common.collect.Lists;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.connection.JMXNodeTool;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.merics.CassMonitorMetrics;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import javax.inject.Inject;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CassandraProcessManager implements ICassandraProcess {
    private static final Logger logger = LoggerFactory.getLogger(CassandraProcessManager.class);
    private static final String SUDO_STRING = "/usr/bin/sudo";
    private static final int SCRIPT_EXECUTE_WAIT_TIME_MS = 5000;
    protected final IConfiguration config;
    private final InstanceState instanceState;
    private final CassMonitorMetrics cassMonitorMetrics;

    @Inject
    public CassandraProcessManager(
            IConfiguration config,
            InstanceState instanceState,
            CassMonitorMetrics cassMonitorMetrics) {
        this.config = config;
        this.instanceState = instanceState;
        this.cassMonitorMetrics = cassMonitorMetrics;
    }

    /** Exports Cassandra directory locations and JVM tuning into the child environment. */
    protected void setEnv(Map<String, String> env) {
        // If we can tune a jvm.options file instead of setting these
        // environment variables we prefer to set heap sizes that way
        if (!config.supportsTuningJVMOptionsFile()) {
            env.put("HEAP_NEWSIZE", config.getHeapNewSize());
            env.put("MAX_HEAP_SIZE", config.getHeapSize());
        }
        env.put("DATA_DIR", config.getDataFileLocation());
        env.put("COMMIT_LOG_DIR", config.getCommitLogLocation());
        env.put("LOCAL_BACKUP_DIR", config.getBackupLocation());
        env.put("CACHE_DIR", config.getCacheLocation());
        env.put("JMX_PORT", "" + config.getJmxPort());
        env.put("LOCAL_JMX", config.enableRemoteJMX() ? "no" : "yes");
        env.put("MAX_DIRECT_MEMORY", config.getMaxDirectMemory());
        env.put("CASS_LOGS_DIR", config.getLogDirLocation());
        env.put("CASSANDRA_LOG_DIR", config.getLogDirLocation());
    }

    /**
     * Starts Cassandra via the configured startup script (optionally under sudo) and
     * records process/metric state based on the script's exit code.
     *
     * @param join_ring whether the node should join the ring on startup (passed to the
     *     child via the cassandra.join_ring environment entry).
     * @throws IOException if the startup script cannot be launched.
     */
    public void start(boolean join_ring) throws IOException {
        logger.info("Starting cassandra server ....Join ring={}", join_ring);
        instanceState.markLastAttemptedStartTime();
        instanceState.setShouldCassandraBeAlive(true);
        List<String> command = Lists.newArrayList();
        if (config.useSudo()) {
            logger.info("Configured to use sudo to start C*");
            // Already-root processes don't need the sudo prefix.
            if (!"root".equals(System.getProperty("user.name"))) {
                command.add(SUDO_STRING);
                command.add("-n");
                command.add("-E");
            }
        }
        command.addAll(getStartCommand());
        ProcessBuilder startCass = new ProcessBuilder(command);
        Map<String, String> env = startCass.environment();
        setEnv(env);
        env.put("cassandra.join_ring", join_ring ? "true" : "false");
        startCass.directory(new File("/"));
        startCass.redirectErrorStream(true);
        logger.info("Start cmd: {}", startCass.command());
        logger.info("Start env: {}", startCass.environment());
        Process starter = startCass.start();
        logger.info("Starting cassandra server ....");
        try {
            int code = starter.waitFor();
            if (code == 0) {
                logger.info("Cassandra server has been started");
                instanceState.setCassandraProcessAlive(true);
                this.cassMonitorMetrics.incCassStart();
            } else logger.error("Unable to start cassandra server. Error code: {}", code);
            logProcessOutput(starter);
        } catch (Exception e) {
            logger.warn("Starting Cassandra has an error", e);
        }
    }

    /** Splits the configured startup script into argv tokens, dropping blank pieces. */
    protected List<String> getStartCommand() {
        List<String> startCmd = new LinkedList<>();
        for (String param : config.getCassStartupScript().split(" ")) {
            if (StringUtils.isNotBlank(param)) startCmd.add(param);
        }
        return startCmd;
    }

    /** Logs the child's stdout and stderr streams for diagnostics. */
    void logProcessOutput(Process p) {
        try {
            final String stdOut = readProcessStream(p.getInputStream());
            final String stdErr = readProcessStream(p.getErrorStream());
            logger.info("std_out: {}", stdOut);
            logger.info("std_err: {}", stdErr);
        } catch (IOException ioe) {
            logger.warn("Failed to read the std out/err streams", ioe);
        }
    }

    /** Drains the given stream into a String (platform default charset). */
    String readProcessStream(InputStream inputStream) throws IOException {
        final byte[] buffer = new byte[512];
        final ByteArrayOutputStream baos = new ByteArrayOutputStream(buffer.length);
        int cnt;
        while ((cnt = inputStream.read(buffer)) != -1) baos.write(buffer, 0, cnt);
        return baos.toString();
    }

    /**
     * Stops Cassandra via the configured stop script. Unless {@code force} is set (and the
     * graceful-drain wait is enabled), first waits for external healthchecks to notice the
     * state change and then drains the node over JMX before running the stop script.
     *
     * @param force skip the healthcheck wait + drain and stop immediately.
     * @throws IOException if the stop script cannot be launched.
     */
    public void stop(boolean force) throws IOException {
        logger.info("Stopping cassandra server ....");
        List<String> command = Lists.newArrayList();
        if (config.useSudo()) {
            logger.info("Configured to use sudo to stop C*");
            if (!"root".equals(System.getProperty("user.name"))) {
                command.add(SUDO_STRING);
                command.add("-n");
                command.add("-E");
            }
        }
        for (String param : config.getCassStopScript().split(" ")) {
            if (StringUtils.isNotBlank(param)) command.add(param);
        }
        ProcessBuilder stopCass = new ProcessBuilder(command);
        stopCass.directory(new File("/"));
        stopCass.redirectErrorStream(true);
        instanceState.setShouldCassandraBeAlive(false);
        if (!force && config.getGracefulDrainHealthWaitSeconds() >= 0) {
            ExecutorService executor = Executors.newSingleThreadExecutor();
            try {
                Future<?> drainFuture =
                        executor.submit(
                                () -> {
                                    // As the node has been marked as shutting down above in
                                    // setShouldCassandraBeAlive, we wait this
                                    // duration to allow external healthcheck systems time to
                                    // pick up the state change.
                                    try {
                                        Thread.sleep(
                                                config.getGracefulDrainHealthWaitSeconds()
                                                        * 1000);
                                    } catch (InterruptedException e) {
                                        return;
                                    }
                                    try {
                                        JMXNodeTool nodetool = JMXNodeTool.instance(config);
                                        nodetool.drain();
                                    } catch (InterruptedException
                                            | IOException
                                            | ExecutionException e) {
                                        logger.error(
                                                "Exception draining Cassandra, could not drain. Proceeding with shutdown.",
                                                e);
                                    }
                                    // Once Cassandra is drained the thrift/native servers are
                                    // shutdown and there is no need to wait to stop
                                    // Cassandra. Just stop it now.
                                });
                // In case drain hangs, timeout the future and continue stopping anyways.
                // Give drain 30s always; in production we frequently see servers that do
                // not want to drain.
                try {
                    drainFuture.get(
                            config.getGracefulDrainHealthWaitSeconds() + 30, TimeUnit.SECONDS);
                } catch (ExecutionException | TimeoutException | InterruptedException e) {
                    // Cancel the hung drain attempt so its thread can exit.
                    drainFuture.cancel(true);
                    logger.error(
                            "Waited 30s for drain but it did not complete, continuing to shutdown",
                            e);
                }
            } finally {
                // BUGFIX: shut the executor down. Previously every call to stop() leaked
                // one live worker thread because the single-thread executor was never
                // terminated.
                executor.shutdownNow();
            }
        }
        Process stopper = stopCass.start();
        try {
            int code = stopper.waitFor();
            if (code == 0) {
                logger.info("Cassandra server has been stopped");
                this.cassMonitorMetrics.incCassStop();
                instanceState.setCassandraProcessAlive(false);
            } else {
                logger.error("Unable to stop cassandra server. Error code: {}", code);
                logProcessOutput(stopper);
            }
        } catch (Exception e) {
            logger.warn("couldn't shut down cassandra correctly", e);
        }
    }
}
| 3,295 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/defaultimpl/IService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.defaultimpl;
import com.netflix.priam.scheduler.PriamScheduler;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.scheduler.TaskTimer;
import java.text.ParseException;
import org.quartz.SchedulerException;
/**
* This is how we create a new service in Priam. Any service we start, should implement this
* interface so we can update the service at run-time if required.
*
* <p>Created by aagrawal on 3/9/19.
*/
public interface IService {
    /**
     * This method is called to schedule the service when we initialize it for first time ONLY.
     *
     * @throws Exception if there is any error while trying to schedule the service.
     */
    void scheduleService() throws Exception;

    /**
     * This method is called before we try to update the service. Use this method to do any kind of
     * maintenance operations before we change the scheduling of all the jobs in service.
     *
     * @throws Exception if there is any error in pre-hook method of service.
     */
    void updateServicePre() throws Exception;

    /**
     * This method is called after we re-schedule all the services in PriamScheduler. Use this
     * method for post hook maintenance operations after changing the schedule of all the jobs.
     *
     * @throws Exception if there is any error in post-hook method of service.
     */
    void updateServicePost() throws Exception;

    /**
     * Schedule a given task. It will safely delete that task from scheduler before scheduling.
     * Passing a null taskTimer de-schedules the task without re-adding it.
     *
     * @param priamScheduler Scheduler in use by Priam.
     * @param task Task that needs to be scheduled in priamScheduler
     * @param taskTimer Timer for the task; null means "remove only, do not schedule".
     * @throws SchedulerException If there is any error in deleting the task or scheduling a new
     *     job.
     * @throws ParseException If there is any error in parsing the taskTimer while trying to add a
     *     new job to scheduler.
     */
    default void scheduleTask(
            PriamScheduler priamScheduler, Class<? extends Task> task, TaskTimer taskTimer)
            throws SchedulerException, ParseException {
        priamScheduler.deleteTask(task.getName());
        if (taskTimer == null) return;
        priamScheduler.addTask(task.getName(), task, taskTimer);
    }

    /**
     * Update the service. This method will be called to update the service while Priam is running.
     * Runs the pre-hook, reschedules, then the post-hook, in that order.
     *
     * @throws Exception if any issue while updating the service.
     */
    default void onChangeUpdateService() throws Exception {
        updateServicePre();
        scheduleService();
        updateServicePost();
    }
}
| 3,296 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/defaultimpl/PriamGuiceModule.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.defaultimpl;
import com.google.inject.AbstractModule;
import com.google.inject.name.Names;
import com.netflix.priam.aws.S3CrossAccountFileSystem;
import com.netflix.priam.aws.S3EncryptedFileSystem;
import com.netflix.priam.aws.S3FileSystem;
import com.netflix.priam.aws.auth.EC2RoleAssumptionCredential;
import com.netflix.priam.aws.auth.IS3Credential;
import com.netflix.priam.aws.auth.S3RoleAssumptionCredential;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.backupv2.IMetaProxy;
import com.netflix.priam.backupv2.MetaV1Proxy;
import com.netflix.priam.backupv2.MetaV2Proxy;
import com.netflix.priam.cred.ICredential;
import com.netflix.priam.cred.ICredentialGeneric;
import com.netflix.priam.cryptography.IFileCryptography;
import com.netflix.priam.cryptography.pgp.PgpCredential;
import com.netflix.priam.cryptography.pgp.PgpCryptography;
import com.netflix.priam.google.GcsCredential;
import com.netflix.priam.google.GoogleEncryptedFileSystem;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Registry;
import org.quartz.SchedulerFactory;
import org.quartz.impl.StdSchedulerFactory;
// Central Guice wiring for the full Priam server. Named bindings let injection
// sites choose a concrete backup file system / credential / crypto
// implementation via @Named(...).
public class PriamGuiceModule extends AbstractModule {
    @Override
    protected void configure() {
        bind(SchedulerFactory.class).to(StdSchedulerFactory.class).asEagerSingleton();
        // Backup destinations: plain S3, client-side-encrypted S3, and encrypted GCS.
        bind(IBackupFileSystem.class).annotatedWith(Names.named("backup")).to(S3FileSystem.class);
        bind(IBackupFileSystem.class)
                .annotatedWith(Names.named("encryptedbackup"))
                .to(S3EncryptedFileSystem.class);
        bind(S3CrossAccountFileSystem.class);
        bind(IBackupFileSystem.class)
                .annotatedWith(Names.named("gcsencryptedbackup"))
                .to(GoogleEncryptedFileSystem.class);
        // Role-assumption credentials for S3 and EC2 API access.
        bind(IS3Credential.class)
                .annotatedWith(Names.named("awss3roleassumption"))
                .to(S3RoleAssumptionCredential.class);
        bind(ICredential.class)
                .annotatedWith(Names.named("awsec2roleassumption"))
                .to(EC2RoleAssumptionCredential.class);
        // File encryption (PGP) and the credentials it needs.
        bind(IFileCryptography.class)
                .annotatedWith(Names.named("filecryptoalgorithm"))
                .to(PgpCryptography.class);
        bind(ICredentialGeneric.class)
                .annotatedWith(Names.named("gcscredential"))
                .to(GcsCredential.class);
        bind(ICredentialGeneric.class)
                .annotatedWith(Names.named("pgpcredential"))
                .to(PgpCredential.class);
        // Backup meta-file proxies for the v1 and v2 backup formats.
        bind(IMetaProxy.class).annotatedWith(Names.named("v1")).to(MetaV1Proxy.class);
        bind(IMetaProxy.class).annotatedWith(Names.named("v2")).to(MetaV2Proxy.class);
        // No-op metrics registry by default; deployments may override.
        bind(Registry.class).toInstance(new NoopRegistry());
    }
}
| 3,297 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/defaultimpl/InjectedWebListener.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.defaultimpl;
import com.google.common.collect.Lists;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.netflix.priam.PriamServer;
import com.netflix.priam.config.IConfiguration;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.ServletContextEvent;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SchedulerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// Servlet context listener that bootstraps Priam: builds the Guice injector,
// initializes configuration, schedules the Priam services, and wires the
// Jersey REST resources under /REST.
public class InjectedWebListener extends GuiceServletContextListener {
    protected static final Logger logger = LoggerFactory.getLogger(InjectedWebListener.class);
    private Injector injector;

    @Override
    protected Injector getInjector() {
        List<Module> moduleList = Lists.newArrayList();
        moduleList.add(new JaxServletModule());
        moduleList.add(new PriamGuiceModule());
        injector = Guice.createInjector(moduleList);
        try {
            // Configuration must be initialized before any scheduled service runs.
            injector.getInstance(IConfiguration.class).initialize();
            injector.getInstance(PriamServer.class).scheduleService();
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            // Fail servlet-context startup: Priam cannot run half-initialized.
            throw new RuntimeException(e.getMessage(), e);
        }
        return injector;
    }

    @Override
    public void contextDestroyed(ServletContextEvent servletContextEvent) {
        try {
            // Stop all Quartz schedulers so their threads don't outlive the webapp.
            for (Scheduler scheduler :
                    injector.getInstance(SchedulerFactory.class).getAllSchedulers()) {
                scheduler.shutdown();
            }
        } catch (SchedulerException e) {
            throw new RuntimeException(e);
        }
        super.contextDestroyed(servletContextEvent);
    }

    // Maps Jersey (via Guice) to /REST/*, scanning com.netflix.priam.resources.
    public static class JaxServletModule extends ServletModule {
        @Override
        protected void configureServlets() {
            Map<String, String> params = new HashMap<>();
            params.put(PackagesResourceConfig.PROPERTY_PACKAGES, "unbound");
            params.put("com.sun.jersey.config.property.packages", "com.netflix.priam.resources");
            params.put(ServletContainer.PROPERTY_FILTER_CONTEXT_PATH, "/REST");
            serve("/REST/*").with(GuiceContainer.class, params);
        }
    }
}
| 3,298 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/MetaFileInfo.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.time.Instant;
import java.util.List;
/** This POJO class encapsulates the information for a meta file. */
public class MetaFileInfo {
    // Naming pieces and JSON section keys for v2 meta files; excluded from
    // Gson serialization.
    @GsonJsonSerializer.PriamAnnotation.GsonIgnore
    public static final String META_FILE_PREFIX = "meta_v2_";

    @GsonJsonSerializer.PriamAnnotation.GsonIgnore
    public static final String META_FILE_SUFFIX = ".json";

    @GsonJsonSerializer.PriamAnnotation.GsonIgnore
    public static final String META_FILE_INFO = "info";

    @GsonJsonSerializer.PriamAnnotation.GsonIgnore
    public static final String META_FILE_DATA = "data";

    // Serialized meta-file fields.
    private short version = 1;
    private String appName;
    private String region;
    private String rack;
    private List<String> backupIdentifier;

    public MetaFileInfo(String appName, String region, String rack, List<String> backupIdentifier) {
        this.appName = appName;
        this.region = region;
        this.rack = rack;
        this.backupIdentifier = backupIdentifier;
    }

    /** Builds the canonical meta file name (prefix + formatted timestamp + suffix). */
    public static String getMetaFileName(Instant instant) {
        return MetaFileInfo.META_FILE_PREFIX
                + DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, instant)
                + MetaFileInfo.META_FILE_SUFFIX;
    }

    public short getVersion() {
        return version;
    }

    public void setVersion(short version) {
        this.version = version;
    }

    public String getAppName() {
        return appName;
    }

    public void setAppName(String appName) {
        this.appName = appName;
    }

    public String getRegion() {
        return region;
    }

    public void setRegion(String region) {
        this.region = region;
    }

    public String getRack() {
        return rack;
    }

    public void setRack(String rack) {
        this.rack = rack;
    }

    public List<String> getBackupIdentifier() {
        return backupIdentifier;
    }

    public void setBackupIdentifier(List<String> backupIdentifier) {
        this.backupIdentifier = backupIdentifier;
    }

    /** JSON representation produced by the project's Gson serializer. */
    @Override
    public String toString() {
        return GsonJsonSerializer.getGson().toJson(this);
    }
}
| 3,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.