index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/IMetaProxy.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.BackupRestoreException;
import com.netflix.priam.backup.BackupVerificationResult;
import com.netflix.priam.utils.DateUtil;
import java.nio.file.Path;
import java.util.Iterator;
import java.util.List;
/**
 * Proxy to do management tasks for meta (manifest) files, independent of the backup version
 * (V1 meta.json vs. V2 meta_v2 files). Created by aagrawal on 12/18/18.
 */
public interface IMetaProxy {
    /**
     * Path on the local file system where meta file should be stored for processing.
     *
     * @return location on local file system.
     */
    Path getLocalMetaFileDirectory();

    /**
     * Get the prefix for the manifest file. This will depend on the configuration, if restore
     * prefix is set.
     *
     * @param dateRange date range for which we are trying to find manifest files.
     * @return prefix for the manifest files.
     */
    String getMetaPrefix(DateUtil.DateRange dateRange);

    /**
     * Fetch the list of all manifest files on the remote file system for the provided valid
     * date range.
     *
     * @param dateRange the time period to scan in the remote file system for meta files.
     * @return list of all the manifest files from the remote file system.
     */
    List<AbstractBackupPath> findMetaFiles(DateUtil.DateRange dateRange);

    /**
     * Download the meta file to disk.
     *
     * @param meta AbstractBackupPath denoting the meta file on remote file system.
     * @return the location of the meta file on disk after downloading from remote file system.
     * @throws BackupRestoreException if unable to download for any reason.
     */
    Path downloadMetaFile(AbstractBackupPath meta) throws BackupRestoreException;

    /**
     * Read the manifest file and give the contents of the file (all the sstable components) as
     * a list.
     *
     * @param localMetaPath location of the manifest file on disk.
     * @return list containing all the remote locations of sstable components.
     * @throws Exception if file is not found on local system or is corrupt.
     */
    List<String> getSSTFilesFromMeta(Path localMetaPath) throws Exception;

    /**
     * Get the list of incremental files given the date range.
     *
     * @param dateRange the time period to scan in the remote file system for incremental files.
     * @return iterator containing the list of path on the remote file system satisfying criteria.
     */
    Iterator<AbstractBackupPath> getIncrementals(DateUtil.DateRange dateRange);

    /**
     * Validate that all the files mentioned in the meta file actually exist on remote file system.
     *
     * @param metaBackupPath Path to the remote meta file.
     * @return backupVerificationResult containing the information like valid - if all the files
     *     mentioned in meta file are present on remote file system. It will return false in case
     *     of any error.
     */
    BackupVerificationResult isMetaFileValid(AbstractBackupPath metaBackupPath);

    /** Delete the old meta files, if any present in the metaFileDirectory */
    void cleanupOldMetaFiles();
}
| 3,300 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/FileUploadResult.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.compress.CompressionType;
import com.netflix.priam.cryptography.CryptographyAlgorithm;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.io.File;
import java.nio.file.Path;
import java.time.Instant;
/**
 * POJO capturing the outcome of uploading a single file to the remote backup file system.
 * Serialized to JSON (via Gson) inside the backup manifest, so field names are part of the
 * on-disk format and must not change. Created by aagrawal on 6/20/18.
 */
public class FileUploadResult {
    private final Path fileName;
    private final Instant lastModifiedTime;
    private final Instant fileCreationTime;
    private final long fileSizeOnDisk; // Size on disk in bytes
    // Valid compression technique for now is SNAPPY only. Future we need to support LZ4 and NONE
    private final CompressionType compression;
    // Valid encryption technique for now is PLAINTEXT only. In future we will support pgp and more.
    private final CryptographyAlgorithm encryption;
    private Boolean isUploaded;
    private String backupPath;

    /**
     * Test-only constructor; assumes SNAPPY compression and PLAINTEXT encryption.
     *
     * @param fileName local path of the uploaded file.
     * @param lastModifiedTime last-modified instant of the file.
     * @param fileCreationTime creation instant of the file.
     * @param fileSizeOnDisk size of the file on disk, in bytes.
     */
    @VisibleForTesting
    public FileUploadResult(
            Path fileName,
            Instant lastModifiedTime,
            Instant fileCreationTime,
            long fileSizeOnDisk) {
        this.fileName = fileName;
        this.lastModifiedTime = lastModifiedTime;
        this.fileCreationTime = fileCreationTime;
        this.fileSizeOnDisk = fileSizeOnDisk;
        this.compression = CompressionType.SNAPPY;
        this.encryption = CryptographyAlgorithm.PLAINTEXT;
    }

    /**
     * Builds the result from an {@link AbstractBackupPath}, copying its timestamps, size,
     * compression and encryption settings.
     *
     * @param path backup path describing the file; must carry positive creation and
     *     last-modified times.
     * @throws IllegalArgumentException if either timestamp is not strictly positive.
     */
    public FileUploadResult(AbstractBackupPath path) {
        Preconditions.checkArgument(path.getLastModified().toEpochMilli() > 0);
        Preconditions.checkArgument(path.getCreationTime().toEpochMilli() > 0);
        File backupFile = path.getBackupFile();
        this.fileName = backupFile.toPath();
        this.backupPath = path.getRemotePath();
        this.lastModifiedTime = path.getLastModified();
        this.fileCreationTime = path.getCreationTime();
        this.fileSizeOnDisk = path.getSize();
        this.compression = path.getCompression();
        this.encryption = path.getEncryption();
    }

    public void setUploaded(Boolean uploaded) {
        this.isUploaded = uploaded;
    }

    public Boolean getIsUploaded() {
        return this.isUploaded;
    }

    public Path getFileName() {
        return this.fileName;
    }

    public String getBackupPath() {
        return this.backupPath;
    }

    public void setBackupPath(String backupPath) {
        this.backupPath = backupPath;
    }

    /** @return JSON representation of this result (Gson). */
    @Override
    public String toString() {
        return GsonJsonSerializer.getGson().toJson(this);
    }
}
| 3,301 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/BackupTTLTask.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.netflix.priam.backup.*;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.identity.token.TokenRetriever;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.DateUtil;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.math.Fraction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class is used to TTL or delete the SSTable components from the backups after they are not
 * referenced in the backups for more than {@link IConfiguration#getBackupRetentionDays()}. This
 * operation is executed on CRON and is configured via {@link
 * IBackupRestoreConfig#getBackupTTLMonitorPeriodInSec()}.
 *
 * <p>To TTL the SSTable components we refer to the first manifest file on the remote file system
 * after the TTL period. Any sstable components referenced in that manifest file should not be
 * deleted. Any other sstable components (files) on remote file system before the TTL period can be
 * safely deleted. Created by aagrawal on 11/26/18.
 */
@Singleton
public class BackupTTLTask extends Task {
    private static final Logger logger = LoggerFactory.getLogger(BackupTTLTask.class);
    private IBackupRestoreConfig backupRestoreConfig;
    private IMetaProxy metaProxy;
    private IBackupFileSystem fileSystem;
    private Provider<AbstractBackupPath> abstractBackupPathProvider;
    private InstanceState instanceState;
    public static final String JOBNAME = "BackupTTLService";
    // Remote paths referenced by the anchor manifest. Used as a set: values are always null.
    // Instance state, reset at the start of each run and guarded by the static lock below.
    private Map<String, Boolean> filesInMeta = new HashMap<>();
    // Pending remote deletions, flushed in batches of BATCH_SIZE by deleteFile().
    private List<Path> filesToDelete = new ArrayList<>();
    // Static lock: at most one TTL run per JVM, even if CRON fires while a run is in progress.
    private static final Lock lock = new ReentrantLock();
    private final int BATCH_SIZE = 1000;
    // Earliest possible META file date: this backup feature did not exist before Jan 2018.
    private final Instant start_of_feature = DateUtil.parseInstant("201801010000");
    // Upper bound for the random pre-run sleep; staggers nodes so they do not all list/delete
    // at once. Scaled by this node's ring position denominator.
    private final int maxWaitMillis;

    @Inject
    public BackupTTLTask(
            IConfiguration configuration,
            IBackupRestoreConfig backupRestoreConfig,
            @Named("v2") IMetaProxy metaProxy,
            IFileSystemContext backupFileSystemCtx,
            Provider<AbstractBackupPath> abstractBackupPathProvider,
            TokenRetriever tokenRetriever,
            InstanceState instanceState)
            throws Exception {
        super(configuration);
        this.backupRestoreConfig = backupRestoreConfig;
        this.metaProxy = metaProxy;
        this.fileSystem = backupFileSystemCtx.getFileStrategy(configuration);
        this.abstractBackupPathProvider = abstractBackupPathProvider;
        this.instanceState = instanceState;
        // Local bootstrap (dev/test) keeps the wait at 1s; otherwise spread the waits across
        // the monitor period proportionally to the token ring position.
        this.maxWaitMillis =
                configuration.isLocalBootstrapEnabled()
                        ? Math.toIntExact(Duration.ofSeconds(1).toMillis())
                        : 1_000
                                * backupRestoreConfig.getBackupTTLMonitorPeriodInSec()
                                / tokenRetriever.getRingPosition().getDenominator();
    }

    /**
     * Runs one TTL pass: finds the anchor manifest just after the retention window, loads the
     * set of still-referenced components, deletes old META files, then deletes any SST_V2 file
     * older than (retention + compaction grace period) that the anchor manifest does not list.
     *
     * @throws Exception if another TTL run is already in progress, or on any remote FS error.
     */
    @Override
    public void execute() throws Exception {
        // Skip entirely while a restore is running; backups are not in a consistent state.
        if (instanceState.getRestoreStatus() != null
                && instanceState.getRestoreStatus().getStatus() != null
                && instanceState.getRestoreStatus().getStatus() == Status.STARTED) {
            logger.info("Not executing the TTL Task for backups as Priam is in restore mode.");
            return;
        }
        // Do not allow more than one backupTTLService to run at the same time. This is possible
        // as this happens on CRON.
        if (!lock.tryLock()) {
            logger.warn("{} is already running! Try again later.", JOBNAME);
            throw new Exception(JOBNAME + " already running");
        }
        // Sleep a random amount but not so long that it will spill into the next token's turn.
        if (maxWaitMillis > 0) Thread.sleep(new Random().nextInt(maxWaitMillis));
        try {
            filesInMeta.clear();
            filesToDelete.clear();
            Instant dateToTtl =
                    DateUtil.getInstant().minus(config.getBackupRetentionDays(), ChronoUnit.DAYS);
            // Find the snapshot just after this date.
            List<AbstractBackupPath> metas =
                    metaProxy.findMetaFiles(
                            new DateUtil.DateRange(dateToTtl, DateUtil.getInstant()));
            if (metas.size() == 0) {
                logger.info("No meta file found and thus cannot run TTL Service");
                return;
            }
            // Get the first file after the TTL time as we get files which are sorted latest to
            // oldest.
            AbstractBackupPath metaFile = metas.get(metas.size() - 1);
            // Download the meta file to local file system.
            Path localFile = metaProxy.downloadMetaFile(metaFile);
            // Walk over the file system iterator and if not in map, it is eligible for delete.
            new MetaFileWalker().readMeta(localFile);
            logger.info("No. of component files loaded from meta file: {}", filesInMeta.size());
            // Delete the meta file downloaded locally
            FileUtils.deleteQuietly(localFile.toFile());
            // If there are no files listed in meta, do not delete. This could be a bug!!
            if (filesInMeta.isEmpty()) {
                logger.warn("Meta file was empty. This should not happen. Getting out!!");
                return;
            }
            // Delete the old META files. We are giving start date which is so back in past to get
            // all the META files.
            // This feature did not exist in Jan 2018.
            metas =
                    metaProxy.findMetaFiles(
                            new DateUtil.DateRange(
                                    start_of_feature, dateToTtl.minus(1, ChronoUnit.HOURS)));
            if (metas != null && metas.size() != 0) {
                logger.info(
                        "Will delete(TTL) {} META files starting from: [{}]",
                        metas.size(),
                        metas.get(metas.size() - 1).getLastModified());
                for (AbstractBackupPath meta : metas) {
                    deleteFile(meta, false);
                }
            }
            Iterator<String> remoteFileLocations =
                    fileSystem.listFileSystem(getSSTPrefix(), null, null);
            /*
            We really cannot delete the files until the TTL period.
            Cassandra can flush files on file system like Index.db first and other component files later (like 30 mins). If there is a snapshot in between, then this "single" component file would not be part of the snapshot as SSTable is still not part of Cassandra's "view". Only if Cassandra could provide strong guarantees on the file system such that -
            1. All component will be flushed to disk as real SSTables only if they are part of the view. Until that happens all the files will be "tmp" files.
            2. All component flushed will have the same "last modified" file. i.e. on the first flush. Stats.db can change over time and that is OK.
            Since this is not the case, the TTL may end up deleting this file even though the file is part of the next snapshot. To avoid, this we add grace period (based on how long compaction can run) when we delete the files.
            */
            dateToTtl = dateToTtl.minus(config.getGracePeriodDaysForCompaction(), ChronoUnit.DAYS);
            logger.info(
                    "Will delete(TTL) SST_V2 files which are before this time: {}. Input: [TTL: {} days, Grace Period: {} days]",
                    dateToTtl,
                    config.getBackupRetentionDays(),
                    config.getGracePeriodDaysForCompaction());
            while (remoteFileLocations.hasNext()) {
                AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get();
                abstractBackupPath.parseRemote(remoteFileLocations.next());
                // If lastModifiedTime is after the dateToTTL, we should get out of this loop as
                // remote file systems always give locations which are sorted.
                if (abstractBackupPath.getLastModified().isAfter(dateToTtl)) {
                    logger.info(
                            "Breaking from TTL. Got a key which is after the TTL time: {}",
                            abstractBackupPath.getRemotePath());
                    break;
                }
                if (!filesInMeta.containsKey(abstractBackupPath.getRemotePath())) {
                    deleteFile(abstractBackupPath, false);
                } else {
                    if (logger.isDebugEnabled())
                        logger.debug(
                                "Not deleting this key as it is referenced in backups: {}",
                                abstractBackupPath.getRemotePath());
                }
            }
            // Delete remaining files.
            deleteFile(null, true);
            logger.info("Finished processing files for TTL service");
        } finally {
            lock.unlock();
        }
    }

    /**
     * Queues a remote path for deletion; flushes the queue to the remote file system when it
     * reaches BATCH_SIZE or when forceClear is set (used for the final partial batch).
     *
     * @param path remote file to delete; may be null when only flushing.
     * @param forceClear if true, delete whatever is queued regardless of batch size.
     * @throws BackupRestoreException if the remote deletion fails.
     */
    private void deleteFile(AbstractBackupPath path, boolean forceClear)
            throws BackupRestoreException {
        if (path != null) filesToDelete.add(Paths.get(path.getRemotePath()));
        if (forceClear || filesToDelete.size() >= BATCH_SIZE) {
            fileSystem.deleteRemoteFiles(filesToDelete);
            filesToDelete.clear();
        }
    }

    /** @return the remote prefix under which all SST_V2 component files live. */
    private String getSSTPrefix() {
        Path location = fileSystem.getPrefix();
        AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get();
        return abstractBackupPath
                .remoteV2Prefix(location, AbstractBackupPath.BackupFileType.SST_V2)
                .toString();
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /**
     * Interval between trying to TTL data on Remote file system.
     *
     * @param backupRestoreConfig {@link IBackupRestoreConfig#getBackupTTLMonitorPeriodInSec()} to
     *     get configuration details from priam. Use "-1" to disable the service.
     * @return the timer to be used for backup ttl service.
     * @throws Exception if the configuration is not set correctly or are not valid. This is to
     *     ensure we fail-fast.
     */
    public static TaskTimer getTimer(
            IBackupRestoreConfig backupRestoreConfig, Fraction ringPosition) throws Exception {
        int period = backupRestoreConfig.getBackupTTLMonitorPeriodInSec();
        // Offset each node's start time by its ring position so nodes do not run simultaneously.
        Instant start = Instant.ofEpochSecond((long) (period * ringPosition.doubleValue()));
        return new SimpleTimer(JOBNAME, period, start);
    }

    /** Meta-file visitor that records every referenced component's remote path in filesInMeta. */
    private class MetaFileWalker extends MetaFileReader {
        @Override
        public void process(ColumnFamilyResult columnfamilyResult) {
            columnfamilyResult
                    .getSstables()
                    .forEach(
                            ssTableResult ->
                                    ssTableResult
                                            .getSstableComponents()
                                            .forEach(
                                                    fileUploadResult ->
                                                            filesInMeta.put(
                                                                    fileUploadResult
                                                                            .getBackupPath(),
                                                                    null)));
        }
    }
}
| 3,302 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/MetaV1Proxy.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.google.common.collect.Lists;
import com.netflix.priam.backup.*;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.utils.DateUtil;
import java.io.FileReader;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.temporal.ChronoUnit;
import java.util.*;
import javax.inject.Inject;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections4.iterators.FilterIterator;
import org.apache.commons.io.FileUtils;
import org.json.simple.parser.JSONParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Created by aagrawal on 12/18/18. */
public class MetaV1Proxy implements IMetaProxy {
private static final Logger logger = LoggerFactory.getLogger(MetaV1Proxy.class);
private final IBackupFileSystem fs;
@Inject
public MetaV1Proxy(IConfiguration configuration, IFileSystemContext backupFileSystemCtx) {
fs = backupFileSystemCtx.getFileStrategy(configuration);
}
@Override
public Path getLocalMetaFileDirectory() {
return null;
}
@Override
public String getMetaPrefix(DateUtil.DateRange dateRange) {
return null;
}
@Override
public List<AbstractBackupPath> findMetaFiles(DateUtil.DateRange dateRange) {
Date startTime = new Date(dateRange.getStartTime().toEpochMilli());
Date endTime = new Date(dateRange.getEndTime().toEpochMilli());
String restorePrefix = fs.getPrefix().toString();
logger.debug("Looking for snapshot meta file within restore prefix: {}", restorePrefix);
List<AbstractBackupPath> metas = Lists.newArrayList();
Iterator<AbstractBackupPath> backupfiles = fs.list(restorePrefix, startTime, endTime);
while (backupfiles.hasNext()) {
AbstractBackupPath path = backupfiles.next();
if (path.getType() == AbstractBackupPath.BackupFileType.META)
// Since there are now meta file for incrementals as well as snapshot, we need to
// find the correct one (i.e. the snapshot meta file (meta.json))
if (path.getFileName().equalsIgnoreCase("meta.json")) {
metas.add(path);
}
}
metas.sort(Collections.reverseOrder());
if (metas.size() == 0) {
logger.info(
"No meta v1 file found on remote file system for the time period: {}",
dateRange);
}
return metas;
}
@Override
public BackupVerificationResult isMetaFileValid(AbstractBackupPath metaBackupPath) {
BackupVerificationResult result = new BackupVerificationResult();
result.remotePath = metaBackupPath.getRemotePath();
result.snapshotInstant = metaBackupPath.getTime().toInstant();
try {
// Download the meta file.
Path metaFile = downloadMetaFile(metaBackupPath);
// Read the local meta file.
List<String> metaFileList = getSSTFilesFromMeta(metaFile);
FileUtils.deleteQuietly(metaFile.toFile());
result.manifestAvailable = true;
// List the remote file system to validate the backup.
String prefix = fs.getPrefix().toString();
Date strippedMsSnapshotTime =
new Date(result.snapshotInstant.truncatedTo(ChronoUnit.MINUTES).toEpochMilli());
Iterator<AbstractBackupPath> backupfiles =
fs.list(prefix, strippedMsSnapshotTime, strippedMsSnapshotTime);
// Return validation fail if backup filesystem listing failed.
if (!backupfiles.hasNext()) {
logger.warn(
"ERROR: No files available while doing backup filesystem listing. Declaring the verification failed.");
return result;
}
// Convert the remote listing to String.
List<String> remoteListing = new ArrayList<>();
while (backupfiles.hasNext()) {
AbstractBackupPath path = backupfiles.next();
if (path.getType() == AbstractBackupPath.BackupFileType.SNAP)
remoteListing.add(path.getRemotePath());
}
if (metaFileList.isEmpty() && remoteListing.isEmpty()) {
logger.info(
"Uncommon Scenario: Both meta file and backup filesystem listing is empty. Considering this as success");
result.valid = true;
return result;
}
ArrayList<String> filesMatched =
(ArrayList<String>) CollectionUtils.intersection(metaFileList, remoteListing);
result.filesMatched = filesMatched.size();
result.filesInMetaOnly = metaFileList;
result.filesInMetaOnly.removeAll(filesMatched);
// There could be a scenario that backupfilesystem has more files than meta file. e.g.
// some leftover objects
result.valid = (result.filesInMetaOnly.isEmpty());
} catch (Exception e) {
logger.error(
"Error while processing meta file: " + metaBackupPath, e.getLocalizedMessage());
e.printStackTrace();
}
return result;
}
@Override
public Path downloadMetaFile(AbstractBackupPath meta) throws BackupRestoreException {
fs.downloadFile(meta, ".download" /* suffix */, 10 /* retries */);
return Paths.get(meta.newRestoreFile().getAbsolutePath() + ".download");
}
@Override
public List<String> getSSTFilesFromMeta(Path localMetaPath) throws Exception {
if (localMetaPath.toFile().isDirectory() || !localMetaPath.toFile().exists())
throw new InvalidPathException(
localMetaPath.toString(), "Input path is either directory or do not exist");
List<String> result = new ArrayList<>();
JSONParser jsonParser = new JSONParser();
org.json.simple.JSONArray fileList =
(org.json.simple.JSONArray)
jsonParser.parse(new FileReader(localMetaPath.toFile()));
fileList.forEach(entry -> result.add(entry.toString()));
return result;
}
@Override
public Iterator<AbstractBackupPath> getIncrementals(DateUtil.DateRange dateRange) {
String prefix = fs.getPrefix().toString();
Iterator<AbstractBackupPath> iterator =
fs.list(
prefix,
new Date(dateRange.getStartTime().toEpochMilli()),
new Date(dateRange.getEndTime().toEpochMilli()));
return new FilterIterator<>(
iterator,
abstractBackupPath ->
abstractBackupPath.getType() == AbstractBackupPath.BackupFileType.SST);
}
@Override
public void cleanupOldMetaFiles() {}
}
| 3,303 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/ColumnFamilyResult.java | /**
* Copyright 2018 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backupv2;
import com.google.common.collect.ImmutableSet;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
/**
 * POJO grouping all the SSTables that belong to one column family. Serialized to JSON via Gson,
 * so field names are part of the manifest format and must not be renamed.
 * Created by aagrawal on 7/1/18.
 */
public class ColumnFamilyResult {
    private String keyspaceName;
    private String columnfamilyName;
    private List<SSTableResult> sstables = new ArrayList<>();

    public ColumnFamilyResult(String keyspaceName, String columnfamilyName) {
        this.keyspaceName = keyspaceName;
        this.columnfamilyName = columnfamilyName;
    }

    public List<SSTableResult> getSstables() {
        return this.sstables;
    }

    public void setSstables(List<SSTableResult> sstables) {
        this.sstables = sstables;
    }

    /** Appends one SSTable, lazily re-creating the list if a caller set it to null. */
    public void addSstable(SSTableResult sstable) {
        if (this.sstables == null) {
            this.sstables = new ArrayList<>();
        }
        this.sstables.add(sstable);
    }

    /** @return JSON representation of this column family result (Gson). */
    @Override
    public String toString() {
        return GsonJsonSerializer.getGson().toJson(this);
    }

    /** POJO for a single SSTable: its common file prefix plus every component file. */
    public static class SSTableResult {
        private String prefix;
        private Set<FileUploadResult> sstableComponents;

        public void setPrefix(String prefix) {
            this.prefix = prefix;
        }

        public Set<FileUploadResult> getSstableComponents() {
            return this.sstableComponents;
        }

        public void setSstableComponents(ImmutableSet<FileUploadResult> sstableComponents) {
            this.sstableComponents = sstableComponents;
        }

        /** @return JSON representation of this SSTable result (Gson). */
        @Override
        public String toString() {
            return GsonJsonSerializer.getGson().toJson(this);
        }
    }
}
| 3,304 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/MetaFileReader.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.google.gson.stream.JsonReader;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This abstract class encapsulates the reading of meta file in streaming fashion. This is required
 * as we could have a meta file which cannot fit in memory. Created by aagrawal on 7/3/18.
 */
public abstract class MetaFileReader {
    private static final Logger logger = LoggerFactory.getLogger(MetaFileReader.class);
    private MetaFileInfo metaFileInfo;

    public MetaFileInfo getMetaFileInfo() {
        return metaFileInfo;
    }

    /**
     * Reads the local meta file as denoted by metaFilePath, invoking {@link #process} once per
     * column family entry so the whole file never has to be held in memory.
     *
     * @param metaFilePath local file path for the meta file.
     * @throws IOException if not enough permissions or file is not valid format.
     */
    public void readMeta(Path metaFilePath) throws IOException {
        // Validate if meta file exists and is right file name.
        if (metaFilePath == null
                || !metaFilePath.toFile().exists()
                || !metaFilePath.toFile().isFile()
                || !isValidMetaFile(metaFilePath)) {
            throw new FileNotFoundException(
                    "MetaFilePath: " + metaFilePath + " do not exist or is not valid meta file.");
        }
        // Read the meta file.
        logger.info("Trying to read the meta file: {}", metaFilePath);
        // try-with-resources guarantees the reader is closed even when parsing throws
        // (previously the reader leaked on any exception raised mid-parse).
        try (JsonReader jsonReader = new JsonReader(new FileReader(metaFilePath.toFile()))) {
            jsonReader.beginObject();
            while (jsonReader.hasNext()) {
                switch (jsonReader.nextName()) {
                    case MetaFileInfo.META_FILE_INFO:
                        metaFileInfo =
                                GsonJsonSerializer.getGson().fromJson(jsonReader, MetaFileInfo.class);
                        break;
                    case MetaFileInfo.META_FILE_DATA:
                        jsonReader.beginArray();
                        while (jsonReader.hasNext())
                            process(
                                    GsonJsonSerializer.getGson()
                                            .fromJson(jsonReader, ColumnFamilyResult.class));
                        jsonReader.endArray();
                        break;
                    default:
                        // Skip unknown keys; previously an unrecognized name left its value
                        // unconsumed, which corrupted the remainder of the parse.
                        jsonReader.skipValue();
                }
            }
            jsonReader.endObject();
        }
        logger.info("Finished reading the meta file: {}", metaFilePath);
    }

    /**
     * Process the columnfamily result obtained after reading meta file.
     *
     * @param columnfamilyResult {@link ColumnFamilyResult} POJO containing the column family data
     *     (all SSTables references) obtained from meta.json.
     */
    public abstract void process(ColumnFamilyResult columnfamilyResult);

    /**
     * Returns if it is a valid meta file name.
     *
     * @param metaFilePath Path to the local meta file
     * @return true, if metafile name is valid.
     */
    public boolean isValidMetaFile(Path metaFilePath) {
        String fileName = metaFilePath.toFile().getName();
        if (fileName.startsWith(MetaFileInfo.META_FILE_PREFIX)
                && fileName.endsWith(MetaFileInfo.META_FILE_SUFFIX)) {
            // is valid date?
            // NOTE(review): DateUtil.parseInstant presumably throws on a malformed date rather
            // than letting this method return false -- confirm whether that is intended.
            String dateString =
                    fileName.substring(
                            MetaFileInfo.META_FILE_PREFIX.length(),
                            fileName.length() - MetaFileInfo.META_FILE_SUFFIX.length());
            DateUtil.parseInstant(dateString);
            return true;
        }
        return false;
    }
}
| 3,305 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/BackupVerificationTask.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.netflix.priam.backup.*;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.notification.BackupNotificationMgr;
import com.netflix.priam.scheduler.CronTimer;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.DateUtil.DateRange;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Created by aagrawal on 1/28/19. */
@Singleton
public class BackupVerificationTask extends Task {
private static final Logger logger = LoggerFactory.getLogger(BackupVerificationTask.class);
public static final String JOBNAME = "BackupVerificationService";
private IBackupRestoreConfig backupRestoreConfig;
private BackupVerification backupVerification;
private BackupMetrics backupMetrics;
private InstanceState instanceState;
private BackupNotificationMgr backupNotificationMgr;
    /**
     * Wires up the verification task.
     *
     * @param configuration Priam configuration, passed to the base {@code Task}.
     * @param backupRestoreConfig source of the verification SLO and cron settings.
     * @param backupVerification performs the actual backup verification.
     * @param backupMetrics metrics sink for verification failures.
     * @param instanceState used to skip verification while a restore is running.
     * @param backupNotificationMgr notified for each newly verified snapshot.
     */
    @Inject
    public BackupVerificationTask(
            IConfiguration configuration,
            IBackupRestoreConfig backupRestoreConfig,
            BackupVerification backupVerification,
            BackupMetrics backupMetrics,
            InstanceState instanceState,
            BackupNotificationMgr backupNotificationMgr) {
        super(configuration);
        this.backupRestoreConfig = backupRestoreConfig;
        this.backupVerification = backupVerification;
        this.backupMetrics = backupMetrics;
        this.instanceState = instanceState;
        this.backupNotificationMgr = backupNotificationMgr;
    }
    /**
     * Verifies V2 backups taken within the configured SLO window, sends a notification for each
     * snapshot verified by this run, and bumps a failure metric if no valid snapshot exists in
     * the window. No-op when V2 backups are disabled or a restore is in progress.
     *
     * @throws Exception propagated from backup verification.
     */
    @Override
    public void execute() throws Exception {
        // Ensure that backup version 2.0 is actually enabled.
        if (backupRestoreConfig.getSnapshotMetaServiceCronExpression().equals("-1")) {
            logger.info("Skipping backup verification. V2 backups are not enabled.");
            return;
        }
        // A restoring node's backups are not representative; skip verification entirely.
        if (instanceState.getRestoreStatus() != null
                && instanceState.getRestoreStatus().getStatus() != null
                && instanceState.getRestoreStatus().getStatus() == Status.STARTED) {
            logger.info("Skipping backup verification. Priam is in restore mode.");
            return;
        }
        // Validate the backup done in last x hours.
        Instant now = DateUtil.getInstant();
        Instant slo =
                now.minus(backupRestoreConfig.getBackupVerificationSLOInHours(), ChronoUnit.HOURS);
        DateRange dateRange = new DateRange(slo, now);
        List<BackupMetadata> verifiedBackups =
                backupVerification.verifyBackupsInRange(
                        BackupVersion.SNAPSHOT_META_SERVICE, dateRange);
        // NOTE(review): lastValidated > now presumably selects only the backups that
        // verifyBackupsInRange (re)validated during this run, so previously-notified snapshots
        // are not re-announced -- confirm against BackupVerification.
        verifiedBackups
                .stream()
                .filter(result -> result.getLastValidated().toInstant().isAfter(now))
                .forEach(
                        result -> {
                            // Drop the first path element of the snapshot location to build the
                            // notification key.
                            Path snapshotLocation = Paths.get(result.getSnapshotLocation());
                            String snapshotKey =
                                    snapshotLocation
                                            .subpath(1, snapshotLocation.getNameCount())
                                            .toString();
                            logger.info(
                                    "Sending {} message for backup: {}",
                                    AbstractBackupPath.BackupFileType.SNAPSHOT_VERIFIED,
                                    snapshotKey);
                            backupNotificationMgr.notify(
                                    snapshotKey, result.getStart().toInstant());
                        });
        if (verifiedBackups.isEmpty()) {
            logger.error(
                    "Not able to find any snapshot which is valid in our SLO window: {} hours",
                    backupRestoreConfig.getBackupVerificationSLOInHours());
            backupMetrics.incrementBackupVerificationFailure();
        }
    }
/**
* Interval between trying to verify data manifest file on Remote file system.
*
* @param backupRestoreConfig {@link IBackupRestoreConfig#getBackupVerificationCronExpression()}
* to get configuration details from priam. Use "-1" to disable the service.
* @return the timer to be used for snapshot verification service.
* @throws Exception if the configuration is not set correctly or are not valid. This is to
* ensure we fail-fast.
*/
public static TaskTimer getTimer(IBackupRestoreConfig backupRestoreConfig) throws Exception {
String cronExpression = backupRestoreConfig.getBackupVerificationCronExpression();
return CronTimer.getCronTimer(JOBNAME, cronExpression);
}
@Override
public String getName() {
return JOBNAME;
}
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.netflix.priam.backup.*;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.utils.DateUtil;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import javax.inject.Inject;
import javax.inject.Provider;
import org.apache.commons.collections4.iterators.FilterIterator;
import org.apache.commons.collections4.iterators.TransformIterator;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.FileFilterUtils;
import org.apache.commons.io.filefilter.IOFileFilter;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Do any management task for meta files. Created by aagrawal on 8/2/18. */
public class MetaV2Proxy implements IMetaProxy {
    private static final Logger logger = LoggerFactory.getLogger(MetaV2Proxy.class);
    private final Path metaFileDirectory;
    private final IBackupFileSystem fs;
    private final Provider<AbstractBackupPath> abstractBackupPathProvider;

    @Inject
    public MetaV2Proxy(
            IConfiguration configuration,
            IFileSystemContext backupFileSystemCtx,
            Provider<AbstractBackupPath> abstractBackupPathProvider) {
        fs = backupFileSystemCtx.getFileStrategy(configuration);
        this.abstractBackupPathProvider = abstractBackupPathProvider;
        metaFileDirectory = Paths.get(configuration.getDataFileLocation());
    }

    @Override
    public Path getLocalMetaFileDirectory() {
        return metaFileDirectory;
    }

    @Override
    public String getMetaPrefix(DateUtil.DateRange dateRange) {
        return getMatch(dateRange, AbstractBackupPath.BackupFileType.META_V2);
    }

    /**
     * Builds the remote-file-system prefix used to list files of the given type. A null date
     * range yields the bare type prefix; a range with no end time pins the exact start-time
     * marker instead of the range match.
     */
    private String getMatch(
            DateUtil.DateRange dateRange, AbstractBackupPath.BackupFileType backupFileType) {
        Path location = fs.getPrefix();
        AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get();
        String match = StringUtils.EMPTY;
        if (dateRange != null) match = dateRange.match();
        if (dateRange != null && dateRange.getEndTime() == null)
            match = dateRange.getStartTime().toEpochMilli() + "";
        return Paths.get(
                        abstractBackupPath.remoteV2Prefix(location, backupFileType).toString(),
                        match)
                .toString();
    }

    /**
     * Lazily lists all SST_V2 files on the remote file system whose last-modified time falls
     * inside the (inclusive) date range.
     */
    @Override
    public Iterator<AbstractBackupPath> getIncrementals(DateUtil.DateRange dateRange) {
        String incrementalPrefix = getMatch(dateRange, AbstractBackupPath.BackupFileType.SST_V2);
        String marker =
                getMatch(
                        new DateUtil.DateRange(dateRange.getStartTime(), null),
                        AbstractBackupPath.BackupFileType.SST_V2);
        logger.info(
                "Listing filesystem with prefix: {}, marker: {}, daterange: {}",
                incrementalPrefix,
                marker,
                dateRange);
        Iterator<String> iterator = fs.listFileSystem(incrementalPrefix, null, marker);
        // Parse each remote listing entry into an AbstractBackupPath...
        Iterator<AbstractBackupPath> transformIterator =
                new TransformIterator<>(
                        iterator,
                        s -> {
                            AbstractBackupPath path = abstractBackupPathProvider.get();
                            path.parseRemote(s);
                            return path;
                        });
        // ...and keep only entries modified inside [startTime, endTime] (inclusive bounds).
        return new FilterIterator<>(
                transformIterator,
                abstractBackupPath ->
                        (abstractBackupPath.getLastModified().isAfter(dateRange.getStartTime())
                                        && abstractBackupPath
                                                .getLastModified()
                                                .isBefore(dateRange.getEndTime()))
                                || abstractBackupPath
                                        .getLastModified()
                                        .equals(dateRange.getStartTime())
                                || abstractBackupPath
                                        .getLastModified()
                                        .equals(dateRange.getEndTime()));
    }

    /**
     * Finds all manifest (META_V2) files inside the date range, sorted newest first. Returns
     * an empty list when none exist.
     */
    @Override
    public List<AbstractBackupPath> findMetaFiles(DateUtil.DateRange dateRange) {
        ArrayList<AbstractBackupPath> metas = new ArrayList<>();
        String prefix = getMetaPrefix(dateRange);
        String marker = getMetaPrefix(new DateUtil.DateRange(dateRange.getStartTime(), null));
        logger.info(
                "Listing filesystem with prefix: {}, marker: {}, daterange: {}",
                prefix,
                marker,
                dateRange);
        Iterator<String> iterator = fs.listFileSystem(prefix, null, marker);
        while (iterator.hasNext()) {
            AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get();
            abstractBackupPath.parseRemote(iterator.next());
            logger.debug("Meta file found: {}", abstractBackupPath);
            if (abstractBackupPath.getLastModified().toEpochMilli()
                            >= dateRange.getStartTime().toEpochMilli()
                    && abstractBackupPath.getLastModified().toEpochMilli()
                            <= dateRange.getEndTime().toEpochMilli()) {
                metas.add(abstractBackupPath);
            }
        }
        // Newest manifest first.
        metas.sort(Collections.reverseOrder());
        if (metas.isEmpty()) {
            logger.info(
                    "No meta file found on remote file system for the time period: {}", dateRange);
        }
        return metas;
    }

    /** Downloads the manifest to the local restore location and returns its local path. */
    @Override
    public Path downloadMetaFile(AbstractBackupPath meta) throws BackupRestoreException {
        fs.downloadFile(meta, "" /* suffix */, 10 /* retries */);
        return Paths.get(meta.newRestoreFile().getAbsolutePath());
    }

    /** Deletes leftover meta_v2 manifest files (including half-written *.tmp) from disk. */
    @Override
    public void cleanupOldMetaFiles() {
        logger.info("Deleting any old META_V2 files if any");
        // Match meta_v2 files with either the final suffix or the temporary ".tmp" suffix.
        IOFileFilter fileNameFilter =
                FileFilterUtils.and(
                        FileFilterUtils.prefixFileFilter(MetaFileInfo.META_FILE_PREFIX),
                        FileFilterUtils.or(
                                FileFilterUtils.suffixFileFilter(MetaFileInfo.META_FILE_SUFFIX),
                                FileFilterUtils.suffixFileFilter(
                                        MetaFileInfo.META_FILE_SUFFIX + ".tmp")));
        Collection<File> files =
                FileUtils.listFiles(metaFileDirectory.toFile(), fileNameFilter, null);
        files.stream()
                .filter(File::isFile)
                .forEach(
                        file -> {
                            logger.debug(
                                    "Deleting old META_V2 file found: {}", file.getAbsolutePath());
                            // deleteQuietly: a single failed delete must not abort the sweep
                            // (File.delete()'s boolean result was previously ignored).
                            FileUtils.deleteQuietly(file);
                        });
    }

    @Override
    public List<String> getSSTFilesFromMeta(Path localMetaPath) throws Exception {
        MetaFileBackupWalker metaFileBackupWalker = new MetaFileBackupWalker();
        metaFileBackupWalker.readMeta(localMetaPath);
        return metaFileBackupWalker.backupRemotePaths;
    }

    /**
     * Downloads the manifest and verifies that every file it references still exists on the
     * remote file system. The manifest copy is always deleted afterwards.
     */
    @Override
    public BackupVerificationResult isMetaFileValid(AbstractBackupPath metaBackupPath) {
        MetaFileBackupValidator metaFileBackupValidator = new MetaFileBackupValidator();
        BackupVerificationResult result = metaFileBackupValidator.verificationResult;
        result.remotePath = metaBackupPath.getRemotePath();
        result.snapshotInstant = metaBackupPath.getLastModified();
        Path metaFile = null;
        try {
            metaFile = downloadMetaFile(metaBackupPath);
            result.manifestAvailable = true;
            metaFileBackupValidator.readMeta(metaFile);
            // Valid only when no manifest entry is missing from the remote file system.
            result.valid = (result.filesInMetaOnly.isEmpty());
        } catch (FileNotFoundException fne) {
            logger.error(fne.getLocalizedMessage());
        } catch (IOException ioe) {
            // Pass the throwable as the last argument so SLF4J records the stack trace
            // (previously only the message string was logged and printStackTrace() used).
            logger.error("IO Error while processing meta file: {}", metaFile, ioe);
        } catch (BackupRestoreException bre) {
            logger.error(
                    "Error while trying to download the manifest file: {}", metaBackupPath, bre);
        } finally {
            if (metaFile != null) FileUtils.deleteQuietly(metaFile.toFile());
        }
        return result;
    }

    /** Checks each SSTable component in a manifest against the remote file system. */
    private class MetaFileBackupValidator extends MetaFileReader {
        private BackupVerificationResult verificationResult = new BackupVerificationResult();

        @Override
        public void process(ColumnFamilyResult columnfamilyResult) {
            for (ColumnFamilyResult.SSTableResult ssTableResult :
                    columnfamilyResult.getSstables()) {
                for (FileUploadResult fileUploadResult : ssTableResult.getSstableComponents()) {
                    if (fs.checkObjectExists(Paths.get(fileUploadResult.getBackupPath()))) {
                        verificationResult.filesMatched++;
                    } else {
                        verificationResult.filesInMetaOnly.add(fileUploadResult.getBackupPath());
                    }
                }
            }
        }
    }

    /** Collects every remote backup path referenced by a manifest. */
    // static: uses no MetaV2Proxy state, so avoid the hidden enclosing-instance reference.
    private static class MetaFileBackupWalker extends MetaFileReader {
        private final List<String> backupRemotePaths = new ArrayList<>();

        @Override
        public void process(ColumnFamilyResult columnfamilyResult) {
            for (ColumnFamilyResult.SSTableResult ssTableResult :
                    columnfamilyResult.getSstables()) {
                for (FileUploadResult fileUploadResult : ssTableResult.getSstableComponents()) {
                    backupRemotePaths.add(fileUploadResult.getBackupPath());
                }
            }
        }
    }
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.gson.stream.JsonWriter;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.backup.IFileSystemContext;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.InstanceIdentity;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class will help in generation of meta.json files. This will encapsulate all the SSTables
 * that were there on the file system. This will write the meta.json file as a JSON blob. NOTE: We
 * want to ensure that it is done via streaming JSON write to ensure we do not consume memory to
 * load all these objects in memory. With multi-tenant clusters or LCS enabled on large number of
 * CF's it is easy to have 1000's of SSTables (thus 1000's of SSTable components) across CF's.
 * Created by aagrawal on 6/12/18.
 */
public class MetaFileWriterBuilder {
    private final MetaFileWriter metaFileWriter;
    private static final Logger logger = LoggerFactory.getLogger(MetaFileWriterBuilder.class);

    @Inject
    public MetaFileWriterBuilder(MetaFileWriter metaFileWriter) {
        this.metaFileWriter = metaFileWriter;
    }

    /** Entry point for the staged builder: start -> add data -> end -> upload. */
    public StartStep newBuilder() throws IOException {
        return metaFileWriter;
    }

    public interface StartStep {
        DataStep startMetaFileGeneration(Instant snapshotInstant) throws IOException;
    }

    public interface DataStep {
        ColumnFamilyResult addColumnfamilyResult(
                String keyspace,
                String columnFamily,
                ImmutableMultimap<String, AbstractBackupPath> sstables)
                throws IOException;

        UploadStep endMetaFileGeneration() throws IOException;
    }

    public interface UploadStep {
        void uploadMetaFile() throws Exception;

        Path getMetaFilePath();

        String getRemoteMetaFilePath() throws Exception;
    }

    public static class MetaFileWriter implements StartStep, DataStep, UploadStep {
        private final Provider<AbstractBackupPath> pathFactory;
        private final IBackupFileSystem backupFileSystem;
        private final MetaFileInfo metaFileInfo;
        private final IMetaProxy metaProxy;
        private JsonWriter jsonWriter;
        private Instant snapshotInstant;
        private Path metaFilePath;

        @Inject
        public MetaFileWriter(
                IConfiguration configuration,
                InstanceIdentity instanceIdentity,
                Provider<AbstractBackupPath> pathFactory,
                IFileSystemContext backupFileSystemCtx,
                @Named("v2") IMetaProxy metaProxy) {
            this.pathFactory = pathFactory;
            this.backupFileSystem = backupFileSystemCtx.getFileStrategy(configuration);
            this.metaProxy = metaProxy;
            List<String> backupIdentifier = new ArrayList<>();
            backupIdentifier.add(instanceIdentity.getInstance().getToken());
            metaFileInfo =
                    new MetaFileInfo(
                            configuration.getAppName(),
                            instanceIdentity.getInstanceInfo().getRegion(),
                            instanceIdentity.getInstanceInfo().getRac(),
                            backupIdentifier);
        }

        /**
         * Start the generation of meta file. Writes to a ".tmp" file first so a half-written
         * manifest is never mistaken for a complete one.
         *
         * @throws IOException if unable to write to meta file (permissions, disk full etc)
         */
        public DataStep startMetaFileGeneration(Instant snapshotInstant) throws IOException {
            // Compute meta file name.
            this.snapshotInstant = snapshotInstant;
            String fileName = MetaFileInfo.getMetaFileName(snapshotInstant);
            metaFilePath = Paths.get(metaProxy.getLocalMetaFileDirectory().toString(), fileName);
            Path tempMetaFilePath =
                    Paths.get(metaProxy.getLocalMetaFileDirectory().toString(), fileName + ".tmp");
            logger.info("Starting to write a new meta file: {}", metaFilePath);
            jsonWriter = new JsonWriter(new FileWriter(tempMetaFilePath.toFile()));
            jsonWriter.beginObject();
            jsonWriter.name(MetaFileInfo.META_FILE_INFO);
            jsonWriter.jsonValue(metaFileInfo.toString());
            jsonWriter.name(MetaFileInfo.META_FILE_DATA);
            jsonWriter.beginArray();
            return this;
        }

        /**
         * Add {@link ColumnFamilyResult} after it has been processed so it can be streamed to
         * meta.json. Streaming write to meta.json is required so we don't get Priam OOM.
         *
         * @throws IOException if unable to write to the file or if JSON is not valid
         */
        public ColumnFamilyResult addColumnfamilyResult(
                String keyspace,
                String columnFamily,
                ImmutableMultimap<String, AbstractBackupPath> sstables)
                throws IOException {
            if (jsonWriter == null)
                throw new NullPointerException(
                        "addColumnfamilyResult: Json Writer in MetaFileWriter is null. This should not happen!");
            ColumnFamilyResult result = toColumnFamilyResult(keyspace, columnFamily, sstables);
            jsonWriter.jsonValue(result.toString());
            return result;
        }

        /**
         * Finish the generation of meta.json file and save it on local media.
         *
         * @return {@link Path} to the local meta.json produced.
         * @throws IOException if unable to write to file, if JSON is not valid, or if the
         *     temporary manifest cannot be renamed into place.
         */
        public MetaFileWriterBuilder.UploadStep endMetaFileGeneration() throws IOException {
            if (jsonWriter == null)
                throw new NullPointerException(
                        "endMetaFileGeneration: Json Writer in MetaFileWriter is null. This should not happen!");
            jsonWriter.endArray();
            jsonWriter.endObject();
            jsonWriter.close();
            Path tempMetaFilePath =
                    Paths.get(
                            metaProxy.getLocalMetaFileDirectory().toString(),
                            metaFilePath.toFile().getName() + ".tmp");
            // Rename the tmp file. A failed rename was previously ignored and would silently
            // leave no manifest behind — fail fast instead.
            if (!tempMetaFilePath.toFile().renameTo(metaFilePath.toFile())) {
                throw new IOException(
                        "Unable to rename temporary meta file "
                                + tempMetaFilePath
                                + " to "
                                + metaFilePath);
            }
            // Set the last modified time to snapshot time as generating manifest file may take
            // some time. Not fatal if the platform refuses; just note it.
            if (!metaFilePath.toFile().setLastModified(snapshotInstant.toEpochMilli())) {
                logger.warn("Unable to set last-modified time on meta file: {}", metaFilePath);
            }
            logger.info("Finished writing to meta file: {}", metaFilePath);
            return this;
        }

        /**
         * Upload the meta file generated to backup file system.
         *
         * @throws Exception when unable to upload the meta file.
         */
        public void uploadMetaFile() throws Exception {
            AbstractBackupPath abstractBackupPath = pathFactory.get();
            abstractBackupPath.parseLocal(
                    metaFilePath.toFile(), AbstractBackupPath.BackupFileType.META_V2);
            backupFileSystem.uploadAndDelete(abstractBackupPath, false /* async */);
        }

        public Path getMetaFilePath() {
            return metaFilePath;
        }

        public String getRemoteMetaFilePath() throws Exception {
            AbstractBackupPath abstractBackupPath = pathFactory.get();
            abstractBackupPath.parseLocal(
                    metaFilePath.toFile(), AbstractBackupPath.BackupFileType.META_V2);
            return abstractBackupPath.getRemotePath();
        }

        /** Groups the uploaded components per SSTable prefix into a serializable CF result. */
        private ColumnFamilyResult toColumnFamilyResult(
                String keyspace,
                String columnFamily,
                ImmutableMultimap<String, AbstractBackupPath> sstables) {
            ColumnFamilyResult columnfamilyResult = new ColumnFamilyResult(keyspace, columnFamily);
            sstables.keySet()
                    .stream()
                    .map(k -> toSSTableResult(k, sstables.get(k)))
                    .forEach(columnfamilyResult::addSstable);
            return columnfamilyResult;
        }

        private ColumnFamilyResult.SSTableResult toSSTableResult(
                String prefix, ImmutableCollection<AbstractBackupPath> sstable) {
            ColumnFamilyResult.SSTableResult ssTableResult = new ColumnFamilyResult.SSTableResult();
            ssTableResult.setPrefix(prefix);
            ssTableResult.setSstableComponents(
                    ImmutableSet.copyOf(
                            sstable.stream()
                                    .map(this::toFileUploadResult)
                                    .collect(Collectors.toSet())));
            return ssTableResult;
        }

        /** Marks whether the component already exists remotely; existence check is best-effort. */
        private FileUploadResult toFileUploadResult(AbstractBackupPath path) {
            FileUploadResult fileUploadResult = new FileUploadResult(path);
            try {
                Path backupPath = Paths.get(fileUploadResult.getBackupPath());
                fileUploadResult.setUploaded(backupFileSystem.checkObjectExists(backupPath));
            } catch (Exception e) {
                logger.error("Error checking if file exists. Ignoring as it is not fatal.", e);
            }
            return fileUploadResult;
        }
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.netflix.priam.backup.IncrementalBackup;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.defaultimpl.IService;
import com.netflix.priam.identity.token.ITokenRetriever;
import com.netflix.priam.scheduler.PriamScheduler;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.tuner.CassandraTunerService;
import javax.inject.Inject;
import org.apache.commons.lang3.math.Fraction;
/**
 * Encapsulate the backup service 2.0 - Execute all the tasks required to run backup service.
 * Created by aagrawal on 3/9/19.
 */
public class BackupV2Service implements IService {
    private final PriamScheduler scheduler;
    private final IConfiguration configuration;
    private final IBackupRestoreConfig backupRestoreConfig;
    private final SnapshotMetaTask snapshotMetaTask;
    private final CassandraTunerService cassandraTunerService;
    private final ITokenRetriever tokenRetriever;

    @Inject
    public BackupV2Service(
            IConfiguration configuration,
            IBackupRestoreConfig backupRestoreConfig,
            PriamScheduler scheduler,
            SnapshotMetaTask snapshotMetaService,
            CassandraTunerService cassandraTunerService,
            ITokenRetriever tokenRetriever) {
        this.configuration = configuration;
        this.backupRestoreConfig = backupRestoreConfig;
        this.scheduler = scheduler;
        this.snapshotMetaTask = snapshotMetaService;
        this.cassandraTunerService = cassandraTunerService;
        this.tokenRetriever = tokenRetriever;
    }

    /** Wires up every scheduled task that the v2 backup pipeline requires. */
    @Override
    public void scheduleService() throws Exception {
        TaskTimer metaTaskTimer = SnapshotMetaTask.getTimer(backupRestoreConfig);
        boolean v2BackupsEnabled = metaTaskTimer != null;

        if (!v2BackupsEnabled) {
            // v2 backups are off: sweep any stale snapshot directories left on disk.
            SnapshotMetaTask.cleanOldBackups(configuration);
        }
        scheduleTask(scheduler, SnapshotMetaTask.class, metaTaskTimer);

        if (v2BackupsEnabled) {
            // Try to upload previous snapshots, if any, which might have been interrupted by
            // a Priam restart, then keep the verification service on its own schedule.
            snapshotMetaTask.uploadFiles();
            scheduleTask(
                    scheduler,
                    BackupVerificationTask.class,
                    BackupVerificationTask.getTimer(backupRestoreConfig));
        } else {
            scheduler.deleteTask(BackupVerificationTask.JOBNAME);
        }

        // Schedule the TTL service. The ring position cannot be computed during a local
        // bootstrap, so fall back to the midpoint.
        Fraction ringPosition =
                configuration.isLocalBootstrapEnabled()
                        ? Fraction.ONE_HALF
                        : tokenRetriever.getRingPosition();
        scheduleTask(
                scheduler,
                BackupTTLTask.class,
                BackupTTLTask.getTimer(backupRestoreConfig, ringPosition));

        // Start the incremental backup schedule if enabled.
        scheduleTask(
                scheduler,
                IncrementalBackup.class,
                IncrementalBackup.getTimer(configuration, backupRestoreConfig));
    }

    /** Re-tunes Cassandra so incremental file creation matches the current configuration. */
    @Override
    public void updateServicePre() throws Exception {
        cassandraTunerService.onChangeUpdateService();
    }

    @Override
    public void updateServicePost() throws Exception {}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSetMultimap;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.priam.backup.*;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.connection.CassandraOperations;
import com.netflix.priam.health.CassandraMonitor;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.scheduler.CronTimer;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.DateUtil;
import java.io.File;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.text.ParseException;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneId;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import org.apache.commons.io.FileUtils;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This service will run on CRON as specified by {@link
* IBackupRestoreConfig#getSnapshotMetaServiceCronExpression()} The intent of this service is to run
* a full snapshot on Cassandra, get the list of the SSTables on disk and then create a
* manifest.json file which will encapsulate the list of the files i.e. capture filesystem at a
* moment in time. This manifest.json file will ensure the true filesystem status is exposed (for
* external entities) and will be used in future for Priam Backup Version 2 where a file is not
* uploaded to backup file system unless SSTable has been modified. This will lead to huge reduction
* in storage costs and provide bandwidth back to Cassandra instead of creating/uploading snapshots.
* Note that this component will "try" to enqueue the files to upload, but no guarantee is provided.
* If the enqueue fails for any reason, it is considered "OK" as there will be another service
* pushing all the files in the queue for upload (think of this like a cleanup thread and will help
* us in "resuming" any failed backup for any reason). Created by aagrawal on 6/18/18.
*/
@Singleton
public class SnapshotMetaTask extends AbstractBackup {
public static final String JOBNAME = "SnapshotMetaService";
private static final Logger logger = LoggerFactory.getLogger(SnapshotMetaTask.class);
private static final String SNAPSHOT_PREFIX = "snap_v2_";
private static final String CASSANDRA_MANIFEST_FILE = "manifest.json";
private static final String CASSANDRA_SCHEMA_FILE = "schema.cql";
private static final TimeZone UTC = TimeZone.getTimeZone(ZoneId.of("UTC"));
private final BackupRestoreUtil backupRestoreUtil;
private final MetaFileWriterBuilder metaFileWriter;
private MetaFileWriterBuilder.DataStep dataStep;
private final IMetaProxy metaProxy;
private final CassandraOperations cassandraOperations;
private String snapshotName = null;
private static final Lock lock = new ReentrantLock();
private final IBackupStatusMgr snapshotStatusMgr;
private final InstanceIdentity instanceIdentity;
private final IConfiguration config;
private final Clock clock;
private final IBackupRestoreConfig backupRestoreConfig;
private final BackupVerification backupVerification;
private final BackupHelper backupHelper;
private enum MetaStep {
META_GENERATION,
UPLOAD_FILES
}
private MetaStep metaStep = MetaStep.META_GENERATION;
    /**
     * Builds the snapshot meta task with its injected collaborators. The keyspace/CF
     * include-exclude filter is derived from the snapshot include/exclude lists in
     * {@code config}.
     */
    @Inject
    public SnapshotMetaTask(
            IConfiguration config,
            BackupHelper backupHelper,
            MetaFileWriterBuilder metaFileWriter,
            @Named("v2") IMetaProxy metaProxy,
            InstanceIdentity instanceIdentity,
            IBackupStatusMgr snapshotStatusMgr,
            CassandraOperations cassandraOperations,
            Clock clock,
            IBackupRestoreConfig backupRestoreConfig,
            BackupVerification backupVerification) {
        super(config);
        this.config = config;
        this.backupHelper = backupHelper;
        this.instanceIdentity = instanceIdentity;
        this.snapshotStatusMgr = snapshotStatusMgr;
        this.cassandraOperations = cassandraOperations;
        this.clock = clock;
        this.backupRestoreConfig = backupRestoreConfig;
        this.backupVerification = backupVerification;
        backupRestoreUtil =
                new BackupRestoreUtil(
                        config.getSnapshotIncludeCFList(), config.getSnapshotExcludeCFList());
        this.metaFileWriter = metaFileWriter;
        this.metaProxy = metaProxy;
    }
/**
* Interval between generating snapshot meta file using {@link SnapshotMetaTask}.
*
* @param config {@link IBackupRestoreConfig#getSnapshotMetaServiceCronExpression()} to get
* configuration details from priam. Use "-1" to disable the service.
* @return the timer to be used for snapshot meta service.
* @throws IllegalArgumentException if the configuration is not set correctly or are not valid.
* This is to ensure we fail-fast.
*/
public static TaskTimer getTimer(IBackupRestoreConfig config) throws IllegalArgumentException {
return CronTimer.getCronTimer(JOBNAME, config.getSnapshotMetaServiceCronExpression());
}
static void cleanOldBackups(IConfiguration config) throws Exception {
// Clean up all the backup directories, if any.
Set<Path> backupPaths = AbstractBackup.getBackupDirectories(config, SNAPSHOT_FOLDER);
for (Path backupDirPath : backupPaths)
try (DirectoryStream<Path> directoryStream =
Files.newDirectoryStream(backupDirPath, Files::isDirectory)) {
for (Path backupDir : directoryStream) {
if (backupDir.toFile().getName().startsWith(SNAPSHOT_PREFIX)) {
FileUtils.deleteDirectory(backupDir.toFile());
}
}
}
}
public static boolean isBackupEnabled(IBackupRestoreConfig backupRestoreConfig)
throws Exception {
return (getTimer(backupRestoreConfig) != null);
}
String generateSnapshotName(Instant snapshotInstant) {
return SNAPSHOT_PREFIX + DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, snapshotInstant);
}
/**
* Enqueue all the files for upload in the snapshot directory. This will only enqueue the files
* and do not give guarantee as when they will be uploaded. It will only try to upload files
* which matches backup version 2.0 naming conventions.
*/
public void uploadFiles() {
try {
// enqueue all the old snapshot folder for upload/delete, if any, as we don't want
// our disk to be filled by them.
metaStep = MetaStep.UPLOAD_FILES;
initiateBackup(SNAPSHOT_FOLDER, backupRestoreUtil);
logger.info("Finished queuing the files for upload");
} catch (Exception e) {
logger.error("Error while trying to upload all the files", e);
e.printStackTrace();
} finally {
metaStep = MetaStep.META_GENERATION;
}
}
    /**
     * Runs one snapshot-meta cycle: takes a Cassandra snapshot, writes and uploads the v2
     * manifest (meta_v2.json), then enqueues all snapshot files for upload. Progress is
     * recorded via snapshotStatusMgr (start/finish/failed). Guarded by a lock so overlapping
     * CRON firings cannot run concurrently.
     */
    @Override
    public void execute() throws Exception {
        if (!CassandraMonitor.hasCassadraStarted()) {
            logger.debug("Cassandra has not started, hence SnapshotMetaService will not run");
            return;
        }
        // Do not allow more than one snapshotMetaService to run at the same time. This is possible
        // as this happens on CRON.
        if (!lock.tryLock()) {
            logger.warn("SnapshotMetaService is already running! Try again later.");
            throw new Exception("SnapshotMetaService already running");
        }
        // Save start snapshot status
        Instant snapshotInstant = clock.instant();
        String token = instanceIdentity.getInstance().getToken();
        BackupMetadata backupMetadata =
                new BackupMetadata(
                        BackupVersion.SNAPSHOT_META_SERVICE,
                        token,
                        new Date(snapshotInstant.toEpochMilli()));
        snapshotStatusMgr.start(backupMetadata);
        try {
            snapshotName = generateSnapshotName(snapshotInstant);
            logger.info("Initializing SnapshotMetaService for taking a snapshot {}", snapshotName);
            // Perform a cleanup of old snapshot meta_v2.json files, if any, as we don't want our
            // disk to be filled by them.
            // These files may be leftover
            // 1) when Priam shutdown in middle of this service and may not be full JSON
            // 2) No permission to upload to backup file system.
            metaProxy.cleanupOldMetaFiles();
            // Take a new snapshot
            cassandraOperations.takeSnapshot(snapshotName);
            backupMetadata.setCassandraSnapshotSuccess(true);
            // Process the snapshot and upload the meta file.
            MetaFileWriterBuilder.UploadStep uploadStep = processSnapshot(snapshotInstant);
            backupMetadata.setSnapshotLocation(
                    config.getBackupPrefix() + File.separator + uploadStep.getRemoteMetaFilePath());
            uploadStep.uploadMetaFile();
            logger.info("Finished processing snapshot meta service");
            // Upload all the files from snapshot
            uploadFiles();
            snapshotStatusMgr.finish(backupMetadata);
        } catch (Exception e) {
            // Any failure marks the whole backup attempt as failed; the lock is always released.
            logger.error("Error while executing SnapshotMetaService", e);
            snapshotStatusMgr.failed(backupMetadata);
        } finally {
            lock.unlock();
        }
    }
    /**
     * Walks the freshly-taken snapshot (via {@code initiateBackup}) to generate the meta file
     * contents, then returns the writer step that can upload the completed meta file.
     *
     * @param snapshotInstant time at which this snapshot run started; stamped into the meta file.
     * @return the upload step holding the fully generated, not-yet-uploaded meta file.
     * @throws Exception on any failure while scanning the snapshot or writing the meta file.
     */
    MetaFileWriterBuilder.UploadStep processSnapshot(Instant snapshotInstant) throws Exception {
        dataStep = metaFileWriter.newBuilder().startMetaFileGeneration(snapshotInstant);
        initiateBackup(SNAPSHOT_FOLDER, backupRestoreUtil);
        return dataStep.endMetaFileGeneration();
    }
private File getValidSnapshot(File snapshotDir, String snapshotName) {
File[] snapshotDirectories = snapshotDir.listFiles();
if (snapshotDirectories != null)
for (File fileName : snapshotDirectories)
if (fileName.exists()
&& fileName.isDirectory()
&& fileName.getName().matches(snapshotName)) return fileName;
return null;
}
    /** @return the job name under which the scheduler registers this task. */
    @Override
    public String getName() {
        return JOBNAME;
    }
private void uploadAllFiles(final File backupDir) throws Exception {
// Process all the snapshots with SNAPSHOT_PREFIX. This will ensure that we "resume" the
// uploads of previous snapshot leftover as Priam restarted or any failure for any reason
// (like we exhausted the wait time for upload)
File[] snapshotDirectories = backupDir.listFiles();
if (snapshotDirectories != null) {
Instant target = getUploadTarget();
for (File snapshotDirectory : snapshotDirectories) {
// Is it a valid SNAPSHOT_PREFIX
if (!snapshotDirectory.getName().startsWith(SNAPSHOT_PREFIX)
|| !snapshotDirectory.isDirectory()) continue;
if (FileUtils.sizeOfDirectory(snapshotDirectory) == 0) {
FileUtils.deleteQuietly(snapshotDirectory);
continue;
}
// Process each snapshot of SNAPSHOT_PREFIX
// We do not want to wait for completion and we just want to add them to queue. This
// is to ensure that next run happens on time.
AbstractBackupPath.BackupFileType type = AbstractBackupPath.BackupFileType.SST_V2;
backupHelper
.uploadAndDeleteAllFiles(snapshotDirectory, type, target, true)
.forEach(future -> addCallback(future, snapshotDirectory));
}
}
}
    /**
     * Computes the deadline by which queued snapshot uploads should complete: the earliest of (a)
     * now plus the configured target minutes, (b) the latest verified backup time plus the
     * verification SLO in hours, and (c) the next scheduled snapshot run derived from the cron
     * expression. A candidate that cannot be computed degrades to {@code Instant.MAX}, i.e. it
     * imposes no constraint.
     *
     * @return the earliest applicable upload deadline.
     */
    private Instant getUploadTarget() {
        Instant now = clock.instant();
        Instant target =
                now.plus(config.getTargetMinutesToCompleteSnaphotUpload(), ChronoUnit.MINUTES);
        Duration verificationSLO =
                Duration.ofHours(backupRestoreConfig.getBackupVerificationSLOInHours());
        Instant verificationDeadline =
                backupVerification
                        .getLatestVerfifiedBackupTime()
                        .map(backupTime -> backupTime.plus(verificationSLO))
                        .orElse(Instant.MAX);
        Instant nextSnapshotTime;
        try {
            CronExpression snapshotCron =
                    new CronExpression(backupRestoreConfig.getSnapshotMetaServiceCronExpression());
            snapshotCron.setTimeZone(UTC);
            Date nextSnapshotDate = snapshotCron.getNextValidTimeAfter(Date.from(now));
            nextSnapshotTime =
                    nextSnapshotDate == null ? Instant.MAX : nextSnapshotDate.toInstant();
        } catch (ParseException e) {
            // An unparseable cron simply means the next run cannot bound the deadline.
            nextSnapshotTime = Instant.MAX;
        }
        return earliest(target, verificationDeadline, nextSnapshotTime);
    }
private Instant earliest(Instant... instants) {
return Arrays.stream(instants).min(Instant::compareTo).get();
}
private Void deleteIfEmpty(File dir) {
if (FileUtils.sizeOfDirectory(dir) == 0) FileUtils.deleteQuietly(dir);
return null;
}
@Override
protected void processColumnFamily(File backupDir) throws Exception {
String keyspace = getKeyspace(backupDir);
String columnFamily = getColumnFamily(backupDir);
switch (metaStep) {
case META_GENERATION:
generateMetaFile(keyspace, columnFamily, backupDir)
.ifPresent(this::deleteUploadedFiles);
break;
case UPLOAD_FILES:
uploadAllFiles(backupDir);
break;
default:
throw new Exception("Unknown meta file type: " + metaStep);
}
}
private Optional<ColumnFamilyResult> generateMetaFile(
final String keyspace, final String columnFamily, final File backupDir)
throws Exception {
File snapshotDir = getValidSnapshot(backupDir, snapshotName);
// Process this snapshot folder for the given columnFamily
if (snapshotDir == null) {
logger.warn("{} folder does not contain {} snapshots", backupDir, snapshotName);
return Optional.empty();
}
logger.debug("Scanning for all SSTables in: {}", snapshotDir.getAbsolutePath());
ImmutableSetMultimap.Builder<String, AbstractBackupPath> builder =
ImmutableSetMultimap.builder();
builder.putAll(getSSTables(snapshotDir, AbstractBackupPath.BackupFileType.SST_V2));
ImmutableSetMultimap<String, AbstractBackupPath> sstables = builder.build();
logger.debug("Processing {} sstables from {}.{}", keyspace, columnFamily, sstables.size());
ColumnFamilyResult result =
dataStep.addColumnfamilyResult(keyspace, columnFamily, sstables);
logger.debug("Finished processing KS: {}, CF: {}", keyspace, columnFamily);
return Optional.of(result);
}
private void deleteUploadedFiles(ColumnFamilyResult result) {
result.getSstables()
.stream()
.flatMap(sstable -> sstable.getSstableComponents().stream())
.filter(file -> Boolean.TRUE.equals(file.getIsUploaded()))
.forEach(file -> FileUtils.deleteQuietly(file.getFileName().toFile()));
}
    /**
     * Groups the backup paths found under {@code snapshotDir} by their sstable prefix (common base
     * name), silently skipping files for which no prefix can be derived.
     *
     * @param snapshotDir snapshot directory to scan.
     * @param type backup file type to tag the discovered files with.
     * @return multimap of sstable prefix to the backup paths sharing that prefix.
     * @throws IOException if the directory cannot be traversed.
     */
    private ImmutableSetMultimap<String, AbstractBackupPath> getSSTables(
            File snapshotDir, AbstractBackupPath.BackupFileType type) throws IOException {
        ImmutableSetMultimap.Builder<String, AbstractBackupPath> ssTables =
                ImmutableSetMultimap.builder();
        backupHelper
                .getBackupPaths(snapshotDir, type)
                .forEach(bp -> getPrefix(bp.getBackupFile()).ifPresent(p -> ssTables.put(p, bp)));
        return ssTables.build();
    }
/**
* Gives the prefix (common name) of the sstable components. Returns an empty Optional if it is
* not an sstable component or a manifest or schema file.
*
* <p>For example: mc-3-big-Data.db -- mc-3-big ks-cf-ka-7213-Index.db -- ks-cf-ka-7213
*
* @param file the file from which to extract a common prefix.
* @return common prefix of the file, or empty,
*/
private static Optional<String> getPrefix(File file) {
String fileName = file.getName();
String prefix = null;
if (fileName.contains("-")) {
prefix = fileName.substring(0, fileName.lastIndexOf("-"));
} else if (fileName.equalsIgnoreCase(CASSANDRA_MANIFEST_FILE)) {
prefix = "manifest";
} else {
logger.error("Unknown file type with no SSTFileBase found: {}", file.getAbsolutePath());
}
return Optional.ofNullable(prefix);
}
    /** Overrides the generated snapshot name; intended for tests only. */
    @VisibleForTesting
    void setSnapshotName(String snapshotName) {
        this.snapshotName = snapshotName;
    }
    /**
     * Attaches a logging callback to an upload future so that failures to upload any file from
     * {@code snapshotDir} are recorded. Successes are intentionally silent.
     */
    private static void addCallback(ListenableFuture<AbstractBackupPath> future, File snapshotDir) {
        FutureCallback<AbstractBackupPath> callback =
                new FutureCallback<AbstractBackupPath>() {
                    @Override
                    public void onSuccess(AbstractBackupPath result) {}
                    @Override
                    public void onFailure(Throwable t) {
                        logger.error("Error uploading contents of snapshotDir {}", snapshotDir, t);
                    }
                };
        Futures.addCallback(future, callback, MoreExecutors.directExecutor());
    }
}
| 3,310 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backupv2/ForgottenFilesManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backupv2;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.utils.DateUtil;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Collection;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.inject.Inject;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.FileFilterUtils;
import org.apache.commons.io.filefilter.IOFileFilter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Detects "forgotten" files: files in a column family's data directory that are older than the
 * compaction grace period yet are not part of the current snapshot. Forgotten files are counted in
 * metrics and, when enabled, moved to a {@code lost+found} directory after an additional read
 * grace period (tracked via symbolic links). Created by aagrawal on 1/1/19.
 */
public class ForgottenFilesManager {
    private static final Logger logger = LoggerFactory.getLogger(ForgottenFilesManager.class);
    private final BackupMetrics backupMetrics;
    private final IConfiguration config;
    private static final String TMP_EXT = ".tmp";
    // Matches Cassandra temporary sstable/link file names (e.g. "tmp-ka-123-Data.db" or
    // "ks-cf-tmplink-ka-123-Data.db"); such files must never be treated as forgotten.
    private static final Pattern tmpFilePattern =
            Pattern.compile("^((.*)\\-(.*)\\-)?tmp(link)?\\-((?:l|k).)\\-(\\d)*\\-(.*)$");
    protected static final String LOST_FOUND = "lost+found";

    @Inject
    public ForgottenFilesManager(IConfiguration configuration, BackupMetrics backupMetrics) {
        this.config = configuration;
        this.backupMetrics = backupMetrics;
    }

    /**
     * Compares the contents of {@code snapshotDir} against its column family's data directory and
     * hands any file that is not part of the snapshot to {@link #moveForgottenFiles}. All
     * exceptions are swallowed (logged) so that housekeeping can never abort the enclosing
     * snapshot.
     *
     * @param snapshotInstant time the snapshot was taken; files newer than (snapshot time minus
     *     the compaction grace period) are ignored.
     * @param snapshotDir snapshot directory; its parent's parent is the CF data directory.
     */
    public void findAndMoveForgottenFiles(Instant snapshotInstant, File snapshotDir) {
        try {
            Collection<File> snapshotFiles =
                    FileUtils.listFiles(snapshotDir, FileFilterUtils.fileFileFilter(), null);
            File columnfamilyDir = snapshotDir.getParentFile().getParentFile();
            Collection<File> columnfamilyFiles =
                    getColumnfamilyFiles(snapshotInstant, columnfamilyDir);
            // Remove the SSTable(s) which are part of snapshot from the CF file list.
            // This cannot be a simple removeAll as snapshot files have "different" file folder
            // prefix.
            for (File file : snapshotFiles) {
                // Get its parent directory file based on this file.
                File originalFile = new File(columnfamilyDir, file.getName());
                columnfamilyFiles.remove(originalFile);
            }
            // If there are no "extra" SSTables in CF data folder, we are done.
            if (columnfamilyFiles.isEmpty()) return;
            logger.warn(
                    "# of potential forgotten files: {} found for CF: {}",
                    columnfamilyFiles.size(),
                    columnfamilyDir.getName());
            // Move the files to lost_found directory if configured.
            moveForgottenFiles(columnfamilyDir, columnfamilyFiles);
        } catch (Exception e) {
            // Deliberately best-effort: never let forgotten-file housekeeping stop the snapshot.
            // BUGFIX: the redundant e.printStackTrace() was removed; the logger already records
            // the stack trace.
            logger.error(
                    "Exception occurred while trying to find forgottenFile. Ignoring the error and continuing with remaining backup",
                    e);
        }
    }

    /**
     * Lists candidate files in the CF data directory: regular files (no recursion into
     * sub-directories) that are not Cassandra temp files and are older than the snapshot time
     * minus {@link IConfiguration#getGracePeriodDaysForCompaction()}.
     *
     * @param snapshotInstant time the snapshot was taken.
     * @param columnfamilyDir column family data directory to scan.
     * @return candidate files, possibly empty.
     */
    protected Collection<File> getColumnfamilyFiles(Instant snapshotInstant, File columnfamilyDir) {
        IOFileFilter tmpFileFilter1 = FileFilterUtils.suffixFileFilter(TMP_EXT);
        IOFileFilter tmpFileFilter2 =
                FileFilterUtils.asFileFilter(
                        pathname -> tmpFilePattern.matcher(pathname.getName()).matches());
        IOFileFilter tmpFileFilter = FileFilterUtils.or(tmpFileFilter1, tmpFileFilter2);
        /*
        Allow files that are older than @link{IConfiguration#getGracePeriodDaysForCompaction}
        so that files generated by long running compactions are not flagged.
        Refer to https://issues.apache.org/jira/browse/CASSANDRA-6756 and
        https://issues.apache.org/jira/browse/CASSANDRA-7066 for more information.
        */
        IOFileFilter ageFilter =
                FileFilterUtils.ageFileFilter(
                        snapshotInstant
                                .minus(config.getGracePeriodDaysForCompaction(), ChronoUnit.DAYS)
                                .toEpochMilli());
        IOFileFilter fileFilter =
                FileFilterUtils.and(
                        FileFilterUtils.notFileFilter(tmpFileFilter),
                        FileFilterUtils.fileFileFilter(),
                        ageFilter);
        return FileUtils.listFiles(columnfamilyDir, fileFilter, null);
    }

    /**
     * Applies a two-phase quarantine to each candidate file. On first sighting a symbolic link to
     * the file is created under {@code lost+found}; once that link is older than
     * {@link IConfiguration#getForgottenFileGracePeriodDaysForRead()} the file is counted as
     * forgotten and, when {@link IConfiguration#isForgottenFileMoveEnabled()}, physically moved
     * into {@code lost+found}. Finally, stale symlinks whose targets are no longer considered lost
     * are removed. The delay exists because Cassandra might still be reading the file.
     *
     * @param columnfamilyDir column family data directory owning the files.
     * @param columnfamilyFiles candidate forgotten files.
     * @throws IOException if the lost+found directory cannot be created.
     */
    protected void moveForgottenFiles(File columnfamilyDir, Collection<File> columnfamilyFiles)
            throws IOException {
        final Path destDir = Paths.get(columnfamilyDir.getAbsolutePath(), LOST_FOUND);
        FileUtils.forceMkdir(destDir.toFile());
        final Collection<Path> columnfamilyPaths =
                columnfamilyFiles
                        .parallelStream()
                        .map(file -> Paths.get(file.getAbsolutePath()))
                        .collect(Collectors.toList());
        for (Path file : columnfamilyPaths) {
            try {
                final Path symbolic_link =
                        Paths.get(destDir.toFile().getAbsolutePath(), file.toFile().getName());
                // Lets see if there is a symbolic link to this file already?
                if (!Files.exists(symbolic_link)) {
                    // If not, lets create one and work on next file.
                    Files.createSymbolicLink(symbolic_link, file);
                    continue;
                } else if (Files.isSymbolicLink(symbolic_link)) {
                    // Symbolic link exists, is it older than our timeframe?
                    Instant last_modified_time =
                            Files.getLastModifiedTime(symbolic_link, LinkOption.NOFOLLOW_LINKS)
                                    .toInstant();
                    if (DateUtil.getInstant()
                            .isAfter(
                                    last_modified_time.plus(
                                            config.getForgottenFileGracePeriodDaysForRead(),
                                            ChronoUnit.DAYS))) {
                        // Eligible for move.
                        logger.info(
                                "Eligible for move: Forgotten file: {} found for CF: {}",
                                file,
                                columnfamilyDir.getName());
                        backupMetrics.incrementForgottenFiles(1);
                        if (config.isForgottenFileMoveEnabled()) {
                            try {
                                // Remove our symbolic link. Note that deletion of symbolic link
                                // does not remove the original file.
                                Files.delete(symbolic_link);
                                FileUtils.moveFileToDirectory(
                                        file.toFile(), destDir.toFile(), true);
                                logger.warn(
                                        "Successfully moved forgotten file: {} found for CF: {}",
                                        file,
                                        columnfamilyDir.getName());
                            } catch (IOException e) {
                                // BUGFIX: pass the exception to the logger instead of calling
                                // e.printStackTrace(), so the stack trace lands in the log.
                                logger.error(
                                        "Exception occurred while trying to move forgottenFile: {}. Ignoring the error and continuing with remaining backup/forgotten files.",
                                        file,
                                        e);
                            }
                        }
                    }
                }
            } catch (IOException e) {
                // BUGFIX: same logging fix as above; continue with the remaining files.
                logger.error(
                        "Forgotten file: Error while trying to process the file: {}", file, e);
            }
        }
        // Clean LOST_FOUND directory of any previous symbolic link files which are not considered
        // lost any more.
        for (File file : FileUtils.listFiles(destDir.toFile(), null, false)) {
            Path filePath = Paths.get(file.getAbsolutePath());
            if (Files.isSymbolicLink(filePath)) {
                Path originalFile = Files.readSymbolicLink(filePath);
                if (!columnfamilyPaths.contains(originalFile)) {
                    Files.delete(filePath);
                    logger.info(
                            "Deleting the symbolic link as it is not considered as lost anymore. filePath: {}",
                            filePath);
                }
            }
        }
    }
}
| 3,311 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/DataPart.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.netflix.priam.utils.SystemUtils;
/** Class for holding part data of a backup file, which will be used for multi-part uploading */
public class DataPart {
    // Identity of the multi-part upload this part belongs to.
    private final String bucketName;
    private final String uploadID;
    private final String s3key;
    // Payload fields; populated only by the payload-carrying constructor, otherwise left at
    // their defaults (0 / null).
    private int partNo;
    private byte[] partData;
    private byte[] md5;

    /** Builds a descriptor for a multi-part upload without any chunk payload attached. */
    public DataPart(String bucket, String s3key, String mUploadId) {
        bucketName = bucket;
        uploadID = mUploadId;
        this.s3key = s3key;
    }

    /** Builds a descriptor carrying one numbered chunk of payload plus its digest. */
    public DataPart(int partNumber, byte[] data, String bucket, String s3key, String mUploadId) {
        this(bucket, s3key, mUploadId);
        partNo = partNumber;
        partData = data;
        md5 = SystemUtils.md5(data);
    }

    /** @return the destination S3 bucket name. */
    public String getBucketName() {
        return bucketName;
    }

    /** @return the id of the S3 multi-part upload session. */
    public String getUploadID() {
        return uploadID;
    }

    /** @return the S3 object key being uploaded. */
    public String getS3key() {
        return s3key;
    }

    /** @return the part number of this chunk (0 when constructed without payload). */
    public int getPartNo() {
        return partNo;
    }

    /** @return the raw bytes of this chunk (null when constructed without payload). */
    public byte[] getPartData() {
        return partData;
    }

    /** @return the digest computed by SystemUtils.md5 (null when constructed without payload). */
    public byte[] getMd5() {
        return md5;
    }
}
| 3,312 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.simpledb.AmazonSimpleDB;
import com.amazonaws.services.simpledb.AmazonSimpleDBClient;
import com.amazonaws.services.simpledb.model.*;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredential;
import com.netflix.priam.identity.PriamInstance;
import java.util.*;
import javax.inject.Inject;
import javax.inject.Singleton;
/** DAO for handling Instance identity information such as token, zone, region */
@Singleton
public class SDBInstanceData {
    /** SimpleDB attribute (column) names used to persist a {@link PriamInstance}. */
    public static class Attributes {
        public static final String APP_ID = "appId";
        public static final String ID = "id";
        public static final String INSTANCE_ID = "instanceId";
        public static final String TOKEN = "token";
        public static final String AVAILABILITY_ZONE = "availabilityZone";
        public static final String ELASTIC_IP = "elasticIP";
        public static final String UPDATE_TS = "updateTimestamp";
        public static final String LOCATION = "location";
        public static final String HOSTNAME = "hostname";
    }

    public static final String DOMAIN = "InstanceIdentity";
    public static final String ALL_QUERY =
            "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'";
    public static final String INSTANCE_QUERY =
            "select * from "
                    + DOMAIN
                    + " where "
                    + Attributes.APP_ID
                    + "='%s' and "
                    + Attributes.LOCATION
                    + "='%s' and "
                    + Attributes.ID
                    + "='%d'";

    private final ICredential provider;
    private final IConfiguration configuration;

    @Inject
    public SDBInstanceData(ICredential provider, IConfiguration configuration) {
        this.provider = provider;
        this.configuration = configuration;
    }

    /**
     * Get the instance details from SimpleDB
     *
     * @param app Cluster name
     * @param dc Data center (location) of the node
     * @param id Node ID
     * @return the node with the given {@code id}, or {@code null} if no such node exists
     */
    public PriamInstance getInstance(String app, String dc, int id) {
        AmazonSimpleDB simpleDBClient = getSimpleDBClient();
        SelectRequest request =
                new SelectRequest(String.format(INSTANCE_QUERY, app, dc, id))
                        .withConsistentRead(true);
        SelectResult result = simpleDBClient.select(request);
        if (result.getItems().isEmpty()) return null;
        return transform(result.getItems().get(0));
    }

    /**
     * Get the set of all nodes in the cluster, following SimpleDB pagination tokens until the
     * result set is exhausted.
     *
     * @param app Cluster name
     * @return the set of all instances in the given {@code app}
     */
    public Set<PriamInstance> getAllIds(String app) {
        AmazonSimpleDB simpleDBClient = getSimpleDBClient();
        Set<PriamInstance> inslist = new HashSet<>();
        String nextToken = null;
        do {
            SelectRequest request =
                    new SelectRequest(String.format(ALL_QUERY, app))
                            .withConsistentRead(true)
                            .withNextToken(nextToken);
            SelectResult result = simpleDBClient.select(request);
            nextToken = result.getNextToken();
            for (Item item : result.getItems()) {
                inslist.add(transform(item));
            }
        } while (nextToken != null);
        return inslist;
    }

    /**
     * Update an existing instance entry in SimpleDB. The write is conditional on the stored
     * instance id and token still matching {@code orig}, providing optimistic concurrency control.
     *
     * @param orig Original instance used for validation
     * @param inst Instance entry to be written.
     * @throws AmazonServiceException If unable to write to Simple DB because of any error.
     */
    public void updateInstance(PriamInstance orig, PriamInstance inst)
            throws AmazonServiceException {
        PutAttributesRequest putReq =
                new PutAttributesRequest(DOMAIN, getKey(inst), createAttributesToRegister(inst))
                        .withExpected(
                                new UpdateCondition()
                                        .withName(Attributes.INSTANCE_ID)
                                        .withValue(orig.getInstanceId()))
                        .withExpected(
                                new UpdateCondition()
                                        .withName(Attributes.TOKEN)
                                        .withValue(orig.getToken()));
        getSimpleDBClient().putAttributes(putReq);
    }

    /**
     * Register a new instance. Registration will fail if a prior entry exists (enforced with an
     * "attribute must not exist" update condition on the instance id).
     *
     * @param instance Instance entry to be registered.
     * @throws AmazonServiceException If unable to write to Simple DB because of any error.
     */
    public void registerInstance(PriamInstance instance) throws AmazonServiceException {
        AmazonSimpleDB simpleDBClient = getSimpleDBClient();
        PutAttributesRequest putReq =
                new PutAttributesRequest(
                        DOMAIN, getKey(instance), createAttributesToRegister(instance));
        UpdateCondition expected = new UpdateCondition();
        expected.setName(Attributes.INSTANCE_ID);
        expected.setExists(false);
        putReq.setExpected(expected);
        simpleDBClient.putAttributes(putReq);
    }

    /**
     * Deregister instance (same as delete).
     *
     * @param instance instance whose attributes should be removed.
     * @throws AmazonServiceException If unable to write to Simple DB because of any error.
     */
    public void deregisterInstance(PriamInstance instance) throws AmazonServiceException {
        AmazonSimpleDB simpleDBClient = getSimpleDBClient();
        DeleteAttributesRequest delReq =
                new DeleteAttributesRequest(
                        DOMAIN, getKey(instance), createAttributesToDeRegister(instance));
        simpleDBClient.deleteAttributes(delReq);
    }

    /** Builds the attribute list written on register/update; also stamps the update time. */
    protected List<ReplaceableAttribute> createAttributesToRegister(PriamInstance instance) {
        instance.setUpdatetime(new Date().getTime());
        List<ReplaceableAttribute> attrs = new ArrayList<>();
        // INSTANCE_ID is non-replaceable: it anchors the conditional writes above.
        attrs.add(
                new ReplaceableAttribute(Attributes.INSTANCE_ID, instance.getInstanceId(), false));
        attrs.add(new ReplaceableAttribute(Attributes.TOKEN, instance.getToken(), true));
        attrs.add(new ReplaceableAttribute(Attributes.APP_ID, instance.getApp(), true));
        attrs.add(
                new ReplaceableAttribute(Attributes.ID, Integer.toString(instance.getId()), true));
        attrs.add(new ReplaceableAttribute(Attributes.AVAILABILITY_ZONE, instance.getRac(), true));
        attrs.add(new ReplaceableAttribute(Attributes.ELASTIC_IP, instance.getHostIP(), true));
        attrs.add(new ReplaceableAttribute(Attributes.HOSTNAME, instance.getHostName(), true));
        attrs.add(new ReplaceableAttribute(Attributes.LOCATION, instance.getDC(), true));
        attrs.add(
                new ReplaceableAttribute(
                        Attributes.UPDATE_TS, Long.toString(instance.getUpdatetime()), true));
        return attrs;
    }

    /** Builds the attribute list removed on deregistration; mirrors the registered attributes. */
    protected List<Attribute> createAttributesToDeRegister(PriamInstance instance) {
        List<Attribute> attrs = new ArrayList<>();
        attrs.add(new Attribute(Attributes.INSTANCE_ID, instance.getInstanceId()));
        attrs.add(new Attribute(Attributes.TOKEN, instance.getToken()));
        attrs.add(new Attribute(Attributes.APP_ID, instance.getApp()));
        attrs.add(new Attribute(Attributes.ID, Integer.toString(instance.getId())));
        attrs.add(new Attribute(Attributes.AVAILABILITY_ZONE, instance.getRac()));
        attrs.add(new Attribute(Attributes.ELASTIC_IP, instance.getHostIP()));
        attrs.add(new Attribute(Attributes.HOSTNAME, instance.getHostName()));
        attrs.add(new Attribute(Attributes.LOCATION, instance.getDC()));
        attrs.add(new Attribute(Attributes.UPDATE_TS, Long.toString(instance.getUpdatetime())));
        return attrs;
    }

    /**
     * Convert a simpledb item to PriamInstance
     *
     * @param item SimpleDB item holding the persisted attributes.
     * @return the reconstructed instance; attributes missing from the item stay at their defaults.
     */
    public PriamInstance transform(Item item) {
        PriamInstance ins = new PriamInstance();
        for (Attribute att : item.getAttributes()) {
            if (att.getName().equals(Attributes.INSTANCE_ID)) ins.setInstanceId(att.getValue());
            else if (att.getName().equals(Attributes.TOKEN)) ins.setToken(att.getValue());
            else if (att.getName().equals(Attributes.APP_ID)) ins.setApp(att.getValue());
            else if (att.getName().equals(Attributes.ID))
                ins.setId(Integer.parseInt(att.getValue()));
            else if (att.getName().equals(Attributes.AVAILABILITY_ZONE)) ins.setRac(att.getValue());
            else if (att.getName().equals(Attributes.ELASTIC_IP)) ins.setHostIP(att.getValue());
            else if (att.getName().equals(Attributes.HOSTNAME)) ins.setHost(att.getValue());
            else if (att.getName().equals(Attributes.LOCATION)) ins.setDC(att.getValue());
            else if (att.getName().equals(Attributes.UPDATE_TS))
                ins.setUpdatetime(Long.parseLong(att.getValue()));
        }
        return ins;
    }

    /** Row key format: {@code <app>_<dc>_<id>}; uniquely identifies a node within the domain. */
    private String getKey(PriamInstance instance) {
        return instance.getApp() + "_" + instance.getDC() + "_" + instance.getId();
    }

    /** Builds a fresh SimpleDB client, in the configured identity region. */
    private AmazonSimpleDB getSimpleDBClient() {
        // Create per request
        return AmazonSimpleDBClient.builder()
                .withCredentials(provider.getAwsCredentialProvider())
                .withRegion(configuration.getSDBInstanceIdentityRegion())
                .build();
    }
}
| 3,313 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/AWSMembership.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder;
import com.amazonaws.services.autoscaling.model.*;
import com.amazonaws.services.autoscaling.model.Instance;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.amazonaws.services.ec2.model.*;
import com.amazonaws.services.ec2.model.Filter;
import com.google.common.collect.ImmutableSet;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredential;
import com.netflix.priam.identity.IMembership;
import com.netflix.priam.identity.config.InstanceInfo;
import java.util.*;
import javax.inject.Inject;
import javax.inject.Named;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class to query amazon ASG for its members to provide - Number of valid nodes in the ASG - Number
* of zones - Methods for adding ACLs for the nodes
*/
public class AWSMembership implements IMembership {
private static final Logger logger = LoggerFactory.getLogger(AWSMembership.class);
private final IConfiguration config;
private final ICredential provider;
private final InstanceInfo instanceInfo;
private final ICredential crossAccountProvider;
    /**
     * @param config Priam configuration.
     * @param provider credentials for this account's AWS clients.
     * @param crossAccountProvider credentials used when querying ASGs in another AWS account.
     * @param instanceInfo metadata about the instance Priam is running on.
     */
    @Inject
    public AWSMembership(
            IConfiguration config,
            ICredential provider,
            @Named("awsec2roleassumption") ICredential crossAccountProvider,
            InstanceInfo instanceInfo) {
        this.config = config;
        this.provider = provider;
        this.instanceInfo = instanceInfo;
        this.crossAccountProvider = crossAccountProvider;
    }
@Override
public ImmutableSet<String> getRacMembership() {
AmazonAutoScaling client = null;
try {
List<String> asgNames = new ArrayList<>();
asgNames.add(instanceInfo.getAutoScalingGroup());
asgNames.addAll(Arrays.asList(config.getSiblingASGNames().split("\\s*,\\s*")));
client = getAutoScalingClient();
DescribeAutoScalingGroupsRequest asgReq =
new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(
asgNames.toArray(new String[asgNames.size()]));
DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
ImmutableSet.Builder<String> instanceIds = ImmutableSet.builder();
for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
for (Instance ins : asg.getInstances())
if (!(ins.getLifecycleState().equalsIgnoreCase("Terminating")
|| ins.getLifecycleState().equalsIgnoreCase("shutting-down")
|| ins.getLifecycleState().equalsIgnoreCase("Terminated")))
instanceIds.add(ins.getInstanceId());
}
if (logger.isInfoEnabled()) {
logger.info(
String.format(
"Querying Amazon returned following instance in the RAC: %s, ASGs: %s --> %s",
instanceInfo.getRac(),
StringUtils.join(asgNames, ","),
StringUtils.join(instanceIds, ",")));
}
return instanceIds.build();
} finally {
if (client != null) client.shutdown();
}
}
/** Actual membership AWS source of truth... */
@Override
public int getRacMembershipSize() {
AmazonAutoScaling client = null;
try {
client = getAutoScalingClient();
DescribeAutoScalingGroupsRequest asgReq =
new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(instanceInfo.getAutoScalingGroup());
DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
int size = 0;
for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
size += asg.getMaxSize();
}
logger.info("Query on ASG returning {} instances", size);
return size;
} finally {
if (client != null) client.shutdown();
}
}
@Override
public ImmutableSet<String> getCrossAccountRacMembership() {
AmazonAutoScaling client = null;
try {
List<String> asgNames = new ArrayList<>();
asgNames.add(instanceInfo.getAutoScalingGroup());
asgNames.addAll(Arrays.asList(config.getSiblingASGNames().split("\\s*,\\s*")));
client = getCrossAccountAutoScalingClient();
DescribeAutoScalingGroupsRequest asgReq =
new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(
asgNames.toArray(new String[asgNames.size()]));
DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
ImmutableSet.Builder<String> instanceIds = ImmutableSet.builder();
for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
for (Instance ins : asg.getInstances())
if (!(ins.getLifecycleState().equalsIgnoreCase("Terminating")
|| ins.getLifecycleState().equalsIgnoreCase("shutting-down")
|| ins.getLifecycleState().equalsIgnoreCase("Terminated")))
instanceIds.add(ins.getInstanceId());
}
if (logger.isInfoEnabled()) {
logger.info(
String.format(
"Querying Amazon returned following instance in the cross-account ASG: %s --> %s",
instanceInfo.getRac(), StringUtils.join(instanceIds, ",")));
}
return instanceIds.build();
} finally {
if (client != null) client.shutdown();
}
}
    /** @return the number of racks (availability zones) configured for this cluster. */
    @Override
    public int getRacCount() {
        return config.getRacs().size();
    }
    // True when this instance runs in EC2-Classic (no VPC); security-group calls differ by env.
    private boolean isClassic() {
        return instanceInfo.getInstanceEnvironment() == InstanceInfo.InstanceEnvironment.CLASSIC;
    }
    /**
     * Adding peers' IPs as ingress to the running instance SG. The running instance could be in
     * "classic" or "vpc".
     *
     * @param listIPs peer IP ranges (CIDR) to allow.
     * @param from lowest TCP port of the allowed range.
     * @param to highest TCP port of the allowed range.
     */
    public void addACL(Collection<String> listIPs, int from, int to) {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();
            List<IpPermission> ipPermissions = new ArrayList<>();
            ipPermissions.add(
                    new IpPermission()
                            .withFromPort(from)
                            .withIpProtocol("tcp")
                            .withIpRanges(listIPs)
                            .withToPort(to));
            if (isClassic()) {
                client.authorizeSecurityGroupIngress(
                        new AuthorizeSecurityGroupIngressRequest(
                                config.getACLGroupName(), ipPermissions));
                if (logger.isInfoEnabled()) {
                    logger.info("Done adding ACL to classic: " + StringUtils.join(listIPs, ","));
                }
            } else {
                AuthorizeSecurityGroupIngressRequest sgIngressRequest =
                        new AuthorizeSecurityGroupIngressRequest();
                sgIngressRequest.withGroupId(getVpcGoupId());
                // fetch SG group id for vpc account of the running instance.
                client.authorizeSecurityGroupIngress(
                        sgIngressRequest.withIpPermissions(
                                ipPermissions)); // Adding peers' IPs as ingress to the running
                // instance SG
                if (logger.isInfoEnabled()) {
                    logger.info("Done adding ACL to vpc: " + StringUtils.join(listIPs, ","));
                }
            }
        } finally {
            if (client != null) client.shutdown();
        }
    }
    /**
     * Looks up the security-group id for {@code config.getACLGroupName()} within this instance's
     * VPC. Returns the first match, or the empty string (after logging an error) when no group is
     * found. NOTE(review): method name has a typo ("Goup"); kept for compatibility with existing
     * callers/overriders.
     */
    protected String getVpcGoupId() {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();
            Filter nameFilter =
                    new Filter().withName("group-name").withValues(config.getACLGroupName()); // SG
            Filter vpcFilter = new Filter().withName("vpc-id").withValues(instanceInfo.getVpcId());
            DescribeSecurityGroupsRequest req =
                    new DescribeSecurityGroupsRequest().withFilters(nameFilter, vpcFilter);
            DescribeSecurityGroupsResult result = client.describeSecurityGroups(req);
            for (SecurityGroup group : result.getSecurityGroups()) {
                logger.debug(
                        "got group-id:{} for group-name:{},vpc-id:{}",
                        group.getGroupId(),
                        config.getACLGroupName(),
                        instanceInfo.getVpcId());
                return group.getGroupId();
            }
            logger.error(
                    "unable to get group-id for group-name={} vpc-id={}",
                    config.getACLGroupName(),
                    instanceInfo.getVpcId());
            return "";
        } finally {
            if (client != null) client.shutdown();
        }
    }
    /**
     * Removes the given peer IP ranges from the security group's ingress rules for the port range
     * [from, to], in either the classic or vpc environment.
     *
     * @param listIPs peer IP ranges (CIDR) to revoke.
     * @param from lowest TCP port of the revoked range.
     * @param to highest TCP port of the revoked range.
     */
    public void removeACL(Collection<String> listIPs, int from, int to) {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();
            List<IpPermission> ipPermissions = new ArrayList<>();
            ipPermissions.add(
                    new IpPermission()
                            .withFromPort(from)
                            .withIpProtocol("tcp")
                            .withIpRanges(listIPs)
                            .withToPort(to));
            if (isClassic()) {
                client.revokeSecurityGroupIngress(
                        new RevokeSecurityGroupIngressRequest(
                                config.getACLGroupName(), ipPermissions));
                if (logger.isInfoEnabled()) {
                    logger.info(
                            "Done removing from ACL within classic env for running instance: "
                                    + StringUtils.join(listIPs, ","));
                }
            } else {
                RevokeSecurityGroupIngressRequest req = new RevokeSecurityGroupIngressRequest();
                // fetch SG group id for vpc account of the running instance.
                req.withGroupId(getVpcGoupId());
                // Adding peers' IPs as ingress to the running instance SG
                client.revokeSecurityGroupIngress(req.withIpPermissions(ipPermissions));
                if (logger.isInfoEnabled()) {
                    logger.info(
                            "Done removing from ACL within vpc env for running instance: "
                                    + StringUtils.join(listIPs, ","));
                }
            }
        } finally {
            if (client != null) client.shutdown();
        }
    }
/** List SG ACL's */
public ImmutableSet<String> listACL(int from, int to) {
AmazonEC2 client = null;
try {
client = getEc2Client();
ImmutableSet.Builder<String> ipPermissions = ImmutableSet.builder();
if (isClassic()) {
DescribeSecurityGroupsRequest req =
new DescribeSecurityGroupsRequest()
.withGroupNames(
Collections.singletonList(config.getACLGroupName()));
DescribeSecurityGroupsResult result = client.describeSecurityGroups(req);
for (SecurityGroup group : result.getSecurityGroups())
for (IpPermission perm : group.getIpPermissions())
if (perm.getFromPort() == from && perm.getToPort() == to)
ipPermissions.addAll(perm.getIpRanges());
logger.debug("Fetch current permissions for classic env of running instance");
} else {
Filter nameFilter =
new Filter().withName("group-name").withValues(config.getACLGroupName());
String vpcid = instanceInfo.getVpcId();
if (vpcid == null || vpcid.isEmpty()) {
throw new IllegalStateException(
"vpcid is null even though instance is running in vpc.");
}
// only fetch SG for the vpc id of the running instance
Filter vpcFilter = new Filter().withName("vpc-id").withValues(vpcid);
DescribeSecurityGroupsRequest req =
new DescribeSecurityGroupsRequest().withFilters(nameFilter, vpcFilter);
DescribeSecurityGroupsResult result = client.describeSecurityGroups(req);
for (SecurityGroup group : result.getSecurityGroups())
for (IpPermission perm : group.getIpPermissions())
if (perm.getFromPort() == from && perm.getToPort() == to)
ipPermissions.addAll(perm.getIpRanges());
logger.debug("Fetch current permissions for vpc env of running instance");
}
return ipPermissions.build();
} finally {
if (client != null) client.shutdown();
}
}
    /**
     * Grows this rac's auto-scaling group by one instance by raising min, max, and desired
     * capacity, prompting AWS to launch a replacement node.
     *
     * <p>NOTE(review): the {@code count} parameter is never used — the ASG is always grown by
     * exactly one; confirm whether callers rely on that. Also note that max size and desired
     * capacity are both derived from {@code getMinSize() + 1} rather than the current max or
     * desired values — verify this is intentional.
     *
     * @param count requested expansion size (currently ignored; growth is always 1).
     */
    @Override
    public void expandRacMembership(int count) {
        AmazonAutoScaling client = null;
        try {
            client = getAutoScalingClient();
            DescribeAutoScalingGroupsRequest asgReq =
                    new DescribeAutoScalingGroupsRequest()
                            .withAutoScalingGroupNames(instanceInfo.getAutoScalingGroup());
            DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
            // Assumes the ASG exists; get(0) throws IndexOutOfBoundsException otherwise.
            AutoScalingGroup asg = res.getAutoScalingGroups().get(0);
            UpdateAutoScalingGroupRequest ureq = new UpdateAutoScalingGroupRequest();
            ureq.setAutoScalingGroupName(asg.getAutoScalingGroupName());
            ureq.setMinSize(asg.getMinSize() + 1);
            ureq.setMaxSize(asg.getMinSize() + 1);
            ureq.setDesiredCapacity(asg.getMinSize() + 1);
            client.updateAutoScalingGroup(ureq);
        } finally {
            if (client != null) client.shutdown();
        }
    }
protected AmazonAutoScaling getAutoScalingClient() {
return AmazonAutoScalingClientBuilder.standard()
.withCredentials(provider.getAwsCredentialProvider())
.withRegion(instanceInfo.getRegion())
.build();
}
protected AmazonAutoScaling getCrossAccountAutoScalingClient() {
return AmazonAutoScalingClientBuilder.standard()
.withCredentials(crossAccountProvider.getAwsCredentialProvider())
.withRegion(instanceInfo.getRegion())
.build();
}
protected AmazonEC2 getEc2Client() {
return AmazonEC2ClientBuilder.standard()
.withCredentials(provider.getAwsCredentialProvider())
.withRegion(instanceInfo.getRegion())
.build();
}
}
| 3,314 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.BackupRestoreException;
import com.netflix.priam.backup.DynamicRateLimiter;
import com.netflix.priam.backup.RangeReadInputStream;
import com.netflix.priam.compress.ChunkedStream;
import com.netflix.priam.compress.ICompression;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredential;
import com.netflix.priam.cryptography.IFileCryptography;
import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.notification.BackupNotificationMgr;
import java.io.*;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implementation of IBackupFileSystem for S3 operating on ciphertext: files are compressed, then
 * encrypted before upload, and downloaded as raw bytes (decryption and decompression are the
 * caller's responsibility).
 */
@Singleton
public class S3EncryptedFileSystem extends S3FileSystemBase {

    private static final Logger logger = LoggerFactory.getLogger(S3EncryptedFileSystem.class);
    private final IFileCryptography encryptor;
    private final DynamicRateLimiter dynamicRateLimiter;

    @Inject
    public S3EncryptedFileSystem(
            Provider<AbstractBackupPath> pathProvider,
            ICompression compress,
            final IConfiguration config,
            ICredential cred,
            @Named("filecryptoalgorithm") IFileCryptography fileCryptography,
            BackupMetrics backupMetrics,
            BackupNotificationMgr backupNotificationMgr,
            InstanceInfo instanceInfo,
            DynamicRateLimiter dynamicRateLimiter) {

        super(pathProvider, compress, config, backupMetrics, backupNotificationMgr);
        this.encryptor = fileCryptography;
        this.dynamicRateLimiter = dynamicRateLimiter;
        super.s3Client =
                AmazonS3Client.builder()
                        .withCredentials(cred.getAwsCredentialProvider())
                        .withRegion(instanceInfo.getRegion())
                        .build();
    }

    /**
     * Downloads the remote object to {@code <restore file> + suffix} as raw bytes. No
     * decompression happens here: the payload was compressed and then encrypted, so
     * decompressing before decryption would corrupt the stream.
     */
    @Override
    protected void downloadFileImpl(AbstractBackupPath path, String suffix)
            throws BackupRestoreException {
        String remotePath = path.getRemotePath();
        Path localPath = Paths.get(path.newRestoreFile().getAbsolutePath() + suffix);
        try (OutputStream os = new FileOutputStream(localPath.toFile());
                RangeReadInputStream rris =
                        new RangeReadInputStream(
                                s3Client, getShard(), super.getFileSize(remotePath), remotePath)) {
            /*
             * To handle use cases where decompression should be done outside of the download. For example, the file have been compressed and then encrypted.
             * Hence, decompressing it here would compromise the decryption.
             */
            IOUtils.copyLarge(rris, os);
        } catch (Exception e) {
            throw new BackupRestoreException(
                    "Exception encountered downloading "
                            + remotePath
                            + " from S3 bucket "
                            + getShard()
                            + ", Msg: "
                            + e.getMessage(),
                    e);
        }
    }

    /**
     * Compresses the local file into a temporary ".compressed" sibling, then encrypts it chunk by
     * chunk and uploads the ciphertext to S3 as a multipart upload.
     *
     * @param path backup path describing the local file and its remote destination.
     * @param target upload deadline consulted by the dynamic rate limiter.
     * @return total number of encrypted bytes uploaded.
     * @throws BackupRestoreException on compression, encryption, or upload failure; a failed
     *     multipart upload is aborted so AWS does not retain orphaned parts.
     */
    @Override
    protected long uploadFileImpl(AbstractBackupPath path, Instant target)
            throws BackupRestoreException {
        Path localPath = Paths.get(path.getBackupFile().getAbsolutePath());
        String remotePath = path.getRemotePath();
        long chunkSize = getChunkSize(localPath);
        // initialize chunking request to aws
        InitiateMultipartUploadRequest initRequest =
                new InitiateMultipartUploadRequest(config.getBackupPrefix(), remotePath);
        // Fetch the aws generated upload id for this chunking request
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        DataPart part =
                new DataPart(config.getBackupPrefix(), remotePath, initResponse.getUploadId());
        // ETags of successfully uploaded parts; synchronized because parts upload concurrently.
        List<PartETag> partETags = Collections.synchronizedList(new ArrayList<>());

        // Read chunks from src, compress them, and write to a temp file.
        File compressedDstFile = new File(localPath.toString() + ".compressed");
        if (logger.isDebugEnabled())
            logger.debug(
                    "Compressing {} with chunk size {}",
                    compressedDstFile.getAbsolutePath(),
                    chunkSize);

        try (InputStream in = new FileInputStream(localPath.toFile());
                BufferedOutputStream compressedBos =
                        new BufferedOutputStream(new FileOutputStream(compressedDstFile))) {
            Iterator<byte[]> compressedChunks =
                    new ChunkedStream(in, chunkSize, path.getCompression());
            while (compressedChunks.hasNext()) {
                byte[] compressedChunk = compressedChunks.next();
                compressedBos.write(compressedChunk);
            }
        } catch (Exception e) {
            String message =
                    "Exception in compressing the input data during upload to EncryptedStore  Msg: "
                            + e.getMessage();
            logger.error(message, e);
            // Preserve the original exception as the cause (previously dropped).
            throw new BackupRestoreException(message, e);
        }

        // == Read compressed data, encrypt each chunk, upload it to aws
        try (BufferedInputStream compressedBis =
                new BufferedInputStream(new FileInputStream(compressedDstFile))) {
            Iterator<byte[]> chunks = this.encryptor.encryptStream(compressedBis, remotePath);

            // identifies this part position in the object we are uploading
            int partNum = 0;
            long encryptedFileSize = 0;

            while (chunks.hasNext()) {
                byte[] chunk = chunks.next();
                // throttle upload to endpoint
                rateLimiter.acquire(chunk.length);
                dynamicRateLimiter.acquire(path, target, chunk.length);

                DataPart dp =
                        new DataPart(
                                ++partNum,
                                chunk,
                                config.getBackupPrefix(),
                                remotePath,
                                initResponse.getUploadId());
                S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags);
                encryptedFileSize += chunk.length;
                executor.submit(partUploader);
            }

            executor.sleepTillEmpty();
            if (partNum != partETags.size()) {
                throw new BackupRestoreException(
                        "Number of parts("
                                + partNum
                                + ") does not match the expected number of uploaded parts("
                                + partETags.size()
                                + ")");
            }

            // complete the aws chunking upload by providing to aws the ETag that uniquely
            // identifies the combined object datav
            CompleteMultipartUploadResult resultS3MultiPartUploadComplete =
                    new S3PartUploader(s3Client, part, partETags).completeUpload();
            checkSuccessfulUpload(resultS3MultiPartUploadComplete, localPath);
            return encryptedFileSize;
        } catch (Exception e) {
            // Abort so AWS discards any parts already uploaded for this upload id.
            new S3PartUploader(s3Client, part, partETags).abortUpload();
            throw new BackupRestoreException("Error uploading file: " + localPath, e);
        } finally {
            if (compressedDstFile.exists() && !compressedDstFile.delete()) {
                logger.warn(
                        "Unable to delete temporary compressed file: {}",
                        compressedDstFile.getAbsolutePath());
            }
        }
    }
}
| 3,315 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ResponseMetadata;
import com.amazonaws.services.s3.model.*;
import com.google.common.base.Preconditions;
import com.netflix.priam.aws.auth.IS3Credential;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.BackupRestoreException;
import com.netflix.priam.backup.DynamicRateLimiter;
import com.netflix.priam.backup.RangeReadInputStream;
import com.netflix.priam.compress.ChunkedStream;
import com.netflix.priam.compress.CompressionType;
import com.netflix.priam.compress.ICompression;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.notification.BackupNotificationMgr;
import com.netflix.priam.utils.BoundedExponentialRetryCallable;
import com.netflix.priam.utils.SystemUtils;
import java.io.*;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Implementation of IBackupFileSystem for S3 operating on plaintext (optionally compressed). */
@Singleton
public class S3FileSystem extends S3FileSystemBase {
    private static final Logger logger = LoggerFactory.getLogger(S3FileSystem.class);
    // Cap on the download read buffer so small objects do not allocate 5MB.
    private static final long MAX_BUFFER_SIZE = 5L * 1024L * 1024L;
    private final DynamicRateLimiter dynamicRateLimiter;

    @Inject
    public S3FileSystem(
            @Named("awss3roleassumption") IS3Credential cred,
            Provider<AbstractBackupPath> pathProvider,
            ICompression compress,
            final IConfiguration config,
            BackupMetrics backupMetrics,
            BackupNotificationMgr backupNotificationMgr,
            InstanceInfo instanceInfo,
            DynamicRateLimiter dynamicRateLimiter) {
        super(pathProvider, compress, config, backupMetrics, backupNotificationMgr);
        s3Client =
                AmazonS3Client.builder()
                        .withCredentials(cred.getAwsCredentialProvider())
                        .withRegion(instanceInfo.getRegion())
                        .build();
        this.dynamicRateLimiter = dynamicRateLimiter;
    }

    /**
     * Downloads a remote object to the local restore location, decompressing on the fly unless
     * the object was uploaded with {@link CompressionType#NONE}.
     */
    @Override
    protected void downloadFileImpl(AbstractBackupPath path, String suffix)
            throws BackupRestoreException {
        String remotePath = path.getRemotePath();
        File localFile = new File(path.newRestoreFile().getAbsolutePath() + suffix);
        long size = super.getFileSize(remotePath);
        final int bufferSize = Math.toIntExact(Math.min(MAX_BUFFER_SIZE, size));
        try (BufferedInputStream is =
                        new BufferedInputStream(
                                new RangeReadInputStream(s3Client, getShard(), size, remotePath),
                                bufferSize);
                BufferedOutputStream os =
                        new BufferedOutputStream(new FileOutputStream(localFile))) {
            if (path.getCompression() == CompressionType.NONE) {
                IOUtils.copyLarge(is, os);
            } else {
                compress.decompressAndClose(is, os);
            }
        } catch (Exception e) {
            String err =
                    String.format(
                            "Failed to GET %s Bucket: %s Msg: %s",
                            remotePath, getShard(), e.getMessage());
            // Preserve the original exception as the cause (previously dropped).
            throw new BackupRestoreException(err, e);
        }
    }

    /** Builds object metadata carrying the local file's modification time and size. */
    private ObjectMetadata getObjectMetadata(File file) {
        ObjectMetadata ret = new ObjectMetadata();
        long lastModified = file.lastModified();

        if (lastModified != 0) {
            ret.addUserMetadata("local-modification-time", Long.toString(lastModified));
        }

        long fileSize = file.length();
        if (fileSize != 0) {
            ret.addUserMetadata("local-size", Long.toString(fileSize));
        }
        return ret;
    }

    /**
     * Uploads a large file as an S3 multipart upload: chunks are compressed, rate-limited, and
     * uploaded concurrently; the upload is aborted on any failure.
     *
     * @return the total number of compressed bytes uploaded.
     */
    private long uploadMultipart(AbstractBackupPath path, Instant target)
            throws BackupRestoreException {
        Path localPath = Paths.get(path.getBackupFile().getAbsolutePath());
        String remotePath = path.getRemotePath();
        long chunkSize = getChunkSize(localPath);
        String prefix = config.getBackupPrefix();
        if (logger.isDebugEnabled())
            logger.debug("Uploading to {}/{} with chunk size {}", prefix, remotePath, chunkSize);
        File localFile = localPath.toFile();
        InitiateMultipartUploadRequest initRequest =
                new InitiateMultipartUploadRequest(prefix, remotePath)
                        .withObjectMetadata(getObjectMetadata(localFile));
        String uploadId = s3Client.initiateMultipartUpload(initRequest).getUploadId();
        DataPart part = new DataPart(prefix, remotePath, uploadId);
        // ETags of successfully uploaded parts; synchronized because parts upload concurrently.
        List<PartETag> partETags = Collections.synchronizedList(new ArrayList<>());

        try (InputStream in = new FileInputStream(localFile)) {
            Iterator<byte[]> chunks = new ChunkedStream(in, chunkSize, path.getCompression());
            int partNum = 0;
            AtomicInteger partsPut = new AtomicInteger(0);
            long compressedFileSize = 0;

            while (chunks.hasNext()) {
                byte[] chunk = chunks.next();
                rateLimiter.acquire(chunk.length);
                dynamicRateLimiter.acquire(path, target, chunk.length);
                DataPart dp = new DataPart(++partNum, chunk, prefix, remotePath, uploadId);
                S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags, partsPut);
                compressedFileSize += chunk.length;
                // TODO: output Future<Etag> instead, collect them here, wait for all below
                executor.submit(partUploader);
            }

            executor.sleepTillEmpty();
            logger.info("{} done. part count: {} expected: {}", localFile, partsPut.get(), partNum);
            Preconditions.checkState(partNum == partETags.size(), "part count mismatch");
            CompleteMultipartUploadResult resultS3MultiPartUploadComplete =
                    new S3PartUploader(s3Client, part, partETags).completeUpload();
            checkSuccessfulUpload(resultS3MultiPartUploadComplete, localPath);

            if (logger.isDebugEnabled()) {
                final S3ResponseMetadata info = s3Client.getCachedResponseMetadata(initRequest);
                logger.debug("Request Id: {}, Host Id: {}", info.getRequestId(), info.getHostId());
            }

            return compressedFileSize;
        } catch (Exception e) {
            // Abort so AWS discards any parts already uploaded for this upload id.
            new S3PartUploader(s3Client, part, partETags).abortUpload();
            throw new BackupRestoreException("Error uploading file: " + localPath.toString(), e);
        }
    }

    /**
     * Uploads a file to S3, choosing multipart upload for files at or above the configured
     * backup chunk size and a single PUT otherwise.
     *
     * @return the number of compressed bytes uploaded.
     */
    protected long uploadFileImpl(AbstractBackupPath path, Instant target)
            throws BackupRestoreException {
        File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile();
        if (localFile.length() >= config.getBackupChunkSize()) return uploadMultipart(path, target);

        byte[] chunk = getFileContents(path);
        // C* snapshots may have empty files. That is probably unintentional.
        rateLimiter.acquire(chunk.length);
        dynamicRateLimiter.acquire(path, target, chunk.length);
        try {
            new BoundedExponentialRetryCallable<PutObjectResult>(1000, 10000, 5) {
                @Override
                public PutObjectResult retriableCall() {
                    return s3Client.putObject(generatePut(path, chunk));
                }
            }.call();
        } catch (Exception e) {
            throw new BackupRestoreException("Error uploading file: " + localFile.getName(), e);
        }
        return chunk.length;
    }

    /** Builds the single-PUT request, optionally attaching an MD5 for server-side validation. */
    private PutObjectRequest generatePut(AbstractBackupPath path, byte[] chunk) {
        File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile();
        ObjectMetadata metadata = getObjectMetadata(localFile);
        metadata.setContentLength(chunk.length);
        PutObjectRequest put =
                new PutObjectRequest(
                        config.getBackupPrefix(),
                        path.getRemotePath(),
                        new ByteArrayInputStream(chunk),
                        metadata);
        if (config.addMD5ToBackupUploads()) {
            put.getMetadata().setContentMD5(SystemUtils.toBase64(SystemUtils.md5(chunk)));
        }
        return put;
    }

    /** Reads and compresses the whole local file into memory for a single-PUT upload. */
    private byte[] getFileContents(AbstractBackupPath path) throws BackupRestoreException {
        File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile();
        try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
                InputStream in = new BufferedInputStream(new FileInputStream(localFile))) {
            Iterator<byte[]> chunks =
                    new ChunkedStream(in, config.getBackupChunkSize(), path.getCompression());
            while (chunks.hasNext()) {
                byteArrayOutputStream.write(chunks.next());
            }
            return byteArrayOutputStream.toByteArray();
        } catch (Exception e) {
            throw new BackupRestoreException("Error reading file: " + localFile.getName(), e);
        }
    }
}
| 3,316 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.*;
import com.netflix.priam.backup.BackupRestoreException;
import com.netflix.priam.utils.BoundedExponentialRetryCallable;
import com.netflix.priam.utils.SystemUtils;
import java.io.ByteArrayInputStream;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Uploads a single part of an S3 multipart upload with bounded-exponential retry, verifying the
 * MD5 of the part against the ETag returned by S3.
 */
public class S3PartUploader extends BoundedExponentialRetryCallable<Void> {
    private final AmazonS3 client;
    private final DataPart dataPart;
    private final List<PartETag> partETags;
    // Optional shared progress counter; null when the caller does not track uploaded parts.
    private final AtomicInteger partsUploaded;

    private static final Logger logger = LoggerFactory.getLogger(S3PartUploader.class);
    private static final int MAX_RETRIES = 5;
    private static final int DEFAULT_MIN_SLEEP_MS = 200;

    public S3PartUploader(AmazonS3 client, DataPart dp, List<PartETag> partETags) {
        // Delegate to the full constructor; no progress counter is tracked.
        this(client, dp, partETags, null);
    }

    public S3PartUploader(
            AmazonS3 client, DataPart dp, List<PartETag> partETags, AtomicInteger partsUploaded) {
        super(DEFAULT_MIN_SLEEP_MS, BoundedExponentialRetryCallable.MAX_SLEEP, MAX_RETRIES);
        this.client = client;
        this.dataPart = dp;
        this.partETags = partETags;
        this.partsUploaded = partsUploaded;
    }

    /**
     * Uploads this part and records its ETag in the shared list.
     *
     * @throws BackupRestoreException when the ETag returned by S3 does not match the local MD5.
     */
    private Void uploadPart() throws AmazonClientException, BackupRestoreException {
        UploadPartRequest req = new UploadPartRequest();
        req.setBucketName(dataPart.getBucketName());
        req.setKey(dataPart.getS3key());
        req.setUploadId(dataPart.getUploadID());
        req.setPartNumber(dataPart.getPartNo());
        req.setPartSize(dataPart.getPartData().length);
        req.setMd5Digest(SystemUtils.toBase64(dataPart.getMd5()));
        req.setInputStream(new ByteArrayInputStream(dataPart.getPartData()));
        UploadPartResult res = client.uploadPart(req);
        PartETag partETag = res.getPartETag();
        if (!partETag.getETag().equals(SystemUtils.toHex(dataPart.getMd5())))
            throw new BackupRestoreException(
                    "Unable to match MD5 for part " + dataPart.getPartNo());
        partETags.add(partETag);
        if (this.partsUploaded != null) this.partsUploaded.incrementAndGet();
        return null;
    }

    /** Completes the multipart upload once all parts have been uploaded. */
    public CompleteMultipartUploadResult completeUpload() throws BackupRestoreException {
        CompleteMultipartUploadRequest compRequest =
                new CompleteMultipartUploadRequest(
                        dataPart.getBucketName(),
                        dataPart.getS3key(),
                        dataPart.getUploadID(),
                        partETags);
        return client.completeMultipartUpload(compRequest);
    }

    /** Aborts the multipart upload so AWS discards any parts already uploaded. */
    public void abortUpload() {
        AbortMultipartUploadRequest abortRequest =
                new AbortMultipartUploadRequest(
                        dataPart.getBucketName(), dataPart.getS3key(), dataPart.getUploadID());
        client.abortMultipartUpload(abortRequest);
    }

    @Override
    public Void retriableCall() throws AmazonClientException, BackupRestoreException {
        logger.debug(
                "Picked up part {} size {}", dataPart.getPartNo(), dataPart.getPartData().length);
        return uploadPart();
    }
}
| 3,317 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.RetryableCallable;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
/** Scheduled task that applies the backup cleanup (retention) policy to the bucket. */
@Singleton
public class UpdateCleanupPolicy extends Task {
    public static final String JOBNAME = "UpdateCleanupPolicy";
    private final IBackupFileSystem fs;

    @Inject
    public UpdateCleanupPolicy(IConfiguration config, @Named("backup") IBackupFileSystem fs) {
        super(config);
        this.fs = fs;
    }

    /** Runs the cleanup on the backup file system, retrying on transient failures. */
    @Override
    public void execute() throws Exception {
        RetryableCallable<Void> retryingCleanup =
                new RetryableCallable<Void>() {
                    @Override
                    public Void retriableCall() throws Exception {
                        fs.cleanup();
                        return null;
                    }
                };
        retryingCleanup.call();
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /** Creates the one-shot timer under which this task is scheduled. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME);
    }
}
| 3,318 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.BucketLifecycleConfiguration;
import com.amazonaws.services.s3.model.BucketLifecycleConfiguration.Rule;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.lifecycle.*;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.AbstractFileSystem;
import com.netflix.priam.backup.BackupRestoreException;
import com.netflix.priam.compress.ICompression;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.notification.BackupNotificationMgr;
import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor;
import java.nio.file.Path;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.stream.Collectors;
import javax.inject.Provider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class S3FileSystemBase extends AbstractFileSystem {
private static final int MAX_CHUNKS = 9995; // 10K is AWS limit, minus a small buffer
private static final Logger logger = LoggerFactory.getLogger(S3FileSystemBase.class);
AmazonS3 s3Client;
final IConfiguration config;
final ICompression compress;
final BlockingSubmitThreadPoolExecutor executor;
final RateLimiter rateLimiter;
private final RateLimiter objectExistLimiter;
    /**
     * Wires compression, configuration, a bounded upload executor, and two rate limiters. Both
     * limiters start at 1 permit/sec and are immediately re-tuned from configuration via
     * {@code configChangeListener()}.
     *
     * @param pathProvider provider of backup path objects.
     * @param compress compression implementation used for upload/download streams.
     * @param config Priam configuration (thread counts, timeouts, throttles).
     * @param backupMetrics metrics sink passed to the parent file system.
     * @param backupNotificationMgr notification manager passed to the parent file system.
     */
    S3FileSystemBase(
            Provider<AbstractBackupPath> pathProvider,
            ICompression compress,
            final IConfiguration config,
            BackupMetrics backupMetrics,
            BackupNotificationMgr backupNotificationMgr) {
        super(config, backupMetrics, backupNotificationMgr, pathProvider);
        this.compress = compress;
        this.config = config;

        int threads = config.getBackupThreads();
        // Bounded queue: submissions block once `threads` tasks are queued.
        LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(threads);
        this.executor =
                new BlockingSubmitThreadPoolExecutor(threads, queue, config.getUploadTimeout());

        // a throttling mechanism, we can limit the amount of bytes uploaded to endpoint per second.
        this.rateLimiter = RateLimiter.create(1);
        // a throttling mechanism, we can limit the amount of S3 API calls endpoint per second.
        this.objectExistLimiter = RateLimiter.create(1);
        configChangeListener();
    }
/*
Call this method to change the configuration in runtime via callback.
*/
public void configChangeListener() {
int objectExistLimit = config.getRemoteFileSystemObjectExistsThrottle();
objectExistLimiter.setRate(objectExistLimit < 1 ? Double.MAX_VALUE : objectExistLimit);
double throttleLimit = config.getUploadThrottle();
rateLimiter.setRate(throttleLimit < 1 ? Double.MAX_VALUE : throttleLimit);
logger.info(
"Updating rateLimiters: s3UploadThrottle: {}, objectExistLimiter: {}",
rateLimiter.getRate(),
objectExistLimiter.getRate());
}
    /** @return the S3 client handle currently used by this file system. */
    private AmazonS3 getS3Client() {
        return s3Client;
    }
    /**
     * A means to change the default handle to the S3 client.
     *
     * @param client the replacement S3 client.
     */
    public void setS3Client(AmazonS3 client) {
        s3Client = client;
    }
    /**
     * Ensures the bucket's lifecycle configuration carries the correct retention rule for this
     * cluster's prefix: the rule is added, updated, or removed based on the configured retention
     * days, and the bucket configuration is rewritten only when something changed.
     */
    @Override
    public void cleanup() {
        AmazonS3 s3Client = getS3Client();
        String clusterPath = pathProvider.get().clusterPrefix("");
        logger.debug("Bucket: {}", config.getBackupPrefix());
        // May be null when the bucket has no lifecycle configuration yet.
        BucketLifecycleConfiguration lifeConfig =
                s3Client.getBucketLifecycleConfiguration(config.getBackupPrefix());
        logger.debug("Got bucket:{} lifecycle.{}", config.getBackupPrefix(), lifeConfig);
        if (lifeConfig == null) {
            lifeConfig = new BucketLifecycleConfiguration();
            List<Rule> rules = Lists.newArrayList();
            lifeConfig.setRules(rules);
        }

        List<Rule> rules = lifeConfig.getRules();
        if (updateLifecycleRule(config, rules, clusterPath)) {
            // If the last rule was removed, delete the whole lifecycle configuration instead of
            // writing an empty one.
            if (rules.size() > 0) {
                lifeConfig.setRules(rules);
                s3Client.setBucketLifecycleConfiguration(config.getBackupPrefix(), lifeConfig);
            } else s3Client.deleteBucketLifecycleConfiguration(config.getBackupPrefix());
        }
    }
    // Dummy class to get Prefix. - Why oh why AWS you can't give the details!!
    /**
     * Lifecycle-predicate visitor that captures only the prefix of a rule's filter; all other
     * predicate types are ignored, leaving {@code prefix} null.
     */
    private class PrefixVisitor implements LifecyclePredicateVisitor {
        // Prefix captured from the visited predicate; null when the predicate is not
        // prefix-based.
        String prefix;

        @Override
        public void visit(LifecyclePrefixPredicate lifecyclePrefixPredicate) {
            prefix = lifecyclePrefixPredicate.getPrefix();
        }

        @Override
        public void visit(LifecycleTagPredicate lifecycleTagPredicate) {}

        @Override
        public void visit(
                LifecycleObjectSizeGreaterThanPredicate lifecycleObjectSizeGreaterThanPredicate) {}

        @Override
        public void visit(LifecycleAndOperator lifecycleAndOperator) {}

        @Override
        public void visit(
                LifecycleObjectSizeLessThanPredicate lifecycleObjectSizeLessThanPredicate) {}
    }
private Optional<Rule> getBucketLifecycleRule(List<Rule> rules, String prefix) {
if (rules == null || rules.isEmpty()) return Optional.empty();
for (Rule rule : rules) {
String rulePrefix = "";
if (rule.getFilter() != null) {
PrefixVisitor prefixVisitor = new PrefixVisitor();
rule.getFilter().getPredicate().accept(prefixVisitor);
rulePrefix = prefixVisitor.prefix;
} else if (rule.getPrefix() != null) {
// Being backwards compatible, here.
rulePrefix = rule.getPrefix();
}
if (prefix.equalsIgnoreCase(rulePrefix)) {
return Optional.of(rule);
}
}
return Optional.empty();
}
    /**
     * Reconciles the lifecycle rule for the given prefix against the configured retention days,
     * mutating {@code rules} in place.
     *
     * @param config configuration providing the retention period in days.
     * @param rules mutable list of the bucket's current lifecycle rules.
     * @param prefix object-key prefix the rule should apply to.
     * @return true when {@code rules} was modified and the bucket configuration must be
     *     rewritten; false when nothing changed.
     */
    private boolean updateLifecycleRule(IConfiguration config, List<Rule> rules, String prefix) {
        Optional<Rule> rule = getBucketLifecycleRule(rules, prefix);

        // No need to update the rule as it never existed and retention is not set.
        if (!rule.isPresent() && config.getBackupRetentionDays() <= 0) return false;

        // Rule not required as retention days is zero or negative.
        if (rule.isPresent() && config.getBackupRetentionDays() <= 0) {
            logger.warn(
                    "Removing the rule for backup retention on prefix: {} as retention is set to [{}] days. Only positive values are supported by S3!!",
                    prefix,
                    config.getBackupRetentionDays());
            rules.remove(rule.get());
            return true;
        }

        // Rule present and is current.
        if (rule.isPresent()
                && rule.get().getExpirationInDays() == config.getBackupRetentionDays()
                && rule.get().getStatus().equalsIgnoreCase(BucketLifecycleConfiguration.ENABLED)) {
            logger.info(
                    "Cleanup rule already set on prefix: {} with retention period: [{}] days",
                    prefix,
                    config.getBackupRetentionDays());
            return false;
        }

        if (!rule.isPresent()) {
            // Create a new rule
            rule = Optional.of(new BucketLifecycleConfiguration.Rule());
            rules.add(rule.get());
        }

        // New or stale rule: (re)apply status, expiration, prefix filter, and id.
        rule.get().setStatus(BucketLifecycleConfiguration.ENABLED);
        rule.get().setExpirationInDays(config.getBackupRetentionDays());
        rule.get().setFilter(new LifecycleFilter(new LifecyclePrefixPredicate(prefix)));
        rule.get().setId(prefix);
        logger.info(
                "Setting cleanup rule for prefix: {} with retention period: [{}] days",
                prefix,
                config.getBackupRetentionDays());
        return true;
    }
/**
 * Validates that a multipart upload completed successfully (non-null result with
 * a non-null ETag), logging success or failing fast otherwise.
 *
 * @throws BackupRestoreException when the result or its ETag is missing.
 */
void checkSuccessfulUpload(
        CompleteMultipartUploadResult resultS3MultiPartUploadComplete, Path localPath)
        throws BackupRestoreException {
    // Guard clause: reject a missing result or missing ETag up front.
    if (resultS3MultiPartUploadComplete == null
            || resultS3MultiPartUploadComplete.getETag() == null) {
        throw new BackupRestoreException(
                "Error uploading file as ETag or CompleteMultipartUploadResult is NULL -"
                        + localPath);
    }
    logger.info(
            "Uploaded file: {}, object eTag: {}",
            localPath,
            resultS3MultiPartUploadComplete.getETag());
}
@Override
public long getFileSize(String remotePath) throws BackupRestoreException {
    // Ask S3 for the object's metadata and report its Content-Length.
    final String bucket = getShard();
    return s3Client.getObjectMetadata(bucket, remotePath).getContentLength();
}
@Override
protected boolean doesRemoteFileExist(Path remotePath) {
    // Throttle existence checks so we do not hammer the S3 API.
    objectExistLimiter.acquire();
    try {
        return s3Client.doesObjectExist(getShard(), remotePath.toString());
    } catch (AmazonClientException ex) {
        // No point throwing this exception up; treat a failed check as "absent".
        logger.error(
                "Exception while checking existence of object: {}. Error: {}",
                remotePath,
                ex.getMessage());
        return false;
    }
}
@Override
public void shutdown() {
    // Stop the worker pool if one was ever created.
    if (null != executor) {
        executor.shutdown();
    }
}
@Override
public Iterator<String> listFileSystem(String prefix, String delimiter, String marker) {
    // Lazily page through the remote listing via S3Iterator.
    final String bucket = getShard();
    return new S3Iterator(s3Client, bucket, prefix, delimiter, marker);
}
@Override
public void deleteFiles(List<Path> remotePaths) throws BackupRestoreException {
    // Nothing to delete for an empty batch.
    if (remotePaths.isEmpty()) return;
    try {
        // Translate every remote path into an S3 delete key.
        List<DeleteObjectsRequest.KeyVersion> keys =
                remotePaths
                        .stream()
                        .map(Path::toString)
                        .map(DeleteObjectsRequest.KeyVersion::new)
                        .collect(Collectors.toList());
        // Quiet mode: S3 only reports failures, not every deleted key.
        s3Client.deleteObjects(
                new DeleteObjectsRequest(getShard()).withKeys(keys).withQuiet(true));
        logger.info("Deleted {} objects from S3", remotePaths.size());
    } catch (Exception e) {
        logger.error(
                "Error while trying to delete [{}] the objects from S3: {}",
                remotePaths.size(),
                e.getMessage());
        throw new BackupRestoreException(e + " while trying to delete the objects");
    }
}
/** Chunk size for multipart upload: large enough to stay under MAX_CHUNKS parts,
 *  but never smaller than the configured backup chunk size. */
final long getChunkSize(Path path) {
    long chunkForPartLimit = path.toFile().length() / MAX_CHUNKS;
    return Math.max(chunkForPartLimit, config.getBackupChunkSize());
}
}
| 3,319 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/S3Iterator.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.google.common.collect.Lists;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
/**
* Iterate over the s3 file system. This is really required to find the manifest file for restore
* and downloading incrementals. Created by aagrawal on 11/30/18.
*/
public class S3Iterator implements Iterator<String> {
    /** Iterator over the keys of the currently fetched page of results. */
    private Iterator<String> iterator;
    /** Current page of the S3 listing; null until the first LIST call. */
    private ObjectListing objectListing;

    private final AmazonS3 s3Client;
    private final String bucket;
    private final String prefix;
    private final String delimiter;
    private final String marker;

    public S3Iterator(
            AmazonS3 s3Client, String bucket, String prefix, String delimiter, String marker) {
        this.s3Client = s3Client;
        this.bucket = bucket;
        this.prefix = prefix;
        this.delimiter = delimiter;
        this.marker = marker;
        iterator = createIterator();
    }

    /** Issues the initial LIST request using the configured prefix/delimiter/marker. */
    private void initListing() {
        ListObjectsRequest listReq = new ListObjectsRequest();
        listReq.setBucketName(bucket);
        listReq.setPrefix(prefix);
        if (StringUtils.isNotBlank(delimiter)) listReq.setDelimiter(delimiter);
        if (StringUtils.isNotBlank(marker)) listReq.setMarker(marker);
        objectListing = s3Client.listObjects(listReq);
    }

    /** Materializes the keys of the current page into an in-memory iterator. */
    private Iterator<String> createIterator() {
        if (objectListing == null) initListing();
        List<String> temp = Lists.newArrayList();
        for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
            temp.add(summary.getKey());
        }
        return temp.iterator();
    }

    @Override
    public boolean hasNext() {
        // Fast path: the current page still has keys to hand out.
        if (iterator.hasNext()) {
            return true;
        }
        // Keep fetching remote pages until we find a non-empty one or run out.
        // (S3 can legally return empty, truncated pages.)
        while (objectListing.isTruncated() && !iterator.hasNext()) {
            objectListing = s3Client.listNextBatchOfObjects(objectListing);
            iterator = createIterator();
        }
        return iterator.hasNext();
    }

    @Override
    public String next() {
        // Bug fix: advance to the next remote page (if any) before delegating.
        // Previously, calling next() when the current page was exhausted threw
        // NoSuchElementException even though more keys existed remotely,
        // violating the Iterator contract. If truly exhausted, the delegate's
        // next() still throws NoSuchElementException as required.
        hasNext();
        return iterator.next();
    }
}
| 3,320 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.netflix.priam.aws.auth.IS3Credential;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.config.InstanceInfo;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* A version of S3FileSystem which allows it api access across different AWS accounts.
*
* *Note: ideally, this object should extend S3FileSystem but could not be done because:
* - S3FileSystem is a singleton and it uses DI. To follow the DI pattern, the best way to get this singleton is via injection.
* - S3FileSystem registers a MBean to JMX which must be only once per JVM. If not, you get
* java.lang.RuntimeException: javax.management.InstanceAlreadyExistsException: com.priam.aws.S3FileSystemMBean:name=S3FileSystemMBean
* -
*/
@Singleton
public class S3CrossAccountFileSystem {
    private static final Logger logger = LoggerFactory.getLogger(S3CrossAccountFileSystem.class);

    // Bug fix: must be volatile for the double-checked locking in
    // getCrossAcctS3Client() to be safe under the Java Memory Model; without it
    // another thread could observe a partially constructed client.
    private volatile AmazonS3 s3Client;

    private final S3FileSystem s3fs;
    private final IConfiguration config;
    private final IS3Credential s3Credential;
    private final InstanceInfo instanceInfo;

    @Inject
    public S3CrossAccountFileSystem(
            @Named("backup") IBackupFileSystem fs,
            @Named("awss3roleassumption") IS3Credential s3Credential,
            IConfiguration config,
            InstanceInfo instanceInfo) {
        this.s3fs = (S3FileSystem) fs;
        this.config = config;
        this.s3Credential = s3Credential;
        this.instanceInfo = instanceInfo;
    }

    /** @return the wrapped file system (shared with the rest of Priam). */
    public IBackupFileSystem getBackupFileSystem() {
        return this.s3fs;
    }

    /**
     * Lazily builds (once) an S3 client backed by role-assumption credentials so it
     * can operate across AWS accounts, and installs it into the wrapped file system.
     *
     * @return the cross-account S3 client.
     * @throws IllegalStateException if the client cannot be constructed.
     */
    public AmazonS3 getCrossAcctS3Client() {
        if (this.s3Client == null) {
            synchronized (this) {
                if (this.s3Client == null) {
                    try {
                        this.s3Client =
                                AmazonS3Client.builder()
                                        .withCredentials(s3Credential.getAwsCredentialProvider())
                                        .withRegion(instanceInfo.getRegion())
                                        .build();
                    } catch (Exception e) {
                        throw new IllegalStateException(
                                "Exception in getting handle to s3 client. Msg: "
                                        + e.getLocalizedMessage(),
                                e);
                    }
                    // Lets leverage the IBackupFileSystem behaviors except we want it to use our
                    // amazon S3 client which has cross AWS account api capability.
                    this.s3fs.setS3Client(s3Client);
                }
            }
        }
        return this.s3Client;
    }
}
| 3,321 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.amazonaws.AmazonServiceException;
import com.google.common.collect.ImmutableSet;
import com.netflix.priam.identity.IPriamInstanceFactory;
import com.netflix.priam.identity.PriamInstance;
import com.netflix.priam.identity.config.InstanceInfo;
import java.util.*;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * SimpleDB based instance instanceIdentity. Requires 'InstanceIdentity' domain to be created ahead
 */
@Singleton
public class SDBInstanceFactory implements IPriamInstanceFactory {
    private static final Logger logger = LoggerFactory.getLogger(SDBInstanceFactory.class);
    private final SDBInstanceData dao;
    private final InstanceInfo instanceInfo;

    @Inject
    public SDBInstanceFactory(SDBInstanceData dao, InstanceInfo instanceInfo) {
        this.dao = dao;
        this.instanceInfo = instanceInfo;
    }

    /** @return all registered instances for {@code appName}, sorted by instance id. */
    @Override
    public ImmutableSet<PriamInstance> getAllIds(String appName) {
        return ImmutableSet.copyOf(
                dao.getAllIds(appName)
                        .stream()
                        .sorted((Comparator.comparingInt(PriamInstance::getId)))
                        .collect(Collectors.toList()));
    }

    @Override
    public PriamInstance getInstance(String appName, String dc, int id) {
        return dao.getInstance(appName, dc, id);
    }

    /**
     * Registers a new PriamInstance in SimpleDB. For "-dead" apps, first deregisters
     * any stale entry (older than 3 minutes) occupying the same slot.
     *
     * @throws RuntimeException wrapping any registration failure.
     */
    @Override
    public PriamInstance create(
            String app,
            int id,
            String instanceID,
            String hostname,
            String ip,
            String rac,
            Map<String, Object> volumes,
            String token) {
        try {
            PriamInstance ins =
                    makePriamInstance(app, id, instanceID, hostname, ip, rac, volumes, token);
            // remove old data node which are dead.
            if (app.endsWith("-dead")) {
                try {
                    PriamInstance oldData = dao.getInstance(app, instanceInfo.getRegion(), id);
                    // clean up a very old data... (updated more than 3 minutes ago)
                    if (null != oldData
                            && oldData.getUpdatetime()
                                    < (System.currentTimeMillis() - (3 * 60 * 1000)))
                        dao.deregisterInstance(oldData);
                } catch (Exception ex) {
                    // Best effort cleanup: failure to purge stale data must not block
                    // registration. Bug fix: log the full stack trace, not just the message.
                    logger.error(ex.getMessage(), ex);
                }
            }
            dao.registerInstance(ins);
            return ins;
        } catch (Exception e) {
            // Bug fix: include the throwable so the stack trace is not lost.
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e);
        }
    }

    @Override
    public void delete(PriamInstance inst) {
        try {
            dao.deregisterInstance(inst);
        } catch (AmazonServiceException e) {
            throw new RuntimeException("Unable to deregister priam instance", e);
        }
    }

    @Override
    public void update(PriamInstance orig, PriamInstance inst) {
        try {
            dao.updateInstance(orig, inst);
        } catch (AmazonServiceException e) {
            throw new RuntimeException("Unable to update/create priam instance", e);
        }
    }

    /** Builds a PriamInstance value object; never null volumes (defaults to empty map). */
    private PriamInstance makePriamInstance(
            String app,
            int id,
            String instanceID,
            String hostname,
            String ip,
            String rac,
            Map<String, Object> volumes,
            String token) {
        Map<String, Object> v = (volumes == null) ? new HashMap<>() : volumes;
        PriamInstance ins = new PriamInstance();
        ins.setApp(app);
        ins.setRac(rac);
        ins.setHost(hostname);
        ins.setHostIP(ip);
        ins.setId(id);
        ins.setInstanceId(instanceID);
        ins.setDC(instanceInfo.getRegion());
        ins.setToken(token);
        ins.setVolumes(v);
        return ins;
    }
}
| 3,322 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/RemoteBackupPath.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.google.api.client.util.Lists;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.compress.CompressionType;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.utils.DateUtil;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Optional;
import javax.inject.Inject;
/**
* Represents location of an object on the remote file system. All the objects will be keyed with a
* common prefix (based on configuration, typically environment), name of the cluster and token of
* this instance.
*/
public class RemoteBackupPath extends AbstractBackupPath {
    // File types that only exist in the V2 (SnapshotMetaService) backup layout.
    private static final ImmutableSet<BackupFileType> V2_ONLY_FILE_TYPES =
            ImmutableSet.of(BackupFileType.META_V2, BackupFileType.SST_V2);

    @Inject
    public RemoteBackupPath(IConfiguration config, InstanceIdentity factory) {
        super(config, factory);
    }

    /** Builds the common V2 path prefix: BASE/[hash]_cluster/TOKEN. */
    private ImmutableList.Builder<String> getV2Prefix() {
        ImmutableList.Builder<String> prefix = ImmutableList.builder();
        prefix.add(baseDir, prependHash(clusterName), token);
        return prefix;
    }

    /* This will ensure that there is some randomness in the path at the start so that remote file systems
    can hash the contents better when we have lot of clusters backing up at the same remote location.
    */
    private String prependHash(String appName) {
        // NOTE(review): hashCode() % 10000 can be negative for some app names; the
        // pair stays internally consistent because removeHash() validates with the
        // same expression, but verify downstream tooling tolerates a leading '-'.
        return String.format("%d_%s", appName.hashCode() % 10000, appName);
    }

    /** Inverse of {@link #prependHash}: validates the embedded hash, then strips it. */
    private String removeHash(String appNameWithHash) {
        int hash = Integer.parseInt(appNameWithHash.substring(0, appNameWithHash.indexOf("_")));
        String appName = appNameWithHash.substring(appNameWithHash.indexOf("_") + 1);
        Preconditions.checkArgument(
                hash == appName.hashCode() % 10000,
                "Prepended hash does not match app name. Should have received: "
                        + prependHash(appName));
        return appName;
    }

    /*
     * This method will generate the location for the V2 backups.
     * Note that we use epochMillis to sort the directory instead of traditional YYYYMMddHHmm. This will allow greater
     * flexibility when doing restores as the s3 list calls with common prefix will have greater chance of match instead
     * of traditional way (where it takes lot of s3 list calls when month or year changes).
     * Another major difference w.r.t. V1 is having no distinction between SNAP and SST files as we upload SSTables only
     * once to remote file system.
     */
    private String getV2Location() {
        ImmutableList.Builder<String> parts = getV2Prefix();
        // JDK-8177809 truncate to seconds to ensure consistent behavior with our old method of
        // getting lastModified time (File::lastModified) in Java 8.
        long lastModified = getLastModified().toEpochMilli() / 1_000L * 1_000L;
        parts.add(type.toString(), lastModified + "");
        if (BackupFileType.isDataFile(type)) {
            // Data files additionally embed keyspace/columnfamily in the path.
            parts.add(keyspace, columnFamily);
        }
        parts.add(getCompression().toString(), getEncryption().toString(), fileName);
        return toPath(parts.build()).toString();
    }

    /**
     * Parses a V2 remote path (inverse of {@link #getV2Location}) and populates this
     * object's fields, including the local backupFile location under the data dir.
     */
    private void parseV2Location(Path remotePath) {
        Preconditions.checkArgument(
                remotePath.getNameCount() >= 8,
                String.format("%s has fewer than %d parts", remotePath, 8));
        int index = 0;
        baseDir = remotePath.getName(index++).toString();
        clusterName = removeHash(remotePath.getName(index++).toString());
        token = remotePath.getName(index++).toString();
        type = BackupFileType.valueOf(remotePath.getName(index++).toString());
        String lastModified = remotePath.getName(index++).toString();
        setLastModified(Instant.ofEpochMilli(Long.parseLong(lastModified)));
        // parts accumulates the path segments of the local file relative to the data dir.
        List<String> parts = Lists.newArrayListWithCapacity(4);
        if (BackupFileType.isDataFile(type)) {
            keyspace = remotePath.getName(index++).toString();
            columnFamily = remotePath.getName(index++).toString();
            parts.add(keyspace);
            parts.add(columnFamily);
        }
        setCompression(CompressionType.valueOf(remotePath.getName(index++).toString()));
        setEncryption(remotePath.getName(index++).toString());
        fileName = remotePath.getName(index).toString();
        parts.add(fileName);
        this.backupFile =
                Paths.get(config.getDataFileLocation(), parts.toArray(new String[] {})).toFile();
    }

    /** Formats the legacy V1 path: BASE/REGION/CLUSTER/TOKEN/TIME/TYPE[/KS/CF]/FILE. */
    private String getV1Location() {
        ImmutableList.Builder<String> parts = ImmutableList.builder();
        String timeString = DateUtil.formatyyyyMMddHHmm(time);
        parts.add(baseDir, region, clusterName, token, timeString, type.toString());
        if (BackupFileType.isDataFile(type)) {
            parts.add(keyspace, columnFamily);
        }
        parts.add(fileName);
        return toPath(parts.build()).toString();
    }

    /** Joins path segments into a platform Path. */
    private Path toPath(ImmutableList<String> parts) {
        return Paths.get(parts.get(0), parts.subList(1, parts.size()).toArray(new String[0]));
    }

    /** Parses a legacy V1 remote path (inverse of {@link #getV1Location}). */
    private void parseV1Location(Path remotePath) {
        // NOTE(review): the bound here is 7, but a data-file path accesses index 7
        // (columnFamily), which requires at least 9 parts — confirm callers never
        // pass a 7/8-part data-file path.
        Preconditions.checkArgument(
                remotePath.getNameCount() >= 7,
                String.format("%s has fewer than %d parts", remotePath, 7));
        parseV1Prefix(remotePath);
        time = DateUtil.getDate(remotePath.getName(4).toString());
        type = BackupFileType.valueOf(remotePath.getName(5).toString());
        if (BackupFileType.isDataFile(type)) {
            keyspace = remotePath.getName(6).toString();
            columnFamily = remotePath.getName(7).toString();
        }
        fileName = remotePath.getName(remotePath.getNameCount() - 1).toString();
    }

    /** Extracts the leading BASE/REGION/CLUSTER/TOKEN segments of a V1 path. */
    private void parseV1Prefix(Path remotePath) {
        Preconditions.checkArgument(
                remotePath.getNameCount() >= 4,
                String.format("%s needs %d parts to parse prefix", remotePath, 4));
        baseDir = remotePath.getName(0).toString();
        region = remotePath.getName(1).toString();
        clusterName = remotePath.getName(2).toString();
        token = remotePath.getName(3).toString();
    }

    /**
     * Format of backup path: 1. For old style backups:
     * BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNAP|META]/KEYSPACE/COLUMNFAMILY/FILE
     *
     * <p>2. For new style backups (SnapshotMetaService)
     * BASE/[cluster_name_hash]_cluster/TOKEN//[META_V2|SST_V2]/KEYSPACE/COLUMNFAMILY/[last_modified_time_ms]/FILE.compression
     */
    @Override
    public String getRemotePath() {
        return V2_ONLY_FILE_TYPES.contains(type) ? getV2Location() : getV1Location();
    }

    @Override
    public void parseRemote(String remotePath) {
        // Hack to determine type in advance of parsing. Will disappear once v1 is retired
        Optional<BackupFileType> inferredType =
                Arrays.stream(BackupFileType.values())
                        .filter(bft -> remotePath.contains(PATH_SEP + bft.toString() + PATH_SEP))
                        .findAny()
                        .filter(V2_ONLY_FILE_TYPES::contains);
        if (inferredType.isPresent()) {
            parseV2Location(Paths.get(remotePath));
        } else {
            parseV1Location(Paths.get(remotePath));
        }
    }

    @Override
    public void parsePartialPrefix(String remoteFilePath) {
        parseV1Prefix(Paths.get(remoteFilePath));
    }

    @Override
    public String remotePrefix(Date start, Date end, String location) {
        return PATH_JOINER.join(
                clusterPrefix(location),
                instanceIdentity.getInstance().getToken(),
                match(start, end));
    }

    /**
     * Builds the V2 listing prefix for {@code fileType}. When {@code location} has
     * fewer than 2 parts, fall back to the configured bucket/app name; with 3+
     * parts, the base dir and (hashed) cluster name come from the location itself.
     */
    @Override
    public Path remoteV2Prefix(Path location, BackupFileType fileType) {
        if (location.getNameCount() <= 1) {
            baseDir = config.getBackupLocation();
            clusterName = config.getAppName();
        } else if (location.getNameCount() >= 3) {
            baseDir = location.getName(1).toString();
            clusterName = removeHash(location.getName(2).toString());
        }
        token = instanceIdentity.getInstance().getToken();
        ImmutableList.Builder<String> parts = getV2Prefix();
        parts.add(fileType.toString());
        return toPath(parts.build());
    }

    /**
     * Builds the V1 cluster prefix BASE/REGION/CLUSTER/ either from configuration
     * (when {@code location} has at most one element) or from the location itself.
     */
    @Override
    public String clusterPrefix(String location) {
        String[] elements = location.split(String.valueOf(RemoteBackupPath.PATH_SEP));
        // NOTE(review): this precondition passes only when length < 2 or > 3, i.e.
        // it rejects 2- and 3-element paths; the message reads oddly but matches
        // the check — confirm this is the intended contract.
        Preconditions.checkArgument(
                elements.length < 2 || elements.length > 3,
                "Path must have fewer than 2 or greater than 3 elements. Saw " + location);
        if (elements.length <= 1) {
            baseDir = config.getBackupLocation();
            region = instanceIdentity.getInstanceInfo().getRegion();
            clusterName = config.getAppName();
        } else {
            // elements[0] is empty for locations with a leading separator.
            baseDir = elements[1];
            region = elements[2];
            clusterName = elements[3];
        }
        return PATH_JOINER.join(baseDir, region, clusterName, ""); // "" ensures a trailing "/"
    }
}
| 3,323 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/IAMCredential.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.aws;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.netflix.priam.cred.ICredential;
/** Credential provider backed by the EC2 instance profile (IAM role) metadata. */
public class IAMCredential implements ICredential {
    private final InstanceProfileCredentialsProvider iamCredProvider;

    public IAMCredential() {
        this.iamCredProvider = InstanceProfileCredentialsProvider.getInstance();
    }

    /** @return the access key id of the instance-profile credentials. */
    public String getAccessKeyId() {
        AWSCredentials credentials = iamCredProvider.getCredentials();
        return credentials.getAWSAccessKeyId();
    }

    /** @return the secret key of the instance-profile credentials. */
    public String getSecretAccessKey() {
        AWSCredentials credentials = iamCredProvider.getCredentials();
        return credentials.getAWSSecretKey();
    }

    /** @return the current instance-profile credentials. */
    public AWSCredentials getCredentials() {
        return iamCredProvider.getCredentials();
    }

    /** @return the underlying instance-profile credentials provider. */
    public AWSCredentialsProvider getAwsCredentialProvider() {
        return iamCredProvider;
    }
}
| 3,324 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws.auth;
import com.amazonaws.auth.AWSCredentials;
import com.netflix.priam.cred.ICredential;
/*
* Credentials specific to Amazon S3
*/
public interface IS3Credential extends ICredential {
    /**
     * @return the AWS credentials to use when talking to S3.
     * @throws Exception if the credentials cannot be obtained.
     */
    AWSCredentials getCredentials() throws Exception;
}
| 3,325 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws.auth;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredential;
import com.netflix.priam.identity.config.InstanceInfo;
import javax.inject.Inject;
import org.apache.commons.lang3.StringUtils;
public class EC2RoleAssumptionCredential implements ICredential {
    private static final String AWS_ROLE_ASSUMPTION_SESSION_NAME = "AwsRoleAssumptionSession";
    private final ICredential cred;
    private final IConfiguration config;
    private final InstanceInfo instanceInfo;

    // Bug fix: volatile is required for the double-checked locking below to be
    // safe under the Java Memory Model; without it another thread could observe
    // a partially constructed provider.
    private volatile AWSCredentialsProvider stsSessionCredentialsProvider;

    @Inject
    public EC2RoleAssumptionCredential(
            ICredential cred, IConfiguration config, InstanceInfo instanceInfo) {
        this.cred = cred;
        this.config = config;
        this.instanceInfo = instanceInfo;
    }

    /**
     * Lazily builds (once) an STS role-assumption credentials provider. The assumed
     * role ARN depends on the instance environment: a CLASSIC instance assumes the
     * VPC role and vice versa.
     *
     * @throws NullPointerException if the relevant role ARN is not configured.
     * @throws IllegalStateException if the STS provider cannot be constructed.
     */
    @Override
    public AWSCredentialsProvider getAwsCredentialProvider() {
        if (this.config.isDualAccount() || this.stsSessionCredentialsProvider == null) {
            synchronized (this) {
                if (this.stsSessionCredentialsProvider == null) {
                    String roleArn;
                    /**
                     * Create the assumed IAM role based on the environment. For example, if the
                     * current environment is VPC, then the assumed role is for EC2 classic, and
                     * vice versa.
                     */
                    if (instanceInfo.getInstanceEnvironment()
                            == InstanceInfo.InstanceEnvironment.CLASSIC) {
                        roleArn = this.config.getClassicEC2RoleAssumptionArn();
                        // Env is EC2 classic --> IAM assumed role for VPC created
                    } else {
                        roleArn = this.config.getVpcEC2RoleAssumptionArn();
                        // Env is VPC --> IAM assumed role for EC2 classic created.
                    }

                    if (StringUtils.isEmpty(roleArn))
                        throw new NullPointerException(
                                "Role ARN is null or empty probably due to missing config entry");

                    /**
                     * Get handle to an implementation that uses AWS Security Token Service (STS) to
                     * create temporary, short-lived session with explicit refresh for session/token
                     * expiration.
                     */
                    try {
                        this.stsSessionCredentialsProvider =
                                new STSAssumeRoleSessionCredentialsProvider(
                                        this.cred.getAwsCredentialProvider(),
                                        roleArn,
                                        AWS_ROLE_ASSUMPTION_SESSION_NAME);
                    } catch (Exception ex) {
                        throw new IllegalStateException(
                                "Exception in getting handle to AWS Security Token Service (STS). Msg: "
                                        + ex.getLocalizedMessage(),
                                ex);
                    }
                }
            }
        }
        return this.stsSessionCredentialsProvider;
    }
}
| 3,326 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws.auth;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
/*
* Provides credentials from the S3 instance.
*/
/** S3 credential sourced from the EC2 instance profile (IAM role). */
public class S3InstanceCredential implements IS3Credential {
    private final InstanceProfileCredentialsProvider credentialsProvider;

    public S3InstanceCredential() {
        this.credentialsProvider = InstanceProfileCredentialsProvider.getInstance();
    }

    /** @return the current instance-profile credentials. */
    @Override
    public AWSCredentials getCredentials() throws Exception {
        return credentialsProvider.getCredentials();
    }

    /** @return the underlying instance-profile credentials provider. */
    @Override
    public AWSCredentialsProvider getAwsCredentialProvider() {
        return credentialsProvider;
    }
}
| 3,327 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws | Create_ds/Priam/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws.auth;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredential;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Singleton
public class S3RoleAssumptionCredential implements IS3Credential {
    private static final String AWS_ROLE_ASSUMPTION_SESSION_NAME = "S3RoleAssumptionSession";
    private static final Logger logger = LoggerFactory.getLogger(S3RoleAssumptionCredential.class);
    private final ICredential cred;
    private final IConfiguration config;

    // Bug fix: volatile is required for the double-checked locking in
    // getAwsCredentialProvider() to be safe under the Java Memory Model.
    private volatile AWSCredentialsProvider stsSessionCredentialsProvider;

    @Inject
    public S3RoleAssumptionCredential(ICredential cred, IConfiguration config) {
        this.cred = cred;
        this.config = config;
    }

    /**
     * @return credentials from the (lazily created) role-assumption provider.
     * @throws Exception if the provider cannot be created or refreshed.
     */
    @Override
    public AWSCredentials getCredentials() throws Exception {
        if (this.stsSessionCredentialsProvider == null) {
            this.getAwsCredentialProvider();
        }
        return this.stsSessionCredentialsProvider.getCredentials();
    }

    /*
     * Accessing an AWS resource requires a valid login token and credentials. Both information is provided by the provider.
     * In addition, both login token and credentials can expire after a certain duration. If expired,
     * the client needs to ask the provider to 'refresh" the information, hence the purpose of this behavior.
     *
     * TODO: this behavior needs to be part of the interface IS3Credential
     *
     */
    public void refresh() {
        this.cred.getAwsCredentialProvider().refresh();
    }

    /**
     * Lazily builds (once) an STS role-assumption provider for the configured role
     * ARN. When no ARN is configured, falls back to the instance-level credentials
     * with a warning instead of failing.
     *
     * @throws IllegalStateException if the STS provider cannot be constructed.
     */
    @Override
    public AWSCredentialsProvider getAwsCredentialProvider() {
        if (this.stsSessionCredentialsProvider == null) {
            synchronized (this) {
                if (this.stsSessionCredentialsProvider == null) {
                    final String roleArn = this.config.getAWSRoleAssumptionArn();
                    // IAM role created for bucket own by account "awsprodbackup"
                    if (StringUtils.isEmpty(roleArn)) {
                        logger.warn(
                                "Role ARN is null or empty probably due to missing config entry. Falling back to instance level credentials");
                        this.stsSessionCredentialsProvider = this.cred.getAwsCredentialProvider();
                    } else {
                        // Get handle to an implementation that uses AWS Security Token Service
                        // (STS) to create temporary, short-lived session with explicit refresh for
                        // session/token expiration.
                        try {
                            this.stsSessionCredentialsProvider =
                                    new STSAssumeRoleSessionCredentialsProvider(
                                            this.cred.getAwsCredentialProvider(),
                                            roleArn,
                                            AWS_ROLE_ASSUMPTION_SESSION_NAME);
                        } catch (Exception ex) {
                            throw new IllegalStateException(
                                    "Exception in getting handle to AWS Security Token Service (STS). Msg: "
                                            + ex.getLocalizedMessage(),
                                    ex);
                        }
                    }
                }
            }
        }
        return this.stsSessionCredentialsProvider;
    }
}
| 3,328 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/IncrementalBackup.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
import com.netflix.priam.backupv2.SnapshotMetaTask;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.TaskTimer;
import java.io.File;
import java.io.FileFilter;
import java.nio.file.Path;
import java.util.Optional;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* Incremental/SSTable backup
*/
@Singleton
public class IncrementalBackup extends AbstractBackup {
    private static final Logger logger = LoggerFactory.getLogger(IncrementalBackup.class);
    public static final String JOBNAME = "IncrementalBackup";
    private final BackupRestoreUtil backupRestoreUtil;
    private final IBackupRestoreConfig backupRestoreConfig;
    private final BackupHelper backupHelper;

    @Inject
    public IncrementalBackup(
            IConfiguration config,
            IBackupRestoreConfig backupRestoreConfig,
            BackupHelper backupHelper) {
        super(config);
        // a means to upload audit trail (via meta_cf_yyyymmddhhmm.json) of files successfully
        // uploaded)
        this.backupRestoreConfig = backupRestoreConfig;
        backupRestoreUtil =
                new BackupRestoreUtil(
                        config.getIncrementalIncludeCFList(), config.getIncrementalExcludeCFList());
        this.backupHelper = backupHelper;
    }

    @Override
    public void execute() throws Exception {
        // Clearing remotePath List
        initiateBackup(INCREMENTAL_BACKUP_FOLDER, backupRestoreUtil);
    }

    /** Run every 10 Sec */
    public static TaskTimer getTimer(
            IConfiguration config, IBackupRestoreConfig backupRestoreConfig) {
        if (IncrementalBackup.isEnabled(config, backupRestoreConfig))
            return new SimpleTimer(JOBNAME, 10L * 1000);
        return null;
    }

    /** Deletes every file in each incremental backup directory. */
    private static void cleanOldBackups(IConfiguration configuration) throws Exception {
        Set<Path> backupPaths =
                AbstractBackup.getBackupDirectories(configuration, INCREMENTAL_BACKUP_FOLDER);
        for (Path backupDirPath : backupPaths) {
            FileUtils.cleanDirectory(backupDirPath.toFile());
        }
    }

    /**
     * Incremental backups are enabled when the incremental flag is on AND either v1 snapshots or
     * v2 snapshot-meta backups are enabled. When disabled, leftover incremental files are purged.
     */
    public static boolean isEnabled(
            IConfiguration configuration, IBackupRestoreConfig backupRestoreConfig) {
        boolean enabled = false;
        try {
            // Once backup 1.0 is gone, we should not check for enableV2Backups.
            enabled =
                    (configuration.isIncrementalBackupEnabled()
                            && (SnapshotBackup.isBackupEnabled(configuration)
                                    || (backupRestoreConfig.enableV2Backups()
                                            && SnapshotMetaTask.isBackupEnabled(
                                                    backupRestoreConfig))));
            logger.info("Incremental backups are enabled: {}", enabled);
            if (!enabled) {
                // Clean up the incremental backup folder.
                cleanOldBackups(configuration);
            }
        } catch (Exception e) {
            // Log the throwable itself; the previous version logged only e.getMessage(),
            // discarding the stack trace and the root cause.
            logger.error("Error while trying to find if incremental backup is enabled", e);
        }
        return enabled;
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /** Uploads (and then deletes) all SSTable files found in the given backup directory. */
    @Override
    protected void processColumnFamily(File backupDir) throws Exception {
        BackupFileType fileType =
                backupRestoreConfig.enableV2Backups() ? BackupFileType.SST_V2 : BackupFileType.SST;
        // delete empty files to adapt to 2.1
        FileFilter filter = (file) -> file.isFile() && file.canWrite() && file.length() == 0L;
        for (File file : Optional.ofNullable(backupDir.listFiles(filter)).orElse(new File[] {})) {
            FileUtils.deleteQuietly(file);
        }
        // upload SSTables and components
        ImmutableList<ListenableFuture<AbstractBackupPath>> futures =
                backupHelper.uploadAndDeleteAllFiles(
                        backupDir, fileType, config.enableAsyncIncremental());
        for (ListenableFuture<AbstractBackupPath> future : futures) {
            future.get();
        }
    }
}
| 3,329 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/Status.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
/** Enum to describe the status of the snapshot/restore. */
/** Lifecycle states of a snapshot or restore operation. */
public enum Status {
    /** The snapshot/restore started successfully and is currently running. */
    STARTED,
    /** The snapshot/restore completed successfully. */
    FINISHED,
    /**
     * The snapshot/restore failed to upload/restore successfully, or marking it as a failure
     * itself failed.
     */
    FAILED
}
| 3,330 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/DynamicRateLimiter.java | package com.netflix.priam.backup;
import com.google.inject.ImplementedBy;
import java.time.Instant;
@ImplementedBy(BackupDynamicRateLimiter.class)
public interface DynamicRateLimiter {
    /**
     * Blocks until {@code tokens} permits are available, at a rate the implementation may derive
     * dynamically from the backup path and the target completion time.
     *
     * @param dir backup path being processed; implementations may inspect it to decide whether to
     *     throttle at all.
     * @param target desired completion instant used to compute the permitted rate.
     * @param tokens number of permits to acquire (in the default implementation the rate is
     *     computed in bytes per second, so tokens correspond to bytes).
     */
    void acquire(AbstractBackupPath dir, Instant target, int tokens);
}
| 3,331 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
/** Checked exception signalling a failure during a backup or restore operation. */
public class BackupRestoreException extends Exception {
    private static final long serialVersionUID = 333L;
    /** @param message description of the failure */
    public BackupRestoreException(String message) {
        super(message);
    }
    /**
     * @param message description of the failure
     * @param e underlying cause, preserved for the caller's stack trace
     */
    public BackupRestoreException(String message, Exception e) {
        super(message, e);
    }
}
| 3,332 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/IBackupStatusMgr.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.google.inject.ImplementedBy;
import com.netflix.priam.utils.DateUtil;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
/**
* This will store the status of snapshots as they start, fail or finish. By default they will save
* the snapshot status of last 60 days on instance. Created by aagrawal on 1/30/17.
*/
@ImplementedBy(FileSnapshotStatusMgr.class)
public interface IBackupStatusMgr {
    /**
     * Return the list of snapshot executed on provided day or null if not present.
     *
     * @param snapshotDate date on which snapshot was started.
     * @return List of snapshots started on that day in descending order of snapshot start time.
     */
    List<BackupMetadata> locate(Date snapshotDate);
    /**
     * Return the list of snapshot executed on provided day or null if not present.
     *
     * @param snapshotDate date on which snapshot was started in the format of yyyyMMdd or
     *     yyyyMMddHHmm.
     * @return List of snapshots started on that day in descending order of snapshot start time.
     */
    List<BackupMetadata> locate(String snapshotDate);
    /**
     * Save the status of snapshot BackupMetadata which started in-memory and other implementations,
     * if any.
     *
     * @param backupMetadata backupmetadata that started
     */
    void start(BackupMetadata backupMetadata);
    /**
     * Save the status of successfully finished snapshot BackupMetadata in-memory and other
     * implementations, if any.
     *
     * @param backupMetadata backupmetadata that finished successfully
     */
    void finish(BackupMetadata backupMetadata);
    /**
     * Save the status of failed backupmetadata in-memory and other implementations, if any.
     *
     * @param backupMetadata backupmetadata that failed
     */
    void failed(BackupMetadata backupMetadata);
    /**
     * Update the backup information of backupmetadata in-memory and other implementations, if any.
     *
     * @param backupMetadata backupmetadata to be updated.
     */
    void update(BackupMetadata backupMetadata);
    /**
     * Get the capacity of in-memory status map holding the snapshot status.
     *
     * @return capacity of in-memory snapshot status map (number of snapshot days retained).
     */
    int getCapacity();
    /**
     * Get the entire map of snapshot status hold in-memory
     *
     * @return The map of snapshot status in-memory in format. Key is snapshot day in format of
     *     yyyyMMdd (start date of snapshot) with a list of snapshots in the descending order of
     *     snapshot start time.
     */
    Map<String, LinkedList<BackupMetadata>> getAllSnapshotStatus();
    /**
     * Get the list of backup metadata which are finished and have started in the daterange
     * provided, in reverse chronological order of start date.
     *
     * @param backupVersion backup version of the backups to search.
     * @param dateRange time period in which snapshot should have started. Finish time may be after
     *     the endTime in input.
     * @return list of backup metadata which satisfies the input criteria
     */
    List<BackupMetadata> getLatestBackupMetadata(
            BackupVersion backupVersion, DateUtil.DateRange dateRange);
}
| 3,333 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/RangeReadInputStream.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.netflix.priam.utils.RetryableCallable;
import java.io.IOException;
import java.io.InputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An implementation of InputStream that will request explicit byte ranges of the target file. This
* will make it easier to retry a failed read - which is important if we don't want to \ throw away
* a 100Gb file and restart after reading 99Gb and failing.
*/
/**
 * An implementation of InputStream that will request explicit byte ranges of the target file. This
 * will make it easier to retry a failed read - which is important if we don't want to throw away
 * a 100Gb file and restart after reading 99Gb and failing.
 */
public class RangeReadInputStream extends InputStream {
    private static final Logger logger = LoggerFactory.getLogger(RangeReadInputStream.class);
    private final AmazonS3 s3Client;
    private final String bucketName;
    private final long fileSize;
    private final String remotePath;
    // next byte offset to request from the remote object
    private long offset;

    public RangeReadInputStream(
            AmazonS3 s3Client, String bucketName, long fileSize, String remotePath) {
        this.s3Client = s3Client;
        this.bucketName = bucketName;
        this.fileSize = fileSize;
        this.remotePath = remotePath;
    }

    /**
     * Reads up to {@code len} bytes into {@code b} starting at {@code off}, issuing a ranged S3
     * GET (retried via RetryableCallable) for the next chunk of the remote object.
     *
     * @return the number of bytes read, or -1 at end of stream.
     * @throws IOException if the ranged read fails after retries.
     */
    @Override
    public int read(final byte b[], final int off, final int len) throws IOException {
        // Per the InputStream contract, a zero-length read returns 0 without touching the stream
        // (the old code would have issued an invalid range request firstByte..firstByte-1).
        if (len == 0) return 0;
        // NOTE(review): fileSize == 0 appears to be treated as "size unknown" (no EOF
        // short-circuit) — confirm callers never pass a genuinely empty file.
        if (fileSize > 0 && offset >= fileSize) return -1;
        final long firstByte = offset;
        long curEndByte = firstByte + len;
        curEndByte = curEndByte <= fileSize ? curEndByte : fileSize;
        // need to subtract one as the call to getRange is inclusive
        // meaning if you want to download the first 10 bytes of a file, request bytes 0..9
        final long endByte = curEndByte - 1;
        try {
            return new RetryableCallable<Integer>() {
                public Integer retriableCall() throws IOException {
                    GetObjectRequest req = new GetObjectRequest(bucketName, remotePath);
                    req.setRange(firstByte, endByte);
                    try (S3ObjectInputStream is = s3Client.getObject(req).getObjectContent()) {
                        byte[] readBuf = new byte[4096];
                        int rCnt;
                        int readTotal = 0;
                        int incomingOffset = off;
                        while ((rCnt = is.read(readBuf, 0, readBuf.length)) >= 0) {
                            System.arraycopy(readBuf, 0, b, incomingOffset, rCnt);
                            readTotal += rCnt;
                            incomingOffset += rCnt;
                        }
                        if (readTotal == 0 && rCnt == -1) return -1;
                        offset += readTotal;
                        return readTotal;
                    }
                }
            }.call();
        } catch (Exception e) {
            String msg =
                    String.format(
                            "failed to read offset range %d-%d of file %s whose size is %d",
                            firstByte, endByte, remotePath, fileSize);
            throw new IOException(msg, e);
        }
    }

    /**
     * Single-byte read implemented in terms of {@link #read(byte[], int, int)}. The previous
     * implementation unconditionally returned -1, which violated the InputStream contract and
     * silently truncated any read performed through this method.
     */
    @Override
    public int read() throws IOException {
        byte[] single = new byte[1];
        int n = read(single, 0, 1);
        return n == -1 ? -1 : single[0] & 0xFF;
    }
}
| 3,334 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/IFileSystemContext.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.google.inject.ImplementedBy;
import com.netflix.priam.config.IConfiguration;
@ImplementedBy(BackupFileSystemContext.class)
public interface IFileSystemContext {
    /** Returns the backup file system implementation appropriate for the given configuration. */
    IBackupFileSystem getFileStrategy(IConfiguration config);
}
| 3,335 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/CommitLogBackupTask.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.TaskTimer;
import java.io.File;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// Provide this to be run as a Quart job
@Singleton
public class CommitLogBackupTask extends AbstractBackup {
    public static final String JOBNAME = "CommitLogBackup";
    private static final Logger logger = LoggerFactory.getLogger(CommitLogBackupTask.class);
    private final CommitLogBackup clBackup;

    @Inject
    public CommitLogBackupTask(IConfiguration config, CommitLogBackup clBackup) {
        super(config);
        this.clBackup = clBackup;
    }

    /** Uploads any archived commit logs; every failure is logged rather than propagated. */
    @Override
    public void execute() throws Exception {
        try {
            logger.debug("Checking for any archived commitlogs");
            // double-check the permission before uploading anything
            if (!config.isBackingUpCommitLogs()) {
                return;
            }
            clBackup.upload(config.getCommitLogBackupRestoreFromDirs(), null);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /** Returns a one-minute timer when commit-log backups are enabled, otherwise null. */
    public static TaskTimer getTimer(IConfiguration config) {
        return config.isBackingUpCommitLogs()
                ? new SimpleTimer(JOBNAME, 60L * 1000) // every 1 min
                : null;
    }

    @Override
    protected void processColumnFamily(File columnFamilyDirectory) {
        // Intentionally a no-op: this task does not operate per column family.
    }
}
| 3,336 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/CommitLogBackup.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.google.common.collect.Lists;
import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
import com.netflix.priam.utils.DateUtil;
import java.io.File;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// Providing this if we want to use it outside Quart
public class CommitLogBackup {
    private static final Logger logger = LoggerFactory.getLogger(CommitLogBackup.class);
    private final Provider<AbstractBackupPath> pathFactory;
    private final List<String> clRemotePaths = Lists.newArrayList();
    private final IBackupFileSystem fs;

    @Inject
    public CommitLogBackup(
            Provider<AbstractBackupPath> pathFactory, @Named("backup") IBackupFileSystem fs) {
        this.pathFactory = pathFactory;
        this.fs = fs;
    }

    /**
     * Uploads every file in the archived commit log directory via uploadAndDelete. Individual file
     * failures are logged and skipped so the rest of the backup can proceed.
     *
     * @param archivedDir directory containing archived commit logs; must exist and be readable.
     * @param snapshotName optional snapshot name (parsed as a date) stamped on each uploaded path.
     * @return the backup paths that were uploaded successfully.
     * @throws IllegalArgumentException if the directory is blank, missing, or unreadable.
     */
    public List<AbstractBackupPath> upload(String archivedDir, final String snapshotName)
            throws Exception {
        logger.info("Inside upload CommitLog files");
        if (StringUtils.isBlank(archivedDir)) {
            throw new IllegalArgumentException("The archived commitlog director is blank or null");
        }
        File archivedCommitLogDir = new File(archivedDir);
        if (!archivedCommitLogDir.exists()) {
            throw new IllegalArgumentException(
                    "The archived commitlog director does not exist: " + archivedDir);
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Scanning for backup in: {}", archivedCommitLogDir.getAbsolutePath());
        }
        // listFiles() returns null when the path is not a directory or an I/O error occurs; the
        // previous version iterated the null and threw a NullPointerException in that case.
        File[] archivedFiles = archivedCommitLogDir.listFiles();
        if (archivedFiles == null) {
            throw new IllegalArgumentException(
                    "The archived commitlog path is not a readable directory: " + archivedDir);
        }
        List<AbstractBackupPath> bps = Lists.newArrayList();
        for (final File file : archivedFiles) {
            logger.debug("Uploading commit log {} for backup", file.getCanonicalFile());
            try {
                AbstractBackupPath bp = pathFactory.get();
                bp.parseLocal(file, BackupFileType.CL);
                if (snapshotName != null) bp.time = DateUtil.getDate(snapshotName);
                fs.uploadAndDelete(bp, false /* async */);
                bps.add(bp);
                addToRemotePath(bp.getRemotePath());
            } catch (Exception e) {
                logger.error(
                        "Failed to upload local file {}. Ignoring to continue with rest of backup.",
                        file,
                        e);
            }
        }
        return bps;
    }

    /** Records the remote path of a successfully uploaded commit log. */
    private void addToRemotePath(String remotePath) {
        this.clRemotePaths.add(remotePath);
    }
}
| 3,337 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupHelper.java | package com.netflix.priam.backup;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.inject.ImplementedBy;
import java.io.File;
import java.io.IOException;
import java.time.Instant;
@ImplementedBy(BackupHelperImpl.class)
public interface BackupHelper {
    /**
     * Convenience overload with no target completion time ({@code Instant.EPOCH} is the sentinel
     * for "no deadline").
     */
    default ImmutableList<ListenableFuture<AbstractBackupPath>> uploadAndDeleteAllFiles(
            final File parent, final AbstractBackupPath.BackupFileType type, boolean async)
            throws Exception {
        return uploadAndDeleteAllFiles(parent, type, Instant.EPOCH, async);
    }
    /**
     * Uploads, and then deletes locally (per the method name), all eligible files of the given
     * type under {@code parent}.
     *
     * @param parent directory whose files should be uploaded.
     * @param type kind of backup files being uploaded (e.g. SST, SST_V2).
     * @param target desired completion instant, used by the implementation for rate limiting.
     * @param async whether uploads should be performed asynchronously.
     * @return futures for the uploaded paths, one per file.
     */
    ImmutableList<ListenableFuture<AbstractBackupPath>> uploadAndDeleteAllFiles(
            final File parent,
            final AbstractBackupPath.BackupFileType type,
            Instant target,
            boolean async)
            throws Exception;
    /** Returns the backup paths for the files in {@code dir} that qualify for the given type. */
    ImmutableSet<AbstractBackupPath> getBackupPaths(
            File dir, AbstractBackupPath.BackupFileType type) throws IOException;
}
| 3,338 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupVerificationResult.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
/**
* Created by aagrawal on 2/16/17. This class holds the result from BackupVerification. The default
* are all null and false.
*/
public class BackupVerificationResult {
    // Overall verdict of the verification; defaults to false until proven valid.
    public boolean valid = false;
    // Remote location of the verified backup, or null when unknown.
    public String remotePath = null;
    // Instant of the snapshot that was verified, or null when unknown.
    public Instant snapshotInstant = null;
    // Whether the backup manifest (meta file) was found.
    public boolean manifestAvailable = false;
    // Files listed in the meta file but not found remotely — presumably the reason valid stays
    // false; confirm against the BackupVerification implementation.
    public List<String> filesInMetaOnly = new ArrayList<>();
    // Count of files present both in the meta file and in the remote file system.
    public int filesMatched = 0;
    @Override
    public String toString() {
        return GsonJsonSerializer.getGson().toJson(this);
    }
}
| 3,339 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/FileSnapshotStatusMgr.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.utils.MaxSizeHashMap;
import java.io.*;
import java.util.LinkedList;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Default implementation for {@link IBackupStatusMgr}. This will save the snapshot status in local
* file. Created by aagrawal on 7/11/17.
*/
@Singleton
public class FileSnapshotStatusMgr extends BackupStatusMgr {
    private static final Logger logger = LoggerFactory.getLogger(FileSnapshotStatusMgr.class);
    private static final int IN_MEMORY_SNAPSHOT_CAPACITY = 60;
    private final String filename;

    /**
     * Constructor to initialize the file based snapshot status manager.
     *
     * @param config {@link IConfiguration} of priam to find where file should be saved/read from.
     * @param instanceState Status of the instance encapsulating health and other metadata of Priam
     *     and Cassandra.
     */
    @Inject
    public FileSnapshotStatusMgr(IConfiguration config, InstanceState instanceState) {
        super(
                IN_MEMORY_SNAPSHOT_CAPACITY,
                instanceState); // Fetch capacity from properties, if required.
        this.filename = config.getBackupStatusFileLoc();
        init();
    }

    /** Loads the previously persisted status map from disk, falling back to an empty map. */
    @SuppressWarnings("unchecked") // readObject() cannot be checked; we own the serialized format
    private void init() {
        // Retrieve entire file and re-populate the list.
        File snapshotFile = new File(filename);
        if (!snapshotFile.exists()) {
            if (!snapshotFile.getParentFile().mkdirs() && !snapshotFile.getParentFile().exists()) {
                logger.warn("Could not create parent directory for status file: {}", filename);
            }
            logger.info(
                    "Snapshot status file do not exist on system. Bypassing initilization phase.");
            backupMetadataMap = new MaxSizeHashMap<>(capacity);
            return;
        }
        try (final ObjectInputStream inputStream =
                new ObjectInputStream(new FileInputStream(snapshotFile))) {
            backupMetadataMap = (Map<String, LinkedList<BackupMetadata>>) inputStream.readObject();
            logger.info(
                    "Snapshot status of size {} fetched successfully from {}",
                    backupMetadataMap.size(),
                    filename);
        } catch (IOException e) {
            // Pass the throwable to the logger instead of printStackTrace() so the stack trace
            // lands in the configured log output instead of stderr.
            logger.error(
                    "Error while trying to fetch snapshot status from {}. Error: {}. If this is first time after upgrading Priam, ignore this.",
                    filename,
                    e.getLocalizedMessage(),
                    e);
        } catch (Exception e) {
            logger.error(
                    "Error while trying to fetch snapshot status from {}. Error: {}.",
                    filename,
                    e.getLocalizedMessage(),
                    e);
        }
        if (backupMetadataMap == null) backupMetadataMap = new MaxSizeHashMap<>(capacity);
    }

    /** Persists the entire in-memory status map to the configured file. */
    @Override
    public void save(BackupMetadata backupMetadata) {
        File snapshotFile = new File(filename);
        if (!snapshotFile.exists()) snapshotFile.getParentFile().mkdirs();
        // Will save entire list to file.
        try (final ObjectOutputStream out =
                new ObjectOutputStream(new FileOutputStream(filename))) {
            out.writeObject(backupMetadataMap);
            out.flush();
            logger.info(
                    "Snapshot status of size {} is saved to {}",
                    backupMetadataMap.size(),
                    filename);
        } catch (IOException e) {
            logger.error(
                    "Error while trying to persist snapshot status to {}. Error: {}",
                    filename,
                    e.getLocalizedMessage(),
                    e);
        }
    }

    @Override
    public LinkedList<BackupMetadata> fetch(String snapshotDate) {
        // No need to fetch from local machine as it was read once at start. No point reading again
        // and again.
        return backupMetadataMap.get(snapshotDate);
    }
}
| 3,340 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupDynamicRateLimiter.java | package com.netflix.priam.backup;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.priam.config.IConfiguration;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import javax.inject.Inject;
/**
 * Throttles snapshot uploads so they complete close to a target instant: the permitted rate is
 * recomputed as (bytes per backup thread) / (seconds remaining until the target).
 */
public class BackupDynamicRateLimiter implements DynamicRateLimiter {
    private final Clock clock;
    private final IConfiguration config;
    private final DirectorySize dirSize;
    private final RateLimiter rateLimiter;
    @Inject
    public BackupDynamicRateLimiter(IConfiguration config, Clock clock, DirectorySize dirSize) {
        this.clock = clock;
        this.config = config;
        this.dirSize = dirSize;
        // Effectively unlimited until the first rate adjustment in acquire().
        this.rateLimiter = RateLimiter.create(Double.MAX_VALUE);
    }
    @Override
    public void acquire(AbstractBackupPath path, Instant target, int permits) {
        // Instant.EPOCH is the sentinel for "no deadline"; only files under the snapshot folder
        // are throttled — everything else passes through untouched.
        if (target.equals(Instant.EPOCH)
                || !path.getBackupFile()
                        .getAbsolutePath()
                        .contains(AbstractBackup.SNAPSHOT_FOLDER)) {
            return;
        }
        long secondsRemaining = Duration.between(clock.instant(), target).getSeconds();
        if (secondsRemaining < 1) {
            // skip file system checks when unnecessary
            return;
        }
        int backupThreads = config.getBackupThreads();
        Preconditions.checkState(backupThreads > 0);
        // Approximate bytes each upload thread still has to move.
        long bytesPerThread = this.dirSize.getBytes(config.getDataFileLocation()) / backupThreads;
        if (bytesPerThread < 1) {
            return;
        }
        double newRate = (double) bytesPerThread / secondsRemaining;
        double oldRate = rateLimiter.getRate();
        // Only adjust the limiter when the relative change exceeds the configured threshold,
        // avoiding churn from minor fluctuations in directory size or clock reads.
        if ((Math.abs(newRate - oldRate) / oldRate) > config.getRateLimitChangeThreshold()) {
            rateLimiter.setRate(newRate);
        }
        rateLimiter.acquire(permits);
    }
}
| 3,341 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.MaxSizeHashMap;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* A means to manage metadata for various types of backups (snapshots, incrementals)
*/
@Singleton
public abstract class BackupStatusMgr implements IBackupStatusMgr {
private static final Logger logger = LoggerFactory.getLogger(BackupStatusMgr.class);
/**
* Map<yyyymmdd, List<{@link BackupMetadata}>: Map of completed snapshots represented by its
* snapshot day (yyyymmdd) and a list of snapshots started on that day Note: A {@link
* LinkedList} was chosen for fastest retrieval of latest snapshot.
*/
Map<String, LinkedList<BackupMetadata>> backupMetadataMap;
final int capacity;
private final InstanceState instanceState;
/**
* @param capacity Capacity to hold in-memory snapshot status days.
* @param instanceState Status of the instance encapsulating health and other metadata of Priam
* and Cassandra.
*/
@Inject
public BackupStatusMgr(int capacity, InstanceState instanceState) {
this.capacity = capacity;
this.instanceState = instanceState;
// This is to avoid us loading lot of status in memory.
// We will fetch previous status from backend service, if required.
backupMetadataMap = new MaxSizeHashMap<>(capacity);
}
@Override
public int getCapacity() {
return capacity;
}
@Override
public Map<String, LinkedList<BackupMetadata>> getAllSnapshotStatus() {
return backupMetadataMap;
}
@Override
public LinkedList<BackupMetadata> locate(Date snapshotDate) {
return locate(DateUtil.formatyyyyMMdd(snapshotDate));
}
@Override
public LinkedList<BackupMetadata> locate(String snapshotDate) {
if (StringUtils.isEmpty(snapshotDate)) return null;
// See if in memory
if (backupMetadataMap.containsKey(snapshotDate)) return backupMetadataMap.get(snapshotDate);
LinkedList<BackupMetadata> metadataLinkedList = fetch(snapshotDate);
// Save the result in local cache so we don't hit data store/file.
backupMetadataMap.put(snapshotDate, metadataLinkedList);
return metadataLinkedList;
}
/**
 * Records the start of a new backup: the metadata becomes the most recent
 * entry for its snapshot day, the instance state is refreshed, and the record
 * is persisted via {@link #save(BackupMetadata)}.
 *
 * @param backupMetadata metadata of the backup that just started.
 */
@Override
public void start(BackupMetadata backupMetadata) {
    final String snapshotDay = backupMetadata.getSnapshotDate();
    LinkedList<BackupMetadata> dayEntries = locate(snapshotDay);
    if (dayEntries == null) dayEntries = new LinkedList<>();
    // The newest backup always goes to the head of the day's list.
    dayEntries.addFirst(backupMetadata);
    backupMetadataMap.put(snapshotDay, dayEntries);
    instanceState.setBackupStatus(backupMetadata);
    // Persist the updated status to the backing store.
    save(backupMetadata);
}
/**
 * Marks a backup as successfully finished, stamping a completion time if the
 * caller did not provide one, then updates instance state and the cached/
 * persisted record.
 *
 * @param backupMetadata metadata of the backup to finalize.
 */
@Override
public void finish(BackupMetadata backupMetadata) {
    // Force the terminal state in case the caller did not set it.
    if (backupMetadata.getStatus() != Status.FINISHED) {
        backupMetadata.setStatus(Status.FINISHED);
    }
    // Stamp "now" (GMT calendar) as the completion time when missing.
    if (backupMetadata.getCompleted() == null) {
        backupMetadata.setCompleted(
                Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime());
    }
    instanceState.setBackupStatus(backupMetadata);
    update(backupMetadata);
}
/**
 * Refreshes the cached record for this backup (creating it if absent) and
 * persists the result via {@link #save(BackupMetadata)}.
 */
@Override
public void update(BackupMetadata backupMetadata) {
    // Retrieve the snapshot metadata and then update the finish date/status.
    retrieveAndUpdate(backupMetadata);
    // Save the backupMetaDataMap
    save(backupMetadata);
}
/**
 * Synchronizes the cached entry for this backup with the supplied metadata.
 * If an equal entry already exists in the day's list, its mutable fields are
 * overwritten; otherwise the metadata is inserted as the day's newest entry.
 *
 * @param backupMetadata source of truth for completion time, status, etc.
 */
private void retrieveAndUpdate(final BackupMetadata backupMetadata) {
    // Locate (or lazily create) the list of backups for this snapshot day.
    LinkedList<BackupMetadata> dayEntries = locate(backupMetadata.getSnapshotDate());
    if (dayEntries == null) {
        logger.error(
                "No previous backupMetaData found. This should not happen. Creating new to ensure app keeps running.");
        dayEntries = new LinkedList<>();
        backupMetadataMap.put(backupMetadata.getSnapshotDate(), dayEntries);
    }
    Optional<BackupMetadata> existing =
            dayEntries.stream().filter(backupMetadata::equals).findFirst();
    if (existing.isPresent()) {
        // Copy the mutable fields onto the cached instance.
        BackupMetadata cached = existing.get();
        cached.setCompleted(backupMetadata.getCompleted());
        cached.setStatus(backupMetadata.getStatus());
        cached.setCassandraSnapshotSuccess(backupMetadata.isCassandraSnapshotSuccess());
        cached.setSnapshotLocation(backupMetadata.getSnapshotLocation());
        cached.setLastValidated(backupMetadata.getLastValidated());
    } else {
        // Unknown record: treat it as the most recent backup of the day.
        dayEntries.addFirst(backupMetadata);
    }
}
/**
 * Marks a backup as failed, stamping a completion time if missing, then
 * updates instance state and the cached/persisted record.
 *
 * @param backupMetadata metadata of the backup that failed.
 */
@Override
public void failed(BackupMetadata backupMetadata) {
    // Record "now" (GMT calendar) as the completion time when missing.
    if (backupMetadata.getCompleted() == null) {
        backupMetadata.setCompleted(
                Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime());
    }
    // Set the terminal state last to guarantee FAILED wins.
    if (backupMetadata.getStatus() != Status.FAILED) {
        backupMetadata.setStatus(Status.FAILED);
    }
    instanceState.setBackupStatus(backupMetadata);
    update(backupMetadata);
}
/**
 * Implementation on how to save the backup metadata to the durable store.
 * Called on every status transition (start/finish/fail).
 *
 * @param backupMetadata BackupMetadata to be saved
 */
protected abstract void save(BackupMetadata backupMetadata);
/**
 * Implementation on how to retrieve the backup metadata(s) for a given date from store.
 * Used as the cache-miss path of {@link #locate(String)}; may return null.
 *
 * @param snapshotDate Snapshot date to be retrieved from datastore in format of yyyyMMdd
 * @return The list of snapshots started on the snapshot day in descending order of snapshot
 *     start time.
 */
protected abstract LinkedList<BackupMetadata> fetch(String snapshotDate);
/**
 * Returns all FINISHED backups of the given {@code backupVersion} whose start
 * time falls inside {@code dateRange}, sorted by start time descending.
 *
 * <p>Walks day-by-day from the end of the range back to (and including) the
 * start day, collecting each day's snapshot list via {@link #locate(Date)},
 * then filters by status, version and exact start-time bounds.
 *
 * @param backupVersion backup format version the caller is interested in.
 * @param dateRange inclusive range the backup must have started in.
 * @return matching backups, newest first; empty list if none.
 */
public List<BackupMetadata> getLatestBackupMetadata(
        BackupVersion backupVersion, DateUtil.DateRange dateRange) {
    Instant startDay = dateRange.getStartTime().truncatedTo(ChronoUnit.DAYS);
    Instant endDay = dateRange.getEndTime().truncatedTo(ChronoUnit.DAYS);
    List<BackupMetadata> allBackups = new ArrayList<>();
    Instant previousDay = endDay;
    do {
        // We need to find the latest backupmetadata in this date range.
        logger.info(
                "Will try to find snapshot for : {}",
                DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, previousDay));
        List<BackupMetadata> backupsForDate = locate(new Date(previousDay.toEpochMilli()));
        if (backupsForDate != null) allBackups.addAll(backupsForDate);
        previousDay = previousDay.minus(1, ChronoUnit.DAYS);
    } while (!previousDay.isBefore(startDay));
    // Return all the backups which are FINISHED and were "started" in the dateRange provided.
    // Do not compare the end time of snapshot as it may take random amount of time to finish
    // the snapshot.
    return allBackups
            .stream()
            .filter(Objects::nonNull)
            .filter(backupMetadata -> backupMetadata.getStatus() == Status.FINISHED)
            .filter(backupMetadata -> backupMetadata.getBackupVersion().equals(backupVersion))
            .filter(
                    backupMetadata ->
                            backupMetadata
                                            .getStart()
                                            .toInstant()
                                            .compareTo(dateRange.getStartTime())
                                    >= 0
                                    && backupMetadata
                                                    .getStart()
                                                    .toInstant()
                                                    .compareTo(dateRange.getEndTime())
                                            <= 0)
            .sorted(Comparator.comparing(BackupMetadata::getStart).reversed())
            .collect(Collectors.toList());
}
/** @return human-readable dump of the cached statuses and the cache capacity. */
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("BackupStatusMgr{");
    sb.append("backupMetadataMap=").append(backupMetadataMap);
    sb.append(", capacity=").append(capacity);
    sb.append('}');
    return sb.toString();
}
}
| 3,342 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupVersion.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.netflix.priam.scheduler.UnsupportedTypeException;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Enum to capture backup versions. Possible version are V1 and V2. Created by aagrawal on 1/29/19.
*/
/**
 * Enum to capture backup versions. Possible version are V1 and V2. Created by aagrawal on 1/29/19.
 */
public enum BackupVersion {
    SNAPSHOT_BACKUP(1),
    SNAPSHOT_META_SERVICE(2);

    private static final Logger logger = LoggerFactory.getLogger(BackupVersion.class);

    private final int backupVersion;
    // Reverse index from numeric version to enum constant. Populated once in the
    // static initializer; declared final so it cannot be reassigned afterwards.
    private static final Map<Integer, BackupVersion> map = new HashMap<>();

    static {
        for (BackupVersion backupVersion : BackupVersion.values()) {
            map.put(backupVersion.getBackupVersion(), backupVersion);
        }
    }

    BackupVersion(int backupVersion) {
        this.backupVersion = backupVersion;
    }

    /**
     * Resolves a numeric backup version to its enum constant.
     *
     * @param backupVersion numeric version to resolve.
     * @param acceptIllegalValue when true, unknown versions return null instead of throwing.
     * @return the matching constant, or null if unknown and {@code acceptIllegalValue} is true.
     * @throws UnsupportedTypeException if the version is unknown and illegal values are rejected.
     */
    public static BackupVersion lookup(int backupVersion, boolean acceptIllegalValue)
            throws UnsupportedTypeException {
        BackupVersion backupVersionResolved = map.get(backupVersion);
        if (backupVersionResolved == null) {
            String message =
                    String.format(
                            "%s is not a supported BackupVersion. Supported values are %s",
                            backupVersion, getSupportedValues());
            if (acceptIllegalValue) {
                message =
                        message
                                + ". Since acceptIllegalValue is set to True, returning NULL instead.";
                logger.error(message);
                return null;
            }
            logger.error(message);
            throw new UnsupportedTypeException(message);
        }
        return backupVersionResolved;
    }

    /** @return comma-separated list of all supported enum constants, in declaration order. */
    private static String getSupportedValues() {
        StringBuilder supportedValues = new StringBuilder();
        for (BackupVersion type : BackupVersion.values()) {
            // Prepend the separator for every element after the first.
            if (supportedValues.length() > 0) supportedValues.append(",");
            supportedValues.append(type);
        }
        return supportedValues.toString();
    }

    /**
     * Strict variant of {@link #lookup(int, boolean)}: unknown versions always throw.
     *
     * @throws UnsupportedTypeException if the version is unknown.
     */
    public static BackupVersion lookup(int backupVersion) throws UnsupportedTypeException {
        return lookup(backupVersion, false);
    }

    /** @return the numeric wire/persistence value of this version. */
    public int getBackupVersion() {
        return backupVersion;
    }
}
| 3,343 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupHelperImpl.java | package com.netflix.priam.backup;
import static java.util.stream.Collectors.toSet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.priam.compress.CompressionType;
import com.netflix.priam.config.BackupsToCompress;
import com.netflix.priam.config.IConfiguration;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Instant;
import java.util.Set;
import java.util.stream.Stream;
import javax.inject.Inject;
import javax.inject.Provider;
/**
 * Default {@link BackupHelper}: turns the files of a Cassandra data directory
 * into {@link AbstractBackupPath} instances (choosing a compression algorithm
 * per file) and hands them to the configured {@link IBackupFileSystem}.
 */
public class BackupHelperImpl implements BackupHelper {
    // SSTable component suffix whose presence indicates the matching -Data.db
    // component is already compressed by Cassandra.
    private static final String COMPRESSION_SUFFIX = "-CompressionInfo.db";
    private static final String DATA_SUFFIX = "-Data.db";
    private final Provider<AbstractBackupPath> pathFactory;
    private final IBackupFileSystem fs;
    private final IConfiguration config;

    @Inject
    public BackupHelperImpl(
            IConfiguration config,
            IFileSystemContext backupFileSystemCtx,
            Provider<AbstractBackupPath> pathFactory) {
        this.config = config;
        this.pathFactory = pathFactory;
        // The concrete file system (e.g. S3 variant) is selected from config.
        this.fs = backupFileSystemCtx.getFileStrategy(config);
    }

    /**
     * Upload files in the specified dir. Does not delete the file in case of error. The files are
     * uploaded serially or async based on flag provided.
     *
     * @param parent Parent dir
     * @param type Type of file (META, SST, SNAP etc)
     * @param async Upload the file(s) in async fashion if enabled.
     * @return List of files that are successfully uploaded as part of backup
     * @throws Exception when there is failure in uploading files.
     */
    @Override
    public ImmutableList<ListenableFuture<AbstractBackupPath>> uploadAndDeleteAllFiles(
            final File parent,
            final AbstractBackupPath.BackupFileType type,
            Instant target,
            boolean async)
            throws Exception {
        final ImmutableList.Builder<ListenableFuture<AbstractBackupPath>> futures =
                ImmutableList.builder();
        for (AbstractBackupPath bp : getBackupPaths(parent, type)) {
            futures.add(fs.uploadAndDelete(bp, target, async));
        }
        return futures.build();
    }

    /**
     * Lists the regular files of {@code dir} as backup paths, with -Data.db
     * components ordered last (after all their sibling components).
     *
     * @param dir directory to scan (non-recursive).
     * @param type backup file type to stamp on each path.
     * @return insertion-ordered set: non-data files first, then data files.
     * @throws IOException if the directory cannot be listed.
     */
    @Override
    public ImmutableSet<AbstractBackupPath> getBackupPaths(
            File dir, AbstractBackupPath.BackupFileType type) throws IOException {
        Set<File> files;
        try (Stream<Path> pathStream = Files.list(dir.toPath())) {
            files = pathStream.map(Path::toFile).filter(File::isFile).collect(toSet());
        }
        // Prefixes (file name up to the last '-') of all CompressionInfo
        // components; used to detect already-compressed SSTables below.
        Set<String> compressedFilePrefixes =
                files.stream()
                        .map(File::getName)
                        .filter(name -> name.endsWith(COMPRESSION_SUFFIX))
                        .map(name -> name.substring(0, name.lastIndexOf('-')))
                        .collect(toSet());
        final ImmutableSet.Builder<AbstractBackupPath> bps = ImmutableSet.builder();
        ImmutableSet.Builder<AbstractBackupPath> dataFiles = ImmutableSet.builder();
        for (File file : files) {
            final AbstractBackupPath bp = pathFactory.get();
            bp.parseLocal(file, type);
            bp.setCompression(getCorrectCompressionAlgorithm(bp, compressedFilePrefixes));
            // Route -Data.db components into the secondary builder so they are
            // appended after everything else.
            (file.getAbsolutePath().endsWith(DATA_SUFFIX) ? dataFiles : bps).add(bp);
        }
        bps.addAll(dataFiles.build());
        return bps.build();
    }

    /**
     * Picks the compression to apply before upload. V1 files and files
     * predating the configured compression-transition epoch always use SNAPPY;
     * otherwise the BackupsToCompress setting decides, with IF_REQUIRED
     * skipping Priam-side compression for SSTables Cassandra already compressed.
     */
    private CompressionType getCorrectCompressionAlgorithm(
            AbstractBackupPath path, Set<String> compressedFiles) {
        if (!AbstractBackupPath.BackupFileType.isV2(path.getType())
                || path.getLastModified().toEpochMilli()
                        < config.getCompressionTransitionEpochMillis()) {
            return CompressionType.SNAPPY;
        }
        String file = path.getFileName();
        BackupsToCompress which = config.getBackupsToCompress();
        switch (which) {
            case NONE:
                return CompressionType.NONE;
            case ALL:
                return CompressionType.SNAPPY;
            case IF_REQUIRED:
                // Compressed on the Cassandra side already? Then do not re-compress.
                int splitIndex = file.lastIndexOf('-');
                return splitIndex >= 0 && compressedFiles.contains(file.substring(0, splitIndex))
                        ? CompressionType.NONE
                        : CompressionType.SNAPPY;
            default:
                throw new IllegalArgumentException("NONE, ALL, UNCOMPRESSED only. Saw: " + which);
        }
    }
}
| 3,344 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/MetaData.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.utils.DateUtil;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Provider;
import org.apache.commons.io.FileUtils;
import org.json.simple.JSONArray;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class to create a meta data file with a list of snapshot files. Also list the contents of a meta
* data file.
*/
/**
 * Class to create a meta data file with a list of snapshot files. Also list the contents of a meta
 * data file.
 */
public class MetaData {
    private static final Logger logger = LoggerFactory.getLogger(MetaData.class);
    private final Provider<AbstractBackupPath> pathFactory;
    // Remote paths of every meta file uploaded through this instance.
    // NOTE(review): grows unboundedly over the instance lifetime -- confirm intended.
    private final List<String> metaRemotePaths = new ArrayList<>();
    private final IBackupFileSystem fs;

    @Inject
    public MetaData(
            Provider<AbstractBackupPath> pathFactory,
            IFileSystemContext backupFileSystemCtx,
            IConfiguration config) {
        this.pathFactory = pathFactory;
        this.fs = backupFileSystemCtx.getFileStrategy(config);
    }

    /**
     * Writes the remote paths of the given backup files into a local meta.json,
     * uploads it (synchronously) and records its remote path.
     *
     * @param bps backup paths whose remote locations make up the manifest.
     * @param snapshotName snapshot name, parseable as a date, stamped on the upload.
     * @return the uploaded meta file's backup path.
     * @throws Exception on write or upload failure.
     */
    public AbstractBackupPath set(List<AbstractBackupPath> bps, String snapshotName)
            throws Exception {
        File metafile = createTmpMetaFile();
        try (FileWriter fr = new FileWriter(metafile)) {
            JSONArray jsonObj = new JSONArray();
            for (AbstractBackupPath filePath : bps) jsonObj.add(filePath.getRemotePath());
            fr.write(jsonObj.toJSONString());
        }
        AbstractBackupPath backupfile = decorateMetaJson(metafile, snapshotName);
        fs.uploadAndDelete(backupfile, false /* async */);
        addToRemotePath(backupfile.getRemotePath());
        return backupfile;
    }

    /*
    From the meta.json to be created, populate its meta data for the backup file.
    */
    public AbstractBackupPath decorateMetaJson(File metafile, String snapshotName)
            throws ParseException {
        AbstractBackupPath backupfile = pathFactory.get();
        backupfile.parseLocal(metafile, BackupFileType.META);
        // The snapshot name encodes the backup date; stamp it on the path.
        backupfile.setTime(DateUtil.getDate(snapshotName));
        return backupfile;
    }

    /*
     * Determines the existence of the backup meta file. This meta file could be snapshot (meta.json) or
     * incrementals (meta_keyspace_cf..json).
     *
     * @param backup meta file to search
     * @return true if backup meta file exist, false otherwise.
     */
    public Boolean doesExist(final AbstractBackupPath meta) {
        try {
            fs.downloadFile(meta, "" /* suffix */, 5 /* retries */);
        } catch (Exception e) {
            // Best-effort: a failed download simply means "not found" below.
            logger.error("Error downloading the Meta data try with a different date...", e);
        }
        return meta.newRestoreFile().exists();
    }

    /**
     * Creates an empty "meta.json" in the system temp directory, replacing any
     * file of that name left over from a previous run.
     *
     * @return the freshly created meta.json file.
     * @throws IOException if the temp file cannot be created or moved.
     */
    public File createTmpMetaFile() throws IOException {
        File metafile = File.createTempFile("meta", ".json");
        File destFile = new File(metafile.getParent(), "meta.json");
        if (destFile.exists()) destFile.delete();
        FileUtils.moveFile(metafile, destFile);
        return destFile;
    }

    // Remembers the remote location of an uploaded meta file.
    private void addToRemotePath(String remotePath) {
        metaRemotePaths.add(remotePath);
    }
}
| 3,345 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupFolder.java | package com.netflix.priam.backup;
import java.util.Arrays;
import java.util.Optional;
/** Enumerates the Cassandra data sub-directories that hold backup artifacts. */
public enum BackupFolder {
    SNAPSHOTS("snapshots"),
    BACKUPS("backups");

    // On-disk directory name of this folder; immutable per constant.
    private final String name;

    BackupFolder(String name) {
        this.name = name;
    }

    /**
     * Resolves a directory name to its {@link BackupFolder} constant.
     *
     * @param name directory name to look up (case-sensitive).
     * @return the matching folder, or {@link Optional#empty()} if none matches.
     */
    public static Optional<BackupFolder> fromName(String name) {
        return Arrays.stream(values()).filter(b -> b.name.equals(name)).findFirst();
    }
}
| 3,346 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.google.common.collect.ImmutableMap;
import com.netflix.priam.backupv2.IMetaProxy;
import com.netflix.priam.backupv2.MetaV2Proxy;
import com.netflix.priam.utils.DateUtil;
import java.nio.file.Path;
import java.time.Instant;
import java.util.*;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Provider;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
/** Helper methods applicable to both backup and restore */
/** Helper methods applicable to both backup and restore */
public class BackupRestoreUtil {
    // Matches any "x.y" shaped token: one char, a literal dot, one char.
    // Used only to validate that a filter entry contains a dot separator.
    private static final Pattern columnFamilyFilterPattern = Pattern.compile(".\\..");
    // keyspace -> column families to include/exclude. A null map means
    // "no filter configured"; an empty CF list means "whole keyspace".
    private final Map<String, List<String>> includeFilter;
    private final Map<String, List<String>> excludeFilter;
    // System column families that are always excluded from backup/restore.
    private static final Map<String, List<String>> FILTER_COLUMN_FAMILY =
            ImmutableMap.of(
                    "system",
                    Arrays.asList(
                            "local", "peers", "hints", "compactions_in_progress", "LocationInfo"));

    @Inject
    public BackupRestoreUtil(String configIncludeFilter, String configExcludeFilter) {
        includeFilter = getFilter(configIncludeFilter);
        excludeFilter = getFilter(configExcludeFilter);
    }

    /**
     * Finds the newest meta file in the date range that passes validation.
     *
     * @return the first valid meta path, or empty if none is found.
     */
    public static Optional<AbstractBackupPath> getLatestValidMetaPath(
            IMetaProxy metaProxy, DateUtil.DateRange dateRange) {
        return metaProxy
                .findMetaFiles(dateRange)
                .stream()
                .filter(meta -> metaProxy.isMetaFileValid(meta).valid)
                .findFirst();
    }

    /**
     * Downloads the given meta file and parses its SSTable entries into backup
     * paths. The local copy of the meta file is deleted afterwards.
     *
     * @return the snapshot's SSTable paths as listed in the meta file.
     * @throws Exception on download or parse failure.
     */
    public static List<AbstractBackupPath> getMostRecentSnapshotPaths(
            AbstractBackupPath latestValidMetaFile,
            IMetaProxy metaProxy,
            Provider<AbstractBackupPath> pathProvider)
            throws Exception {
        Path metaFile = metaProxy.downloadMetaFile(latestValidMetaFile);
        List<AbstractBackupPath> snapshotPaths =
                metaProxy
                        .getSSTFilesFromMeta(metaFile)
                        .stream()
                        .map(
                                value -> {
                                    AbstractBackupPath path = pathProvider.get();
                                    path.parseRemote(value);
                                    return path;
                                })
                        .collect(Collectors.toList());
        FileUtils.deleteQuietly(metaFile.toFile());
        return snapshotPaths;
    }

    /**
     * Lists the incremental backup paths created between the meta file's
     * snapshot time and the end of the requested range.
     */
    public static List<AbstractBackupPath> getIncrementalPaths(
            AbstractBackupPath latestValidMetaFile,
            DateUtil.DateRange dateRange,
            IMetaProxy metaProxy) {
        Instant snapshotTime;
        // V2 metas carry their snapshot time as the remote last-modified time;
        // V1 metas carry it in the parsed path time.
        if (metaProxy instanceof MetaV2Proxy) snapshotTime = latestValidMetaFile.getLastModified();
        else snapshotTime = latestValidMetaFile.getTime().toInstant();
        DateUtil.DateRange incrementalDateRange =
                new DateUtil.DateRange(snapshotTime, dateRange.getEndTime());
        List<AbstractBackupPath> incrementalPaths = new ArrayList<>();
        metaProxy.getIncrementals(incrementalDateRange).forEachRemaining(incrementalPaths::add);
        return incrementalPaths;
    }

    /**
     * Parses a comma-separated "keyspace.columnfamily" filter string.
     *
     * <p>A CF of "*" selects the whole keyspace (stored as an empty list).
     * Any "-" suffix in the CF name (e.g. a directory UUID) is stripped.
     *
     * @param inputFilter raw filter string; null/empty yields null (no filter).
     * @return keyspace -> CF-name list, or null when no filter is configured.
     * @throws IllegalArgumentException if an entry lacks the dot separator.
     */
    public static Map<String, List<String>> getFilter(String inputFilter)
            throws IllegalArgumentException {
        if (StringUtils.isEmpty(inputFilter)) return null;
        final Map<String, List<String>> columnFamilyFilter = new HashMap<>();
        String[] filters = inputFilter.split(",");
        for (String cfFilter : filters) {
            if (columnFamilyFilterPattern.matcher(cfFilter).find()) {
                String[] filter = cfFilter.split("\\.");
                String keyspaceName = filter[0];
                String columnFamilyName = filter[1];
                if (columnFamilyName.contains("-"))
                    columnFamilyName = columnFamilyName.substring(0, columnFamilyName.indexOf("-"));
                List<String> existingCfs =
                        columnFamilyFilter.getOrDefault(keyspaceName, new ArrayList<>());
                if (!columnFamilyName.equalsIgnoreCase("*")) existingCfs.add(columnFamilyName);
                columnFamilyFilter.put(keyspaceName, existingCfs);
            } else {
                throw new IllegalArgumentException(
                        "Invalid format: " + cfFilter + ". \"keyspace.columnfamily\" is required.");
            }
        }
        return columnFamilyFilter;
    }

    /**
     * Returns if provided keyspace and/or columnfamily is filtered for backup or restore.
     *
     * <p>Precedence: hard-coded system CFs, then the exclude filter, then the
     * include filter (when an include filter exists, anything not in it is
     * filtered out).
     *
     * @param keyspace name of the keyspace in consideration
     * @param columnFamilyDir name of the columnfamily directory in consideration
     * @return true if directory should be filter from processing; otherwise, false.
     */
    public final boolean isFiltered(String keyspace, String columnFamilyDir) {
        if (StringUtils.isEmpty(keyspace) || StringUtils.isEmpty(columnFamilyDir)) return false;
        // Strip the directory's "-<uuid>" suffix to get the CF name.
        String columnFamilyName = columnFamilyDir.split("-")[0];
        if (FILTER_COLUMN_FAMILY.containsKey(keyspace)
                && FILTER_COLUMN_FAMILY.get(keyspace).contains(columnFamilyName)) return true;
        if (excludeFilter != null)
            if (excludeFilter.containsKey(keyspace)
                    && (excludeFilter.get(keyspace).isEmpty()
                            || excludeFilter.get(keyspace).contains(columnFamilyName))) {
                return true;
            }
        if (includeFilter != null)
            return !(includeFilter.containsKey(keyspace)
                    && (includeFilter.get(keyspace).isEmpty()
                            || includeFilter.get(keyspace).contains(columnFamilyName)));
        return false;
    }
}
| 3,347 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/AbstractFileSystem.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.notification.BackupNotificationMgr;
import com.netflix.priam.notification.UploadStatus;
import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor;
import com.netflix.priam.utils.BoundedExponentialRetryCallable;
import com.netflix.spectator.api.patterns.PolledMeter;
import java.io.File;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.*;
import javax.inject.Inject;
import javax.inject.Provider;
import org.apache.commons.collections4.iterators.FilterIterator;
import org.apache.commons.collections4.iterators.TransformIterator;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is responsible for managing parallelism and orchestrating the upload and download, but
* the subclasses actually implement the details of uploading a file.
*
* <p>Created by aagrawal on 8/30/18.
*/
public abstract class AbstractFileSystem implements IBackupFileSystem {
private static final Logger logger = LoggerFactory.getLogger(AbstractFileSystem.class);
protected final Provider<AbstractBackupPath> pathProvider;
private final IConfiguration configuration;
protected final BackupMetrics backupMetrics;
private final Set<Path> tasksQueued;
private final ListeningExecutorService fileUploadExecutor;
private final ThreadPoolExecutor fileDownloadExecutor;
private final BackupNotificationMgr backupNotificationMgr;
// This is going to be a write-thru cache containing the most frequently used items from remote
// file system. This is to ensure that we don't make too many API calls to remote file system.
private final Cache<Path, Boolean> objectCache;
@Inject
public AbstractFileSystem(
IConfiguration configuration,
BackupMetrics backupMetrics,
BackupNotificationMgr backupNotificationMgr,
Provider<AbstractBackupPath> pathProvider) {
this.configuration = configuration;
this.backupMetrics = backupMetrics;
this.pathProvider = pathProvider;
this.backupNotificationMgr = backupNotificationMgr;
this.objectCache =
CacheBuilder.newBuilder().maximumSize(configuration.getBackupQueueSize()).build();
tasksQueued = new ConcurrentHashMap<>().newKeySet();
/*
Note: We are using different queue for upload and download as with Backup V2.0 we might download all the meta
files for "sync" feature which might compete with backups for scheduling.
Also, we may want to have different TIMEOUT for each kind of operation (upload/download) based on our file system choices.
*/
BlockingQueue<Runnable> uploadQueue =
new ArrayBlockingQueue<>(configuration.getBackupQueueSize());
PolledMeter.using(backupMetrics.getRegistry())
.withName(backupMetrics.uploadQueueSize)
.monitorSize(uploadQueue);
this.fileUploadExecutor =
MoreExecutors.listeningDecorator(
new BlockingSubmitThreadPoolExecutor(
configuration.getBackupThreads(),
uploadQueue,
configuration.getUploadTimeout()));
BlockingQueue<Runnable> downloadQueue =
new ArrayBlockingQueue<>(configuration.getDownloadQueueSize());
PolledMeter.using(backupMetrics.getRegistry())
.withName(backupMetrics.downloadQueueSize)
.monitorSize(downloadQueue);
this.fileDownloadExecutor =
new BlockingSubmitThreadPoolExecutor(
configuration.getRestoreThreads(),
downloadQueue,
configuration.getDownloadTimeout());
}
@Override
public Future<Path> asyncDownloadFile(final AbstractBackupPath path, final int retry)
throws RejectedExecutionException {
return fileDownloadExecutor.submit(
() -> {
downloadFile(path, "" /* suffix */, retry);
return Paths.get(path.getRemotePath());
});
}
@Override
public void downloadFile(final AbstractBackupPath path, String suffix, final int retry)
throws BackupRestoreException {
// TODO: Should we download the file if localPath already exists?
String remotePath = path.getRemotePath();
String localPath = path.newRestoreFile().getAbsolutePath() + suffix;
logger.info("Downloading file: {} to location: {}", path.getRemotePath(), localPath);
try {
new BoundedExponentialRetryCallable<Void>(500, 10000, retry) {
@Override
public Void retriableCall() throws Exception {
downloadFileImpl(path, suffix);
return null;
}
}.call();
// Note we only downloaded the bytes which are represented on file system (they are
// compressed and maybe encrypted).
// File size after decompression or decryption might be more/less.
backupMetrics.recordDownloadRate(getFileSize(remotePath));
backupMetrics.incrementValidDownloads();
logger.info("Successfully downloaded file: {} to location: {}", remotePath, localPath);
} catch (Exception e) {
backupMetrics.incrementInvalidDownloads();
logger.error("Error while downloading file: {} to location: {}", remotePath, localPath);
throw new BackupRestoreException(e.getMessage());
}
}
protected abstract void downloadFileImpl(final AbstractBackupPath path, String suffix)
throws BackupRestoreException;
@Override
public ListenableFuture<AbstractBackupPath> uploadAndDelete(
final AbstractBackupPath path, Instant target, boolean async)
throws RejectedExecutionException, BackupRestoreException {
if (async) {
return fileUploadExecutor.submit(
() -> uploadAndDeleteInternal(path, target, 10 /* retries */));
} else {
return Futures.immediateFuture(uploadAndDeleteInternal(path, target, 10 /* retries */));
}
}
@VisibleForTesting
public AbstractBackupPath uploadAndDeleteInternal(
final AbstractBackupPath path, Instant target, int retry)
throws RejectedExecutionException, BackupRestoreException {
Path localPath = Paths.get(path.getBackupFile().getAbsolutePath());
File localFile = localPath.toFile();
Preconditions.checkArgument(
localFile.exists(), String.format("Can't upload nonexistent %s", localPath));
Preconditions.checkArgument(
!localFile.isDirectory(),
String.format("Can only upload files %s is a directory", localPath));
Path remotePath = Paths.get(path.getRemotePath());
if (tasksQueued.add(localPath)) {
logger.info("Uploading file: {} to location: {}", localPath, remotePath);
try {
long uploadedFileSize;
// Upload file if it not present at remote location.
if (path.getType() != BackupFileType.SST_V2 || !checkObjectExists(remotePath)) {
backupNotificationMgr.notify(path, UploadStatus.STARTED);
uploadedFileSize =
new BoundedExponentialRetryCallable<Long>(
500 /* minSleep */, 10000 /* maxSleep */, retry) {
@Override
public Long retriableCall() throws Exception {
return uploadFileImpl(path, target);
}
}.call();
// Add to cache after successful upload.
// We only add SST_V2 as other file types are usually not checked, so no point
// evicting our SST_V2 results.
if (path.getType() == BackupFileType.SST_V2) addObjectCache(remotePath);
backupMetrics.recordUploadRate(uploadedFileSize);
backupMetrics.incrementValidUploads();
path.setCompressedFileSize(uploadedFileSize);
backupNotificationMgr.notify(path, UploadStatus.SUCCESS);
} else {
// file is already uploaded to remote file system.
logger.info("File: {} already present on remoteFileSystem.", remotePath);
}
logger.info(
"Successfully uploaded file: {} to location: {}", localPath, remotePath);
if (!FileUtils.deleteQuietly(localFile))
logger.warn(
String.format(
"Failed to delete local file %s.",
localFile.getAbsolutePath()));
} catch (Exception e) {
backupMetrics.incrementInvalidUploads();
logger.error(
"Error while uploading file: {} to location: {}. Exception: Msg: [{}], Trace: {}",
localPath,
remotePath,
e.getMessage(),
e);
backupNotificationMgr.notify(path, UploadStatus.FAILED);
throw new BackupRestoreException(e.getMessage());
} finally {
// Remove the task from the list so if we try to upload file ever again, we can.
tasksQueued.remove(localPath);
}
} else logger.info("Already in queue, no-op. File: {}", localPath);
return path;
}
private void addObjectCache(Path remotePath) {
objectCache.put(remotePath, Boolean.TRUE);
}
@Override
public boolean checkObjectExists(Path remotePath) {
// Check in cache, if remote file exists.
Boolean cacheResult = objectCache.getIfPresent(remotePath);
// Cache hit. Return the value.
if (cacheResult != null) return cacheResult;
// Cache miss - Check remote file system if object exist.
boolean remoteFileExist = doesRemoteFileExist(remotePath);
if (remoteFileExist) addObjectCache(remotePath);
return remoteFileExist;
}
@Override
public void deleteRemoteFiles(List<Path> remotePaths) throws BackupRestoreException {
if (remotePaths == null) return;
// Note that we are trying to implement write-thru cache here so it is good idea to
// invalidate the cache first. This is important so that if there is any issue (because file
// was deleted), it is caught by our snapshot job we can re-upload the file. This will also
// help in ensuring that our validation job fails if there are any error caused due to TTL
// of a file.
objectCache.invalidateAll(remotePaths);
deleteFiles(remotePaths);
}
protected abstract void deleteFiles(List<Path> remotePaths) throws BackupRestoreException;
protected abstract boolean doesRemoteFileExist(Path remotePath);
protected abstract long uploadFileImpl(final AbstractBackupPath path, Instant target)
throws BackupRestoreException;
@Override
public String getShard() {
return getPrefix().getName(0).toString();
}
@Override
public Path getPrefix() {
Path prefix = Paths.get(configuration.getBackupPrefix());
if (StringUtils.isNotBlank(configuration.getRestorePrefix())) {
prefix = Paths.get(configuration.getRestorePrefix());
}
return prefix;
}
@Override
public Iterator<AbstractBackupPath> listPrefixes(Date date) {
String prefix = pathProvider.get().clusterPrefix(getPrefix().toString());
Iterator<String> fileIterator = listFileSystem(prefix, File.pathSeparator, null);
//noinspection unchecked
return new TransformIterator(
fileIterator,
remotePath -> {
AbstractBackupPath abstractBackupPath = pathProvider.get();
abstractBackupPath.parsePartialPrefix(remotePath.toString());
return abstractBackupPath;
});
}
/**
 * Lists remote backup files under {@code path} whose parsed time falls in the window.
 * The filter keeps files with time in (start, till) plus the {@code start} boundary
 * itself; the {@code till} boundary is excluded.
 */
@Override
public Iterator<AbstractBackupPath> list(String path, Date start, Date till) {
    String prefix = pathProvider.get().remotePrefix(start, till, path);
    Iterator<String> fileIterator = listFileSystem(prefix, null, null);
    @SuppressWarnings("unchecked")
    TransformIterator<String, AbstractBackupPath> transformIterator =
            new TransformIterator(
                    fileIterator,
                    remotePath -> {
                        // Parse each remote key into a full AbstractBackupPath.
                        AbstractBackupPath abstractBackupPath = pathProvider.get();
                        abstractBackupPath.parseRemote(remotePath.toString());
                        return abstractBackupPath;
                    });
    return new FilterIterator<>(
            transformIterator,
            abstractBackupPath ->
                    (abstractBackupPath.getTime().after(start)
                                    && abstractBackupPath.getTime().before(till))
                            || abstractBackupPath.getTime().equals(start));
}
/** Returns the number of upload tasks currently queued. */
@Override
public int getUploadTasksQueued() {
    return tasksQueued.size();
}
/** Returns the number of download tasks waiting in the download executor's queue. */
@Override
public int getDownloadTasksQueued() {
    return fileDownloadExecutor.getQueue().size();
}
/** Invalidates every entry in the remote-object existence cache. */
@Override
public void clearCache() {
    objectCache.invalidateAll();
}
}
| 3,348 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.inject.ImplementedBy;
import com.netflix.priam.aws.RemoteBackupPath;
import com.netflix.priam.compress.CompressionType;
import com.netflix.priam.config.BackupsToCompress;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cryptography.CryptographyAlgorithm;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.utils.DateUtil;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.time.Instant;
import java.util.Date;
import java.util.Optional;
import org.apache.commons.lang3.StringUtils;
@ImplementedBy(RemoteBackupPath.class)
public abstract class AbstractBackupPath implements Comparable<AbstractBackupPath> {
    public static final char PATH_SEP = File.separatorChar;
    public static final Joiner PATH_JOINER = Joiner.on(PATH_SEP);

    /** Kinds of files Priam stores in the remote file system. */
    public enum BackupFileType {
        CL,
        META,
        META_V2,
        SNAP,
        SNAPSHOT_VERIFIED,
        SST,
        SST_V2;

        private static final ImmutableSet<BackupFileType> DATA_FILE_TYPES =
                ImmutableSet.of(SNAP, SST, SST_V2);

        private static final ImmutableSet<BackupFileType> V2_FILE_TYPES =
                ImmutableSet.of(SST_V2, META_V2);

        /** Returns true if {@code type} represents actual data (snapshot/SSTable), not metadata. */
        public static boolean isDataFile(BackupFileType type) {
            return DATA_FILE_TYPES.contains(type);
        }

        /** Returns true if {@code type} belongs to the V2 backup scheme. */
        public static boolean isV2(BackupFileType type) {
            return V2_FILE_TYPES.contains(type);
        }

        /**
         * Parses {@code s} as a BackupFileType.
         *
         * @throws BackupRestoreException if {@code s} names no known type.
         */
        public static BackupFileType fromString(String s) throws BackupRestoreException {
            try {
                return BackupFileType.valueOf(s);
            } catch (IllegalArgumentException e) {
                throw new BackupRestoreException(String.format("Unknown BackupFileType %s", s));
            }
        }
    }

    protected BackupFileType type;
    protected String clusterName;
    protected String keyspace;
    protected String columnFamily;
    protected String fileName;
    protected String baseDir;
    protected String token;
    protected String region;
    protected Date time;
    private long size; // uncompressed file size
    private long compressedFileSize = 0;
    protected final InstanceIdentity instanceIdentity;
    protected final IConfiguration config;
    protected File backupFile;
    private Instant lastModified;
    private Instant creationTime;
    private Date uploadedTs;
    private CompressionType compression;
    private CryptographyAlgorithm encryption = CryptographyAlgorithm.PLAINTEXT;
    private boolean isIncremental;

    public AbstractBackupPath(IConfiguration config, InstanceIdentity instanceIdentity) {
        this.instanceIdentity = instanceIdentity;
        this.config = config;
        // Compression follows the configured policy: SNAPPY unless compression is disabled.
        this.compression =
                config.getBackupsToCompress() == BackupsToCompress.NONE
                        ? CompressionType.NONE
                        : CompressionType.SNAPPY;
    }

    /**
     * Populates this path object from a local file that is about to be backed up.
     * Unreadable file attributes are zeroed rather than failing the backup.
     */
    public void parseLocal(File file, BackupFileType type) {
        this.backupFile = file;
        this.baseDir = config.getBackupLocation();
        this.clusterName = config.getAppName();
        this.fileName = file.getName();
        BasicFileAttributes fileAttributes;
        try {
            fileAttributes = Files.readAttributes(file.toPath(), BasicFileAttributes.class);
            this.lastModified = fileAttributes.lastModifiedTime().toInstant();
            this.creationTime = fileAttributes.creationTime().toInstant();
            this.size = fileAttributes.size();
        } catch (IOException e) {
            // Best effort: proceed with zeroed attributes (e.g. file deleted mid-scan).
            this.lastModified = Instant.ofEpochMilli(0L);
            this.creationTime = Instant.ofEpochMilli(0L);
            this.size = 0L;
        }
        this.region = instanceIdentity.getInstanceInfo().getRegion();
        this.token = instanceIdentity.getInstance().getToken();
        this.type = type;
        String rpath =
                new File(config.getDataFileLocation()).toURI().relativize(file.toURI()).getPath();
        String[] parts = rpath.split("" + PATH_SEP);
        // Merged the two identical isDataFile(type) checks into one branch.
        if (BackupFileType.isDataFile(type)) {
            // Data files live under <keyspace>/<columnfamily>/<folder>/... relative to data dir.
            this.keyspace = parts[0];
            this.columnFamily = parts[1];
            Optional<BackupFolder> folder = BackupFolder.fromName(parts[2]);
            this.isIncremental = folder.filter(BackupFolder.BACKUPS::equals).isPresent();
        }
        /*
        1. For old style snapshots, make this value to time at which backup was executed.
        2. This is to ensure that all the files from the snapshot are uploaded under single directory in remote file system.
        3. For META file we always override the time field via @link{Metadata#decorateMetaJson}
        */
        this.time =
                type == BackupFileType.SNAP
                        ? DateUtil.getDate(parts[3])
                        : new Date(lastModified.toEpochMilli());
    }

    /** Given a date range, find a common string prefix Eg: 20120212, 20120213 = 2012021 */
    protected String match(Date start, Date end) {
        String sString = DateUtil.formatyyyyMMddHHmm(start);
        String eString = DateUtil.formatyyyyMMddHHmm(end);
        int diff = StringUtils.indexOfDifference(sString, eString);
        // indexOfDifference returns -1 when the strings are equal.
        if (diff < 0) return sString;
        return sString.substring(0, diff);
    }

    /**
     * Computes the local destination for restoring this file, creating parent
     * directories as needed.
     */
    public File newRestoreFile() {
        File return_;
        String dataDir = config.getDataFileLocation();
        switch (type) {
            case CL:
                return_ = new File(PATH_JOINER.join(config.getBackupCommitLogLocation(), fileName));
                break;
            case META:
            case META_V2:
                return_ = new File(PATH_JOINER.join(config.getDataFileLocation(), fileName));
                break;
            default:
                return_ = new File(PATH_JOINER.join(dataDir, keyspace, columnFamily, fileName));
        }
        File parent = new File(return_.getParent());
        if (!parent.exists()) parent.mkdirs();
        return return_;
    }

    @Override
    public int compareTo(AbstractBackupPath o) {
        return getRemotePath().compareTo(o.getRemotePath());
    }

    /** Equality is keyed on the remote path; null-safe (the original NPE'd on null). */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null || !obj.getClass().equals(this.getClass())) return false;
        return getRemotePath().equals(((AbstractBackupPath) obj).getRemotePath());
    }

    /** Consistent with {@link #equals(Object)}: both are keyed on the remote path. */
    @Override
    public int hashCode() {
        return getRemotePath().hashCode();
    }

    /** Get remote prefix for this path object */
    public abstract String getRemotePath();

    /** Parses a fully constructed remote path */
    public abstract void parseRemote(String remoteFilePath);

    /** Parses paths with just token prefixes */
    public abstract void parsePartialPrefix(String remoteFilePath);

    /**
     * Provides a common prefix that matches all objects that fall between the start and end time
     */
    public abstract String remotePrefix(Date start, Date end, String location);

    public abstract Path remoteV2Prefix(Path location, BackupFileType fileType);

    /** Provides the cluster prefix */
    public abstract String clusterPrefix(String location);

    public BackupFileType getType() {
        return type;
    }

    public void setType(BackupFileType type) {
        this.type = type;
    }

    public String getClusterName() {
        return clusterName;
    }

    public String getKeyspace() {
        return keyspace;
    }

    public String getColumnFamily() {
        return columnFamily;
    }

    public String getFileName() {
        return fileName;
    }

    public String getToken() {
        return token;
    }

    public String getRegion() {
        return region;
    }

    public Date getTime() {
        return time;
    }

    public void setTime(Date time) {
        this.time = time;
    }

    /** Returns the original, uncompressed file size. */
    public long getSize() {
        return size;
    }

    public void setSize(long size) {
        this.size = size;
    }

    public long getCompressedFileSize() {
        return this.compressedFileSize;
    }

    public void setCompressedFileSize(long val) {
        this.compressedFileSize = val;
    }

    public File getBackupFile() {
        return backupFile;
    }

    public void setFileName(String fileName) {
        this.fileName = fileName;
    }

    public InstanceIdentity getInstanceIdentity() {
        return this.instanceIdentity;
    }

    public void setUploadedTs(Date uploadedTs) {
        this.uploadedTs = uploadedTs;
    }

    public Date getUploadedTs() {
        return this.uploadedTs;
    }

    public Instant getLastModified() {
        return lastModified;
    }

    public void setLastModified(Instant instant) {
        this.lastModified = instant;
    }

    public Instant getCreationTime() {
        return creationTime;
    }

    @VisibleForTesting
    public void setCreationTime(Instant instant) {
        this.creationTime = instant;
    }

    public CompressionType getCompression() {
        return compression;
    }

    public void setCompression(CompressionType compressionType) {
        this.compression = compressionType;
    }

    /**
     * @throws IllegalArgumentException if {@code encryption} names no known algorithm
     *     (valueOf semantics).
     */
    public void setEncryption(String encryption) {
        this.encryption = CryptographyAlgorithm.valueOf(encryption);
    }

    public CryptographyAlgorithm getEncryption() {
        return encryption;
    }

    public boolean isIncremental() {
        return isIncremental;
    }

    @Override
    public String toString() {
        return "From: " + getRemotePath() + " To: " + newRestoreFile().getPath();
    }
}
| 3,349 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.GsonJsonSerializer;
import java.io.Serializable;
import java.util.Date;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** POJO to encapsulate the metadata for a snapshot Created by aagrawal on 1/31/17. */
/** POJO encapsulating the metadata for a single snapshot. Created by aagrawal on 1/31/17. */
public final class BackupMetadata implements Serializable {
    private static final Logger logger = LoggerFactory.getLogger(BackupMetadata.class);
    private String snapshotDate;
    private String token;
    private Date start, completed;
    private Status status;
    private boolean cassandraSnapshotSuccess;
    private Date lastValidated;
    private BackupVersion backupVersion;
    private String snapshotLocation;

    /**
     * @param backupVersion scheme (V1/V2) under which this snapshot was taken.
     * @param token Cassandra token owning the snapshot; must be non-empty.
     * @param start wall-clock time the snapshot began; must be non-null.
     * @throws IllegalArgumentException when token or start is missing.
     */
    public BackupMetadata(BackupVersion backupVersion, String token, Date start) {
        if (start == null || StringUtils.isEmpty(token)) {
            // isEmpty covers the null-token case as well.
            throw new IllegalArgumentException(
                    String.format(
                            "Invalid Input: Token: %s or start date: %s is null or empty.",
                            token, start));
        }
        this.backupVersion = backupVersion;
        this.snapshotDate = DateUtil.formatyyyyMMdd(start);
        this.token = token;
        this.start = start;
        this.status = Status.STARTED;
        this.cassandraSnapshotSuccess = false;
    }

    /** Identity is (snapshotDate, token, start, backupVersion). */
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        BackupMetadata other = (BackupMetadata) o;
        return snapshotDate.equals(other.snapshotDate)
                && token.equals(other.token)
                && start.equals(other.start)
                && backupVersion.equals(other.backupVersion);
    }

    @Override
    public int hashCode() {
        // Standard 31-multiplier chain over the identity fields (kept byte-compatible
        // with the historical implementation).
        int hash = snapshotDate.hashCode();
        hash = 31 * hash + token.hashCode();
        hash = 31 * hash + start.hashCode();
        hash = 31 * hash + backupVersion.hashCode();
        return hash;
    }

    /** Snapshot date formatted as yyyyMMdd. */
    public String getSnapshotDate() {
        return snapshotDate;
    }

    /** Token for which this snapshot was initiated. */
    public String getToken() {
        return token;
    }

    /** Time at which the snapshot was initiated. */
    public Date getStart() {
        return start;
    }

    /** Time at which the snapshot was marked finished/failed, or null while running. */
    public Date getCompleted() {
        return completed;
    }

    /** Current status of the snapshot. */
    public Status getStatus() {
        return status;
    }

    /** Records the completion time of the snapshot. */
    public void setCompleted(Date completed) {
        this.completed = completed;
    }

    /** Updates the status of the snapshot. */
    public void setStatus(Status status) {
        this.status = status;
    }

    /** Remote location where the snapshot's meta file was uploaded. */
    public String getSnapshotLocation() {
        return snapshotLocation;
    }

    /** Records the remote upload location of the snapshot's meta file. */
    public void setSnapshotLocation(String snapshotLocation) {
        this.snapshotLocation = snapshotLocation;
    }

    /**
     * Whether the Cassandra-side (JMX) snapshot operation succeeded; the JMX call
     * itself can fail independently of the upload.
     */
    public boolean isCassandraSnapshotSuccess() {
        return cassandraSnapshotSuccess;
    }

    /** Marks the Cassandra JMX snapshot operation as succeeded or failed. */
    public void setCassandraSnapshotSuccess(boolean cassandraSnapshotSuccess) {
        this.cassandraSnapshotSuccess = cassandraSnapshotSuccess;
    }

    /** Backup scheme (V1/V2) of this snapshot. */
    public BackupVersion getBackupVersion() {
        return backupVersion;
    }

    /**
     * Timestamp of the last successful validation (i.e. confirming all files exist in
     * the remote file system), or null if never validated.
     */
    public Date getLastValidated() {
        return lastValidated;
    }

    /** Records when this backup was last validated. */
    public void setLastValidated(Date lastValidated) {
        this.lastValidated = lastValidated;
    }

    @Override
    public String toString() {
        return GsonJsonSerializer.getGson().toJson(this);
    }
}
| 3,350 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/DirectorySize.java | package com.netflix.priam.backup;
import com.google.inject.ImplementedBy;
/** Estimates the number of bytes remaining to upload in a snapshot. */
@ImplementedBy(SnapshotDirectorySize.class)
public interface DirectorySize {
    /**
     * Returns the total size, in bytes, of all snapshot files below {@code location}
     * in the filesystem tree.
     *
     * @param location root directory under which to measure.
     */
    long getBytes(String location);
}
| 3,351 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.netflix.priam.config.IConfiguration;
import javax.inject.Inject;
import javax.inject.Named;
/**
 * Chooses between the plaintext and encrypted backup file systems based on
 * configuration. Fields are now final (they were mutable and redundantly
 * initialized to null before injection).
 */
public class BackupFileSystemContext implements IFileSystemContext {
    private final IBackupFileSystem fs;
    private final IBackupFileSystem encryptedFs;

    @Inject
    public BackupFileSystemContext(
            @Named("backup") IBackupFileSystem fs,
            @Named("encryptedbackup") IBackupFileSystem encryptedFs) {
        this.fs = fs;
        this.encryptedFs = encryptedFs;
    }

    /** Returns the encrypted file system when backup encryption is enabled, else plaintext. */
    public IBackupFileSystem getFileStrategy(IConfiguration config) {
        return config.isEncryptBackupEnabled() ? this.encryptedFs : this.fs;
    }
}
| 3,352 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/SnapshotBackup.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
import com.netflix.priam.backupv2.ForgottenFilesManager;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.connection.CassandraOperations;
import com.netflix.priam.health.CassandraMonitor;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.scheduler.CronTimer;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.ThreadSleeper;
import java.io.File;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Instant;
import java.util.*;
import java.util.concurrent.Future;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Task for running daily snapshots. */
@Singleton
public class SnapshotBackup extends AbstractBackup {
    private static final Logger logger = LoggerFactory.getLogger(SnapshotBackup.class);
    public static final String JOBNAME = "SnapshotBackup";
    private final MetaData metaData;
    private final ThreadSleeper sleeper = new ThreadSleeper();
    private static final long WAIT_TIME_MS = 60 * 1000 * 10;
    private final InstanceIdentity instanceIdentity;
    private final IBackupStatusMgr snapshotStatusMgr;
    private final BackupRestoreUtil backupRestoreUtil;
    private final ForgottenFilesManager forgottenFilesManager;
    private String snapshotName = null;
    private Instant snapshotInstant = DateUtil.getInstant();
    private List<AbstractBackupPath> abstractBackupPaths = null;
    private final CassandraOperations cassandraOperations;
    private final BackupHelper backupHelper;
    // Prevents two CRON-triggered snapshots from running concurrently.
    private static final Lock lock = new ReentrantLock();

    @Inject
    public SnapshotBackup(
            IConfiguration config,
            BackupHelper backupHelper,
            MetaData metaData,
            IBackupStatusMgr snapshotStatusMgr,
            InstanceIdentity instanceIdentity,
            CassandraOperations cassandraOperations,
            ForgottenFilesManager forgottenFilesManager) {
        super(config);
        this.backupHelper = backupHelper;
        this.metaData = metaData;
        this.snapshotStatusMgr = snapshotStatusMgr;
        this.instanceIdentity = instanceIdentity;
        this.cassandraOperations = cassandraOperations;
        backupRestoreUtil =
                new BackupRestoreUtil(
                        config.getSnapshotIncludeCFList(), config.getSnapshotExcludeCFList());
        this.forgottenFilesManager = forgottenFilesManager;
    }

    /**
     * Waits for Cassandra to start, then runs one snapshot cycle. Throws if another
     * snapshot is already in flight.
     */
    @Override
    public void execute() throws Exception {
        // If Cassandra is started then only start Snapshot Backup
        while (!CassandraMonitor.hasCassadraStarted()) {
            logger.debug(
                    "Cassandra has not yet started, hence Snapshot Backup will start after ["
                            + WAIT_TIME_MS / 1000
                            + "] secs ...");
            sleeper.sleep(WAIT_TIME_MS);
        }
        // Do not allow more than one snapshot to run at the same time. This is possible as this
        // happens on CRON.
        if (!lock.tryLock()) {
            logger.warn("Snapshot Operation is already running! Try again later.");
            throw new Exception("Snapshot Operation already running");
        }
        try {
            // Clean up all the backup directories, if any.
            cleanOldBackups(config);
            executeSnapshot();
        } finally {
            lock.unlock();
        }
    }

    /** Takes a Cassandra snapshot, uploads all files plus a meta manifest, records status. */
    private void executeSnapshot() throws Exception {
        Date startTime = Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime();
        snapshotName = DateUtil.formatyyyyMMddHHmm(startTime);
        snapshotInstant = DateUtil.getInstant();
        String token = instanceIdentity.getInstance().getToken();
        // Save start snapshot status
        BackupMetadata backupMetadata =
                new BackupMetadata(BackupVersion.SNAPSHOT_BACKUP, token, startTime);
        snapshotStatusMgr.start(backupMetadata);
        try {
            logger.info("Starting snapshot {}", snapshotName);
            cassandraOperations.takeSnapshot(snapshotName);
            backupMetadata.setCassandraSnapshotSuccess(true);
            // Collect all snapshot dir's under keyspace dir's
            abstractBackupPaths = Lists.newArrayList();
            // Try to upload all the files as part of snapshot. If there is any error, there will be
            // an exception and snapshot will be considered as failure.
            initiateBackup(SNAPSHOT_FOLDER, backupRestoreUtil);
            // All the files are uploaded successfully as part of snapshot.
            // pre condition notify of meta.json upload
            File tmpMetaFile = metaData.createTmpMetaFile();
            // Note: no need to remove this temp as it is done within createTmpMetaFile().
            // (The returned path object was previously bound to an unused local.)
            metaData.decorateMetaJson(tmpMetaFile, snapshotName);
            // Upload meta file
            AbstractBackupPath metaJson = metaData.set(abstractBackupPaths, snapshotName);
            logger.info("Snapshot upload complete for {}", snapshotName);
            backupMetadata.setSnapshotLocation(
                    config.getBackupPrefix() + File.separator + metaJson.getRemotePath());
            snapshotStatusMgr.finish(backupMetadata);
        } catch (Exception e) {
            // Log with the full stack trace via the logger instead of printStackTrace().
            logger.error("Exception occurred while taking snapshot: {}", snapshotName, e);
            snapshotStatusMgr.failed(backupMetadata);
            throw e;
        } finally {
            try {
                cassandraOperations.clearSnapshot(snapshotName);
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Returns the snapshot directory named {@code snapshotName} under {@code snpDir},
     * or null if absent. Fixed: listFiles() can return null (I/O error / not a
     * directory), and name comparison now uses equality rather than an accidental
     * regex match.
     */
    private File getValidSnapshot(File snpDir, String snapshotName) {
        File[] candidates = snpDir.listFiles();
        if (candidates == null) return null;
        for (File snapshotDir : candidates)
            if (snapshotDir.getName().equals(snapshotName)) return snapshotDir;
        return null;
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /** True when a backup CRON expression is configured. */
    public static boolean isBackupEnabled(IConfiguration config) throws Exception {
        return (getTimer(config) != null);
    }

    /** Returns the CRON timer, or null (after cleaning stale backup dirs) when disabled. */
    public static TaskTimer getTimer(IConfiguration config) throws Exception {
        TaskTimer timer = CronTimer.getCronTimer(JOBNAME, config.getBackupCronExpression());
        if (timer == null) {
            // Clean up all the backup directories, if any.
            cleanOldBackups(config);
        }
        return timer;
    }

    /** Deletes every timestamp-named snapshot directory under each backup location. */
    private static void cleanOldBackups(IConfiguration configuration) throws Exception {
        Set<Path> backupPaths = AbstractBackup.getBackupDirectories(configuration, SNAPSHOT_FOLDER);
        for (Path backupDirPath : backupPaths)
            try (DirectoryStream<Path> directoryStream =
                    Files.newDirectoryStream(backupDirPath, path -> Files.isDirectory(path))) {
                for (Path backupDir : directoryStream) {
                    if (isValidBackupDir(backupDir)) {
                        FileUtils.deleteDirectory(backupDir.toFile());
                    }
                }
            }
    }

    /** Uploads one column family's snapshot files after moving aside forgotten files. */
    @Override
    protected void processColumnFamily(File backupDir) throws Exception {
        File snapshotDir = getValidSnapshot(backupDir, snapshotName);
        if (snapshotDir == null) {
            logger.warn("{} folder does not contain {} snapshots", backupDir, snapshotName);
            return;
        }
        forgottenFilesManager.findAndMoveForgottenFiles(snapshotInstant, snapshotDir);
        // Add files to this dir
        ImmutableList<ListenableFuture<AbstractBackupPath>> futures =
                backupHelper.uploadAndDeleteAllFiles(
                        snapshotDir, BackupFileType.SNAP, config.enableAsyncSnapshot());
        for (Future<AbstractBackupPath> future : futures) {
            abstractBackupPaths.add(future.get());
        }
    }

    /** A backup dir is "valid" (i.e. ours) when its name parses as yyyyMMddHHmm. */
    private static boolean isValidBackupDir(Path backupDir) {
        String backupDirName = backupDir.toFile().getName();
        return (DateUtil.getDate(backupDirName) != null);
    }
}
| 3,353 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.utils.SystemUtils;
import java.io.File;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.Set;
import javax.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Abstract Backup task: walks the Cassandra data directory and hands each column
 * family's backup/snapshot folder to {@link #processColumnFamily(File)}.
 */
public abstract class AbstractBackup extends Task {
    private static final Logger logger = LoggerFactory.getLogger(AbstractBackup.class);
    static final String INCREMENTAL_BACKUP_FOLDER = "backups";
    public static final String SNAPSHOT_FOLDER = "snapshots";

    @Inject
    public AbstractBackup(IConfiguration config) {
        super(config);
    }

    /**
     * Walks {@code <dataDir>/<keyspace>/<columnfamily>/<monitoringFolder>} and invokes
     * {@link #processColumnFamily(File)} for each readable folder. Filtered column
     * families have their folder cleaned instead of processed, so files do not
     * accumulate.
     *
     * @throws IllegalArgumentException if the configured data file location is missing.
     */
    protected final void initiateBackup(
            String monitoringFolder, BackupRestoreUtil backupRestoreUtil) throws Exception {
        File dataDir = new File(config.getDataFileLocation());
        if (!dataDir.exists() || !dataDir.isDirectory()) {
            throw new IllegalArgumentException(
                    "The configured 'data file location' does not exist or is not a directory: "
                            + config.getDataFileLocation());
        }
        logger.debug("Scanning for backup in: {}", dataDir.getAbsolutePath());
        File[] keyspaceDirectories = dataDir.listFiles();
        // listFiles() returns null on I/O error; nothing to do in that case.
        if (keyspaceDirectories == null) return;
        for (File keyspaceDir : keyspaceDirectories) {
            if (keyspaceDir.isFile()) continue;
            logger.debug("Entering {} keyspace..", keyspaceDir.getName());
            File[] columnFamilyDirectories = keyspaceDir.listFiles();
            if (columnFamilyDirectories == null) continue;
            for (File columnFamilyDir : columnFamilyDirectories) {
                File backupDir = new File(columnFamilyDir, monitoringFolder);
                if (isAReadableDirectory(backupDir)) {
                    String columnFamilyName = getColumnFamily(backupDir);
                    if (backupRestoreUtil.isFiltered(keyspaceDir.getName(), columnFamilyName)) {
                        // Clean the backup/snapshot directory else files will keep getting
                        // accumulated.
                        SystemUtils.cleanupDir(backupDir.getAbsolutePath(), null);
                    } else {
                        processColumnFamily(backupDir);
                    }
                }
            } // end processing all CFs for keyspace
        } // end processing keyspaces under the C* data dir
    }

    /**
     * Extracts the column family name from a backup dir path. Assumes the CF directory
     * is named {@code <cfname>-<uuid>} — TODO confirm this matches all C* versions in use.
     */
    protected String getColumnFamily(File backupDir) {
        return backupDir.getParentFile().getName().split("-")[0];
    }

    /** The keyspace is the grandparent directory of the backup folder. */
    protected String getKeyspace(File backupDir) {
        return backupDir.getParentFile().getParentFile().getName();
    }

    /**
     * Process the columnfamily in a given snapshot/backup directory.
     *
     * @param backupDir Location of the backup/snapshot directory in that columnfamily.
     * @throws Exception throws exception if there is any error in process the directory.
     */
    protected abstract void processColumnFamily(File backupDir) throws Exception;

    /**
     * Get all the backup directories for Cassandra.
     *
     * @param config to get the location of the data folder.
     * @param monitoringFolder folder where cassandra backup's are configured.
     * @return Set of the path(s) containing the backup folder for each columnfamily.
     * @throws Exception incase of IOException.
     */
    public static Set<Path> getBackupDirectories(IConfiguration config, String monitoringFolder)
            throws Exception {
        HashSet<Path> backupPaths = new HashSet<>();
        if (config.getDataFileLocation() == null) return backupPaths;
        Path dataPath = Paths.get(config.getDataFileLocation());
        if (Files.exists(dataPath) && Files.isDirectory(dataPath))
            try (DirectoryStream<Path> directoryStream =
                    Files.newDirectoryStream(dataPath, path -> Files.isDirectory(path))) {
                for (Path keyspaceDirPath : directoryStream) {
                    try (DirectoryStream<Path> keyspaceStream =
                            Files.newDirectoryStream(
                                    keyspaceDirPath, path -> Files.isDirectory(path))) {
                        for (Path columnfamilyDirPath : keyspaceStream) {
                            Path backupDirPath =
                                    Paths.get(columnfamilyDirPath.toString(), monitoringFolder);
                            if (Files.exists(backupDirPath) && Files.isDirectory(backupDirPath)) {
                                logger.debug("Backup folder: {}", backupDirPath);
                                backupPaths.add(backupDirPath);
                            }
                        }
                    }
                }
            }
        return backupPaths;
    }

    /** True when {@code dir} exists, is a directory, and is readable. */
    protected static boolean isAReadableDirectory(File dir) {
        return dir.exists() && dir.isDirectory() && dir.canRead();
    }
}
| 3,354 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.netflix.priam.backupv2.IMetaProxy;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.DateUtil.DateRange;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Optional;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Created by aagrawal on 2/16/17. This class validates the backup by doing listing of files in the
* backup destination and comparing with meta.json by downloading from the location. Input:
* BackupMetadata that needs to be verified.
*/
@Singleton
public class BackupVerification {
private static final Logger logger = LoggerFactory.getLogger(BackupVerification.class);
private final IMetaProxy metaV1Proxy;
private final IMetaProxy metaV2Proxy;
private final IBackupStatusMgr backupStatusMgr;
private final Provider<AbstractBackupPath> abstractBackupPathProvider;
private BackupVerificationResult latestResult;
/** Injects the V1/V2 meta proxies, the backup status store, and a path factory. */
@Inject
public BackupVerification(
        @Named("v1") IMetaProxy metaV1Proxy,
        @Named("v2") IMetaProxy metaV2Proxy,
        IBackupStatusMgr backupStatusMgr,
        Provider<AbstractBackupPath> abstractBackupPathProvider) {
    this.metaV1Proxy = metaV1Proxy;
    this.metaV2Proxy = metaV2Proxy;
    this.backupStatusMgr = backupStatusMgr;
    this.abstractBackupPathProvider = abstractBackupPathProvider;
}
/** Maps a backup scheme to its meta-file proxy; unknown versions yield null. */
public IMetaProxy getMetaProxy(BackupVersion backupVersion) {
    if (backupVersion == BackupVersion.SNAPSHOT_BACKUP) {
        return metaV1Proxy;
    }
    if (backupVersion == BackupVersion.SNAPSHOT_META_SERVICE) {
        return metaV2Proxy;
    }
    return null;
}
public Optional<BackupVerificationResult> verifyLatestBackup(
BackupVersion backupVersion, boolean force, DateRange dateRange)
throws IllegalArgumentException {
IMetaProxy metaProxy = getMetaProxy(backupVersion);
for (BackupMetadata backupMetadata :
backupStatusMgr.getLatestBackupMetadata(backupVersion, dateRange)) {
if (backupMetadata.getLastValidated() == null || force) {
Optional<BackupVerificationResult> result = verifyBackup(metaProxy, backupMetadata);
if (result.isPresent()) {
return result;
}
} else {
updateLatestResult(backupMetadata);
return Optional.of(latestResult);
}
}
latestResult = null;
return Optional.empty();
}
public List<BackupMetadata> verifyBackupsInRange(
BackupVersion backupVersion, DateRange dateRange) throws IllegalArgumentException {
IMetaProxy metaProxy = getMetaProxy(backupVersion);
List<BackupMetadata> results = new ArrayList<>();
for (BackupMetadata backupMetadata :
backupStatusMgr.getLatestBackupMetadata(backupVersion, dateRange)) {
if (backupMetadata.getLastValidated() != null
|| verifyBackup(metaProxy, backupMetadata).isPresent()) {
results.add(backupMetadata);
}
}
return results;
}
/** returns the latest valid backup verification result if we have found one within the SLO * */
public Optional<Instant> getLatestVerfifiedBackupTime() {
return latestResult == null ? Optional.empty() : Optional.of(latestResult.snapshotInstant);
}
private Optional<BackupVerificationResult> verifyBackup(
IMetaProxy metaProxy, BackupMetadata latestBackupMetaData) {
Path metadataLocation = Paths.get(latestBackupMetaData.getSnapshotLocation());
metadataLocation = metadataLocation.subpath(1, metadataLocation.getNameCount());
AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get();
abstractBackupPath.parseRemote(metadataLocation.toString());
BackupVerificationResult result = metaProxy.isMetaFileValid(abstractBackupPath);
if (result.valid) {
updateLatestResult(latestBackupMetaData);
Date now = new Date(DateUtil.getInstant().toEpochMilli());
latestBackupMetaData.setLastValidated(now);
backupStatusMgr.update(latestBackupMetaData);
return Optional.of(result);
}
return Optional.empty();
}
private void updateLatestResult(BackupMetadata backupMetadata) {
Instant snapshotInstant = backupMetadata.getStart().toInstant();
if (latestResult == null || latestResult.snapshotInstant.isBefore(snapshotInstant)) {
latestResult = new BackupVerificationResult();
latestResult.valid = true;
latestResult.manifestAvailable = true;
latestResult.snapshotInstant = backupMetadata.getStart().toInstant();
Path snapshotLocation = Paths.get(backupMetadata.getSnapshotLocation());
latestResult.remotePath =
snapshotLocation.subpath(1, snapshotLocation.getNameCount()).toString();
}
}
}
| 3,355 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/BackupService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.netflix.priam.aws.UpdateCleanupPolicy;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.defaultimpl.IService;
import com.netflix.priam.scheduler.PriamScheduler;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.tuner.CassandraTunerService;
import javax.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Encapsulate the backup service 1.0 - Execute all the tasks required to run backup service.
*
* <p>Created by aagrawal on 3/9/19.
*/
public class BackupService implements IService {
    private static final Logger logger = LoggerFactory.getLogger(BackupService.class);

    private final PriamScheduler scheduler;
    private final IConfiguration config;
    private final IBackupRestoreConfig backupRestoreConfig;
    private final CassandraTunerService cassandraTunerService;

    @Inject
    public BackupService(
            IConfiguration config,
            IBackupRestoreConfig backupRestoreConfig,
            PriamScheduler priamScheduler,
            CassandraTunerService cassandraTunerService) {
        this.config = config;
        this.backupRestoreConfig = backupRestoreConfig;
        this.scheduler = priamScheduler;
        this.cassandraTunerService = cassandraTunerService;
    }

    /** Registers all backup-v1 tasks (snapshot, cleanup policy, commit log, incremental). */
    @Override
    public void scheduleService() throws Exception {
        // Snapshot backups are always considered; setting the backup hour to -1 (or the backup
        // cron to "-1") produces a null timer, which disables the snapshot-dependent tasks below.
        TaskTimer snapshotBackupTimer = SnapshotBackup.getTimer(config);
        scheduleTask(scheduler, SnapshotBackup.class, snapshotBackupTimer);

        if (snapshotBackupTimer != null) {
            // A scheduled snapshot implies maintaining the SSTable cleanup policy ...
            scheduleTask(scheduler, UpdateCleanupPolicy.class, UpdateCleanupPolicy.getTimer());
            // ... and archiving commit logs.
            scheduleTask(
                    scheduler, CommitLogBackupTask.class, CommitLogBackupTask.getTimer(config));
        }

        // Incremental backups are scheduled independently of the snapshot schedule.
        scheduleTask(
                scheduler,
                IncrementalBackup.class,
                IncrementalBackup.getTimer(config, backupRestoreConfig));
    }

    /** Re-tunes Cassandra before the backup service configuration is refreshed. */
    @Override
    public void updateServicePre() throws Exception {
        cassandraTunerService.onChangeUpdateService();
    }

    /** No post-update work is required for the backup service. */
    @Override
    public void updateServicePost() throws Exception {}
}
| 3,356 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/IBackupFileSystem.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.backup;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.FileNotFoundException;
import java.nio.file.Path;
import java.time.Instant;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
/** Interface representing a backup storage as a file system */
public interface IBackupFileSystem {
    /**
     * Download the file denoted by remotePath to the local file system denoted by local path.
     *
     * @param path Backup path representing a local and remote file pair
     * @param suffix appended to the local file path of the downloaded file (e.g. ".tmp") so the
     *     download can land next to, rather than on top of, the final restore location.
     * @param retry No. of times to retry to download a file from remote file system. If <1, it
     *     will try to download file exactly once.
     * @throws BackupRestoreException if file is not available, downloadable or any other error from
     *     remote file system.
     */
    void downloadFile(AbstractBackupPath path, String suffix, int retry)
            throws BackupRestoreException;

    /**
     * Download the file denoted by remotePath in an async fashion to the local file system denoted
     * by local path.
     *
     * @param path Backup path representing a local and remote file pair
     * @param retry No. of times to retry to download a file from remote file system. If <1, it
     *     will try to download file exactly once.
     * @return The future of the async job to monitor the progress of the job.
     * @throws BackupRestoreException if file is not available, downloadable or any other error from
     *     remote file system.
     * @throws RejectedExecutionException if the queue is full and TIMEOUT is reached while trying
     *     to add the work to the queue.
     */
    Future<Path> asyncDownloadFile(final AbstractBackupPath path, final int retry)
            throws BackupRestoreException, RejectedExecutionException;

    /**
     * Overload that uploads as fast as possible without any custom throttling (target time of
     * {@code Instant.EPOCH}, i.e. already in the past).
     */
    default void uploadAndDelete(AbstractBackupPath path, boolean async)
            throws FileNotFoundException, BackupRestoreException {
        uploadAndDelete(path, Instant.EPOCH, async);
    }

    /**
     * Upload the local file to its remote counterpart in an optionally async fashion. Both
     * locations are embedded within the path parameter. De-duping of the file to upload will always
     * be done by comparing the files-in-progress to be uploaded. This may result in this particular
     * request to not to be executed e.g. if any other thread has given the same file to upload and
     * that file is in internal queue. Note that de-duping is best effort and is not always
     * guaranteed as we try to avoid lock on read/write of the files-in-progress. Once uploaded,
     * files are deleted. Uploads are retried 10 times.
     *
     * @param path AbstractBackupPath to be used to send backup notifications only.
     * @param target The target time of completion of all files in the upload.
     * @param async boolean to determine whether the call should block or return immediately and
     *     upload asynchronously
     * @return The future of the async job to monitor the progress of the job. This will be null if
     *     file was de-duped for upload.
     * @throws BackupRestoreException in case of failure to upload for any reason including file not
     *     readable or remote file system errors.
     * @throws FileNotFoundException If a file as denoted by localPath is not available or is a
     *     directory.
     * @throws RejectedExecutionException if the queue is full and TIMEOUT is reached while trying
     *     to add the work to the queue.
     */
    ListenableFuture<AbstractBackupPath> uploadAndDelete(
            final AbstractBackupPath path, Instant target, boolean async)
            throws FileNotFoundException, RejectedExecutionException, BackupRestoreException;

    /**
     * Get the shard where this object should be stored. For local file system this should be empty
     * or null. For S3, it would be the location of the bucket.
     *
     * @return the location of the shard; empty string by default.
     */
    default String getShard() {
        return "";
    }

    /**
     * Get the prefix path for the backup file system. This will be either the location of the
     * remote file system for backup or the location from where we should restore.
     *
     * @return prefix path to the backup file system.
     */
    Path getPrefix();

    /**
     * List all files in the backup location for the specified time range.
     *
     * @param path This is used as the `prefix` for listing files in the filesystem. All the files
     *     that start with this prefix will be returned.
     * @param start Start date of the file upload.
     * @param till End date of the file upload.
     * @return Iterator of the AbstractBackupPath matching the criteria.
     */
    Iterator<AbstractBackupPath> list(String path, Date start, Date till);

    /** Get a list of prefixes for the cluster available in backup for the specified date */
    Iterator<AbstractBackupPath> listPrefixes(Date date);

    /**
     * List all the files with the given prefix, delimiter, and marker. Files should be returned
     * ordered by last modified time descending. This should never return null.
     *
     * @param prefix Common prefix of the elements to search in the backup file system.
     * @param delimiter All the object will end with this delimiter.
     * @param marker Start the fetch with this as the first object.
     * @return the iterator on the backup file system containing path of the files.
     */
    Iterator<String> listFileSystem(String prefix, String delimiter, String marker);

    /** Runs cleanup or set retention on the remote file system. */
    void cleanup();

    /**
     * Give the file system a chance to terminate any thread pools, etc. Called on service
     * shutdown; implementations should release their resources here.
     */
    void shutdown();

    /**
     * Get the size of the remote object
     *
     * @param remotePath Location of the object on the remote file system.
     * @return size of the object on the remote filesystem.
     * @throws BackupRestoreException in case of failure to read object denoted by remotePath or any
     *     other error.
     */
    long getFileSize(String remotePath) throws BackupRestoreException;

    /**
     * Checks if the file denoted by remotePath exists on the remote file system. It does not need
     * check if object was completely uploaded to remote file system.
     *
     * @param remotePath location on the remote file system.
     * @return boolean value indicating presence of the file on remote file system; defaults to
     *     false for implementations that do not support the check.
     */
    default boolean checkObjectExists(Path remotePath) {
        return false;
    }

    /**
     * Delete list of remote files from the remote file system. It should throw exception if there
     * is anything wrong in processing the request. If the remotePath passed do not exist, then it
     * should just keep quiet.
     *
     * @param remotePaths list of files on remote file system to be deleted. This path may or may
     *     not exist.
     * @throws BackupRestoreException in case of remote file system not able to process the request
     *     or unable to reach.
     */
    void deleteRemoteFiles(List<Path> remotePaths) throws BackupRestoreException;

    /**
     * Get the number of tasks en-queue in the filesystem for upload.
     *
     * @return the total no. of tasks to be executed.
     */
    int getUploadTasksQueued();

    /**
     * Get the number of tasks en-queue in the filesystem for download.
     *
     * @return the total no. of tasks to be executed.
     */
    int getDownloadTasksQueued();

    /** Clear the cache for the backup file system, if any. */
    void clearCache();
}
| 3,357 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/backup/SnapshotDirectorySize.java | package com.netflix.priam.backup;
import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
/** Estimates remaining bytes to upload in a backup by looking at the file system */
/** Estimates remaining bytes to upload in a backup by looking at the file system */
public class SnapshotDirectorySize implements DirectorySize {

    /**
     * Walks the tree rooted at {@code location} and returns the total size, in bytes, of every
     * regular file whose path contains the snapshot folder. Walk errors are swallowed because an
     * estimate is acceptable to callers.
     */
    public long getBytes(String location) {
        SummingFileVisitor visitor = new SummingFileVisitor();
        try {
            Files.walkFileTree(Paths.get(location), visitor);
        } catch (IOException ignored) {
            // BackupFileVisitor is happy with an estimate and won't produce these in practice.
        }
        return visitor.getTotalBytes();
    }

    /** Accumulates sizes of regular files living under a snapshot folder. */
    private static final class SummingFileVisitor extends SimpleFileVisitor<Path> {
        private long totalBytes;

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
            if (attrs.isRegularFile() && file.toString().contains(AbstractBackup.SNAPSHOT_FOLDER)) {
                totalBytes += attrs.size();
            }
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFileFailed(Path file, IOException exc) {
            // Unreadable files are simply skipped; keep walking and summing the rest.
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
            // Directory-iteration errors are likewise ignored for estimation purposes.
            return FileVisitResult.CONTINUE;
        }

        long getTotalBytes() {
            return totalBytes;
        }
    }
}
| 3,358 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/RestoreContext.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.scheduler.PriamScheduler;
import com.netflix.priam.scheduler.UnsupportedTypeException;
import javax.inject.Inject;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* At run-time, determine the source type to restore from.
*/
public class RestoreContext {

    private static final Logger logger = LoggerFactory.getLogger(RestoreContext.class);
    private final PriamScheduler scheduler;
    private final IConfiguration config;

    @Inject
    public RestoreContext(IConfiguration config, PriamScheduler scheduler) {
        this.config = config;
        this.scheduler = scheduler;
    }

    /** A restore is requested when a restore snapshot is configured. */
    public boolean isRestoreEnabled() {
        return !StringUtils.isEmpty(config.getRestoreSnapshot());
    }

    /**
     * Determines at run-time which restore strategy matches the configured source type and
     * encryption setting, and schedules the corresponding task. No-op when restore is disabled.
     * Note: a non-empty source type with encryption disabled schedules nothing (unsupported
     * combination, preserved from the original decision table).
     */
    public void restore() throws Exception {
        if (!isRestoreEnabled()) return;

        // Restore is required.
        if (StringUtils.isEmpty(config.getRestoreSourceType()) && !config.isRestoreEncrypted()) {
            // Restore is needed and it will be done from the primary AWS account
            scheduler.addTask(
                    Restore.JOBNAME,
                    Restore.class,
                    Restore.getTimer()); // restore from the AWS primary acct
            logger.info("Scheduled task {}", Restore.JOBNAME);
        } else if (config.isRestoreEncrypted()) {
            SourceType sourceType = SourceType.lookup(config.getRestoreSourceType(), true, false);

            if (sourceType == null) {
                // Encrypted restore from the primary AWS account.
                scheduler.addTask(
                        EncryptedRestoreStrategy.JOBNAME,
                        EncryptedRestoreStrategy.class,
                        EncryptedRestoreStrategy.getTimer());
                // Fix: log the job that was actually scheduled (was Restore.JOBNAME).
                logger.info("Scheduled task {}", EncryptedRestoreStrategy.JOBNAME);
                return;
            }

            switch (sourceType) {
                case AWSCROSSACCT:
                    scheduler.addTask(
                            AwsCrossAccountCryptographyRestoreStrategy.JOBNAME,
                            AwsCrossAccountCryptographyRestoreStrategy.class,
                            AwsCrossAccountCryptographyRestoreStrategy.getTimer());
                    logger.info(
                            "Scheduled task {}", AwsCrossAccountCryptographyRestoreStrategy.JOBNAME);
                    break;

                case GOOGLE:
                    scheduler.addTask(
                            GoogleCryptographyRestoreStrategy.JOBNAME,
                            GoogleCryptographyRestoreStrategy.class,
                            GoogleCryptographyRestoreStrategy.getTimer());
                    logger.info("Scheduled task {}", GoogleCryptographyRestoreStrategy.JOBNAME);
                    break;
            }
        }
    }

    /** Supported restore source types for encrypted restores. */
    enum SourceType {
        AWSCROSSACCT("AWSCROSSACCT"),
        GOOGLE("GOOGLE");

        private static final Logger logger = LoggerFactory.getLogger(SourceType.class);
        private final String sourceType;

        SourceType(String sourceType) {
            this.sourceType = sourceType.toUpperCase();
        }

        /**
         * Resolves a configured string (case-insensitive) to a SourceType.
         *
         * @param sourceType raw configured value; may be null or empty.
         * @param acceptNullOrEmpty when true, null/empty input returns null instead of throwing.
         * @param acceptIllegalValue when true, an unrecognized value returns null instead of
         *     throwing.
         * @throws UnsupportedTypeException when the value is unsupported and leniency is off.
         */
        public static SourceType lookup(
                String sourceType, boolean acceptNullOrEmpty, boolean acceptIllegalValue)
                throws UnsupportedTypeException {
            if (StringUtils.isEmpty(sourceType)) {
                if (acceptNullOrEmpty) {
                    return null;
                }
                String message =
                        String.format(
                                "%s is not a supported SourceType. Supported values are %s",
                                sourceType, getSupportedValues());
                logger.error(message);
                throw new UnsupportedTypeException(message);
            }
            try {
                return SourceType.valueOf(sourceType.toUpperCase());
            } catch (IllegalArgumentException ex) {
                String message =
                        String.format(
                                "%s is not a supported SourceType. Supported values are %s",
                                sourceType, getSupportedValues());
                if (acceptIllegalValue) {
                    message =
                            message
                                    + ". Since acceptIllegalValue is set to True, returning NULL instead.";
                    logger.error(message);
                    return null;
                }
                logger.error(message);
                throw new UnsupportedTypeException(message, ex);
            }
        }

        /** Comma-separated list of all enum constants, e.g. "AWSCROSSACCT,GOOGLE". */
        private static String getSupportedValues() {
            return StringUtils.join(SourceType.values(), ",");
        }

        public static SourceType lookup(String sourceType) throws UnsupportedTypeException {
            return lookup(sourceType, false, false);
        }

        public String getSourceType() {
            return sourceType;
        }
    }
}
| 3,359 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/IRestoreStrategy.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
/*
* A means to restore C* files from various source types (e.g. Google, AWS bucket whose objects are not owned by the current IAM role), and encrypted / non-encrypted data.
*/
public interface IRestoreStrategy {
    // Marker interface: concrete strategies (e.g. EncryptedRestoreStrategy,
    // AwsCrossAccountCryptographyRestoreStrategy) are scheduled as tasks and currently share no
    // common operations beyond their task entry points.
}
| 3,360 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreBase.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.backup.MetaData;
import com.netflix.priam.compress.CompressionType;
import com.netflix.priam.compress.ICompression;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredentialGeneric;
import com.netflix.priam.cryptography.IFileCryptography;
import com.netflix.priam.defaultimpl.ICassandraProcess;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.scheduler.NamedThreadPoolExecutor;
import com.netflix.priam.utils.RetryableCallable;
import com.netflix.priam.utils.Sleeper;
import java.io.*;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import javax.inject.Provider;
import org.bouncycastle.util.io.Streams;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Provides common functionality applicable to all restore strategies */
public abstract class EncryptedRestoreBase extends AbstractRestore {
    private static final Logger logger = LoggerFactory.getLogger(EncryptedRestoreBase.class);
    private static final String TMP_SUFFIX = ".tmp";

    private final String jobName;
    private final ICredentialGeneric pgpCredential;
    private final IFileCryptography fileCryptography;
    private final ICompression compress;
    private final ThreadPoolExecutor executor;

    protected EncryptedRestoreBase(
            IConfiguration config,
            IBackupFileSystem fs,
            String jobName,
            Sleeper sleeper,
            ICassandraProcess cassProcess,
            Provider<AbstractBackupPath> pathProvider,
            InstanceIdentity instanceIdentity,
            RestoreTokenSelector tokenSelector,
            ICredentialGeneric pgpCredential,
            IFileCryptography fileCryptography,
            ICompression compress,
            MetaData metaData,
            InstanceState instanceState,
            IPostRestoreHook postRestoreHook) {
        super(
                config,
                fs,
                jobName,
                sleeper,
                pathProvider,
                instanceIdentity,
                tokenSelector,
                cassProcess,
                metaData,
                instanceState,
                postRestoreHook);
        this.jobName = jobName;
        this.pgpCredential = pgpCredential;
        this.fileCryptography = fileCryptography;
        this.compress = compress;
        executor = new NamedThreadPoolExecutor(config.getRestoreThreads(), jobName);
        executor.allowCoreThreadTimeOut(true);
        logger.info(
                "Trying to restore cassandra cluster with filesystem: {}, RestoreStrategy: {}, Encryption: ON, Compression: {}",
                fs.getClass(),
                jobName,
                compress.getClass());
    }

    /**
     * Downloads the remote object for {@code path} to a ".tmp" file, decrypts it with the
     * configured PGP pass phrase, and (when compressed) decompresses it into the final restore
     * location. Runs on this strategy's thread pool and is retried via {@link RetryableCallable};
     * partially written intermediate files are deleted before each retry.
     */
    @Override
    protected final Future<Path> downloadFile(final AbstractBackupPath path) throws Exception {
        final char[] passPhrase =
                new String(this.pgpCredential.getValue(ICredentialGeneric.KEY.PGP_PASSWORD))
                        .toCharArray();
        File restoreLocation = path.newRestoreFile();
        File tempFile = new File(restoreLocation.getAbsolutePath() + TMP_SUFFIX);

        return executor.submit(
                new RetryableCallable<Path>() {
                    @Override
                    public Path retriableCall() throws Exception {
                        // == download object from source bucket
                        try {
                            // Not retrying to download file here as it is already in RetryCallable.
                            fs.downloadFile(path, TMP_SUFFIX, 0 /* retries */);
                        } catch (Exception ex) {
                            // This behavior is retryable; delete the partial download so each
                            // retry starts from a clean state. (Previously called createNewFile(),
                            // which is a no-op on an existing file and left stale data behind.)
                            deleteQuietly(tempFile);
                            throw new Exception(
                                    "Exception downloading file from: "
                                            + path.getRemotePath()
                                            + " to: "
                                            + tempFile.getAbsolutePath(),
                                    ex);
                        }

                        // == object downloaded successfully from source, decrypt it.
                        File decryptedFile = new File(tempFile.getAbsolutePath() + ".decrypted");
                        try (OutputStream fOut =
                                        new BufferedOutputStream(
                                                new FileOutputStream(
                                                        decryptedFile)); // destination file after
                                // decryption)
                                InputStream in =
                                        new BufferedInputStream(
                                                new FileInputStream(tempFile.getAbsolutePath()))) {
                            InputStream encryptedDataInputStream =
                                    fileCryptography.decryptStream(
                                            in, passPhrase, tempFile.getAbsolutePath());
                            Streams.pipeAll(encryptedDataInputStream, fOut);
                            logger.info(
                                    "Completed decrypting file: {} to final file dest: {}",
                                    tempFile.getAbsolutePath(),
                                    decryptedFile.getAbsolutePath());
                        } catch (Exception ex) {
                            // Retryable; remove both intermediates so the retry re-downloads and
                            // re-decrypts from scratch.
                            deleteQuietly(tempFile);
                            deleteQuietly(decryptedFile);
                            throw new Exception(
                                    "Exception during decryption file: "
                                            + decryptedFile.getAbsolutePath(),
                                    ex);
                        }

                        // == object is downloaded and decrypted, now uncompress it if necessary
                        if (path.getCompression() == CompressionType.NONE) {
                            Files.move(decryptedFile.toPath(), restoreLocation.toPath());
                        } else {
                            logger.info(
                                    "Start uncompressing file: {} to the FINAL destination stream",
                                    decryptedFile.getAbsolutePath());

                            try (InputStream is =
                                            new BufferedInputStream(
                                                    new FileInputStream(decryptedFile));
                                    BufferedOutputStream finalDestination =
                                            new BufferedOutputStream(
                                                    new FileOutputStream(restoreLocation))) {
                                compress.decompressAndClose(is, finalDestination);
                            } catch (Exception ex) {
                                throw new Exception(
                                        "Exception uncompressing file: "
                                                + decryptedFile.getAbsolutePath()
                                                + " to the FINAL destination stream",
                                        ex);
                            }

                            logger.info(
                                    "Completed uncompressing file: {} to the FINAL destination stream "
                                            + " current worker: {}",
                                    decryptedFile.getAbsolutePath(),
                                    Thread.currentThread().getName());
                        }

                        // if here, everything was successful for this object, lets remove unneeded
                        // file(s)
                        deleteQuietly(tempFile);
                        deleteQuietly(decryptedFile);
                        return Paths.get(path.getRemotePath());
                    }
                });
    }

    /** Best-effort delete of a possibly partial file; failure to delete is only logged. */
    private static void deleteQuietly(File file) {
        if (file.exists() && !file.delete()) {
            logger.warn("Unable to delete file: {}", file.getAbsolutePath());
        }
    }

    @Override
    public String getName() {
        return this.jobName;
    }
}
| 3,361 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/IPostRestoreHook.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
import com.google.inject.ImplementedBy;
/** Interface for post restore hook */
@ImplementedBy(PostRestoreHook.class)
public interface IPostRestoreHook {
    /**
     * @return whether the hook's configured parameters are valid; presumably callers skip
     *     {@link #execute()} when this returns false — confirm against PostRestoreHook.
     */
    boolean hasValidParameters();

    /** Runs the post-restore hook; throws on failure or interruption. */
    void execute() throws Exception;
}
| 3,362 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/PostRestoreHookException.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
/** Exception raised by PostRestoreHook when hook validation or execution fails. */
public class PostRestoreHookException extends Exception {
    // Explicit serialVersionUID: Exception is Serializable, so pin the wire format.
    private static final long serialVersionUID = 1L;

    /** @param message description of the hook failure. */
    public PostRestoreHookException(String message) {
        super(message);
    }

    /**
     * @param message description of the hook failure.
     * @param e underlying cause; preserved so stack traces keep the root failure.
     */
    public PostRestoreHookException(String message, Exception e) {
        super(message, e);
    }
}
| 3,363 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreStrategy.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.backup.MetaData;
import com.netflix.priam.compress.ICompression;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredentialGeneric;
import com.netflix.priam.cryptography.IFileCryptography;
import com.netflix.priam.defaultimpl.ICassandraProcess;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.Sleeper;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* A strategy to restore encrypted data from a primary AWS account
*/
@Singleton
public class EncryptedRestoreStrategy extends EncryptedRestoreBase {
    // Dead code removed: a private static Logger was declared here but never used.

    public static final String JOBNAME = "CRYPTOGRAPHY_RESTORE_JOB";

    /**
     * Wires the encrypted backup file system, the PGP credential, and the file cryptography
     * implementation into the common encrypted-restore flow in {@link EncryptedRestoreBase}.
     */
    @Inject
    public EncryptedRestoreStrategy(
            final IConfiguration config,
            ICassandraProcess cassProcess,
            @Named("encryptedbackup") IBackupFileSystem fs,
            Sleeper sleeper,
            @Named("filecryptoalgorithm") IFileCryptography fileCryptography,
            @Named("pgpcredential") ICredentialGeneric credential,
            ICompression compress,
            Provider<AbstractBackupPath> pathProvider,
            InstanceIdentity id,
            RestoreTokenSelector tokenSelector,
            MetaData metaData,
            InstanceState instanceState,
            IPostRestoreHook postRestoreHook) {
        super(
                config,
                fs,
                JOBNAME,
                sleeper,
                cassProcess,
                pathProvider,
                id,
                tokenSelector,
                credential,
                fileCryptography,
                compress,
                metaData,
                instanceState,
                postRestoreHook);
    }

    /** @return a timer used by the scheduler to determine when "this" should be run. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME);
    }
}
| 3,364 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/AwsCrossAccountCryptographyRestoreStrategy.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
import com.netflix.priam.aws.S3CrossAccountFileSystem;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.MetaData;
import com.netflix.priam.compress.ICompression;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredentialGeneric;
import com.netflix.priam.cryptography.IFileCryptography;
import com.netflix.priam.defaultimpl.ICassandraProcess;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.Sleeper;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A strategy to restore from an AWS bucket whose objects are not owned by the current IAM role,
 * thus requiring AWS cross-account assumption. In addition, this strategy will handle data which
 * has been encrypted; the restore mechanics are inherited from {@link EncryptedRestoreBase}.
 */
@Singleton
public class AwsCrossAccountCryptographyRestoreStrategy extends EncryptedRestoreBase {
    private static final Logger logger =
            LoggerFactory.getLogger(AwsCrossAccountCryptographyRestoreStrategy.class);

    /** Job name under which this restore task is registered with the scheduler. */
    public static final String JOBNAME = "AWS_CROSS_ACCT_CRYPTOGRAPHY_RESTORE_JOB";

    // Note: see javadoc for S3CrossAccountFileSystem for reason why we inject a concrete class
    // (S3CrossAccountFileSystem) instead of the interface IBackupFileSystem.
    @Inject
    public AwsCrossAccountCryptographyRestoreStrategy(
            final IConfiguration config,
            ICassandraProcess cassProcess,
            S3CrossAccountFileSystem crossAcctfs,
            Sleeper sleeper,
            @Named("filecryptoalgorithm") IFileCryptography fileCryptography,
            @Named("pgpcredential") ICredentialGeneric credential,
            ICompression compress,
            Provider<AbstractBackupPath> pathProvider,
            InstanceIdentity id,
            RestoreTokenSelector tokenSelector,
            MetaData metaData,
            InstanceState instanceState,
            IPostRestoreHook postRestoreHook) {
        // Unwrap the cross-account helper to its underlying IBackupFileSystem for the base class.
        super(
                config,
                crossAcctfs.getBackupFileSystem(),
                JOBNAME,
                sleeper,
                cassProcess,
                pathProvider,
                id,
                tokenSelector,
                credential,
                fileCryptography,
                compress,
                metaData,
                instanceState,
                postRestoreHook);
    }

    /** @return a timer used by the scheduler to determine when "this" should be run. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME);
    }
}
| 3,365 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/GoogleCryptographyRestoreStrategy.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.backup.MetaData;
import com.netflix.priam.compress.ICompression;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredentialGeneric;
import com.netflix.priam.cryptography.IFileCryptography;
import com.netflix.priam.defaultimpl.ICassandraProcess;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.Sleeper;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A strategy to restore encrypted data from Google Cloud Storage.
 *
 * <p>Restore mechanics are inherited from {@link EncryptedRestoreBase}; this class only binds the
 * GCS-backed encrypted file system and the PGP credential via dependency injection.
 */
@Singleton
public class GoogleCryptographyRestoreStrategy extends EncryptedRestoreBase {
    private static final Logger logger =
            LoggerFactory.getLogger(GoogleCryptographyRestoreStrategy.class);

    /** Job name under which this restore task is registered with the scheduler. */
    public static final String JOBNAME = "GOOGLECLOUDSTORAGE_RESTORE_JOB";

    @Inject
    public GoogleCryptographyRestoreStrategy(
            final IConfiguration config,
            ICassandraProcess cassProcess,
            @Named("gcsencryptedbackup") IBackupFileSystem fs,
            Sleeper sleeper,
            @Named("filecryptoalgorithm") IFileCryptography fileCryptography,
            @Named("pgpcredential") ICredentialGeneric credential,
            ICompression compress,
            Provider<AbstractBackupPath> pathProvider,
            InstanceIdentity id,
            RestoreTokenSelector tokenSelector,
            MetaData metaData,
            InstanceState instanceState,
            IPostRestoreHook postRestoreHook) {
        // All collaborators are handed straight to the base class; no state is kept here.
        super(
                config,
                fs,
                JOBNAME,
                sleeper,
                cassProcess,
                pathProvider,
                id,
                tokenSelector,
                credential,
                fileCryptography,
                compress,
                metaData,
                instanceState,
                postRestoreHook);
    }

    /** @return a timer used by the scheduler to determine when "this" should be run. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME);
    }
}
| 3,366 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/AbstractRestore.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.restore;
import com.netflix.priam.backup.*;
import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
import com.netflix.priam.backupv2.IMetaProxy;
import com.netflix.priam.config.IBackupRestoreConfig;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.defaultimpl.ICassandraProcess;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.RetryableCallable;
import com.netflix.priam.utils.Sleeper;
import com.netflix.priam.utils.SystemUtils;
import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
import java.nio.file.Path;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.*;
import java.util.concurrent.Future;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A means to perform a restore. This class contains the following characteristics: - It is agnostic
 * to the source type of the restore; this is determined by the injected IBackupFileSystem. - This
 * class can be scheduled, i.e. it is a "Task". - When this class is executed, it uses its own
 * thread pool to execute the restores.
 */
public abstract class AbstractRestore extends Task implements IRestoreStrategy {
    private static final Logger logger = LoggerFactory.getLogger(AbstractRestore.class);
    private static final String JOBNAME = "AbstractRestore";
    private static final String SYSTEM_KEYSPACE = "system";
    // Token picked from the backup when "restore closest token" is enabled; assigned in restore().
    private static BigInteger restoreToken;
    final IBackupFileSystem fs;
    final Sleeper sleeper;
    private final BackupRestoreUtil backupRestoreUtil;
    private final Provider<AbstractBackupPath> pathProvider;
    private final InstanceIdentity instanceIdentity;
    private final RestoreTokenSelector tokenSelector;
    private final ICassandraProcess cassProcess;
    private final InstanceState instanceState;
    private final MetaData metaData;
    private final IPostRestoreHook postRestoreHook;

    @Inject
    @Named("v1")
    IMetaProxy metaV1Proxy;

    @Inject
    @Named("v2")
    IMetaProxy metaV2Proxy;

    @Inject IBackupRestoreConfig backupRestoreConfig;

    public AbstractRestore(
            IConfiguration config,
            IBackupFileSystem fs,
            String name,
            Sleeper sleeper,
            Provider<AbstractBackupPath> pathProvider,
            InstanceIdentity instanceIdentity,
            RestoreTokenSelector tokenSelector,
            ICassandraProcess cassProcess,
            MetaData metaData,
            InstanceState instanceState,
            IPostRestoreHook postRestoreHook) {
        // NOTE(review): the "name" parameter is currently unused (super only receives config);
        // kept for signature compatibility with the concrete strategies — confirm before removing.
        super(config);
        this.fs = fs;
        this.sleeper = sleeper;
        this.pathProvider = pathProvider;
        this.instanceIdentity = instanceIdentity;
        this.tokenSelector = tokenSelector;
        this.cassProcess = cassProcess;
        this.metaData = metaData;
        this.instanceState = instanceState;
        backupRestoreUtil =
                new BackupRestoreUtil(
                        config.getRestoreIncludeCFList(), config.getRestoreExcludeCFList());
        this.postRestoreHook = postRestoreHook;
    }

    /**
     * @return true when a restore snapshot is configured and this instance's rac is among the
     *     backed-up racs (an empty backup-rac list means every rac qualifies).
     */
    public static final boolean isRestoreEnabled(IConfiguration conf, InstanceInfo instanceInfo) {
        boolean isRestoreMode = StringUtils.isNotBlank(conf.getRestoreSnapshot());
        boolean isBackedupRac =
                (CollectionUtils.isEmpty(conf.getBackupRacs())
                        || conf.getBackupRacs().contains(instanceInfo.getRac()));
        return (isRestoreMode && isBackedupRac);
    }

    /**
     * Kicks off an async download for every non-filtered path produced by the iterator.
     *
     * @param fsIterator remote paths to download.
     * @param waitForCompletion when true, block until every download started here has finished.
     * @return futures for the downloads that were started.
     */
    private List<Future<Path>> download(
            Iterator<AbstractBackupPath> fsIterator, boolean waitForCompletion) throws Exception {
        List<Future<Path>> futureList = new ArrayList<>();
        while (fsIterator.hasNext()) {
            AbstractBackupPath temp = fsIterator.next();
            // Skip anything excluded by the keyspace/columnfamily include/exclude filters.
            if (backupRestoreUtil.isFiltered(temp.getKeyspace(), temp.getColumnFamily())) {
                logger.info(
                        "Bypassing restoring file \"{}\" as it is part of the keyspace.columnfamily filter list. Its keyspace:cf is: {}:{}",
                        temp.newRestoreFile(),
                        temp.getKeyspace(),
                        temp.getColumnFamily());
                continue;
            }
            File localFileHandler = temp.newRestoreFile();
            if (logger.isDebugEnabled())
                logger.debug(
                        "Created local file name: "
                                + localFileHandler.getAbsolutePath()
                                + File.pathSeparator
                                + localFileHandler.getName());
            futureList.add(downloadFile(temp));
        }
        // Wait for all download to finish that were started from this method.
        if (waitForCompletion) waitForCompletion(futureList);
        return futureList;
    }

    /** Blocks until every future completes; rethrows the first failure. */
    private void waitForCompletion(List<Future<Path>> futureList) throws Exception {
        for (Future<Path> future : futureList) future.get();
    }

    /**
     * Downloads the last {@code lastN} commit-log files from the iterator.
     *
     * @return futures for the started downloads; empty when the iterator is null.
     */
    private List<Future<Path>> downloadCommitLogs(
            Iterator<AbstractBackupPath> fsIterator, int lastN, boolean waitForCompletion)
            throws Exception {
        // Return an empty list rather than null: the caller aggregates the result via
        // List.addAll, which would throw NPE on a null return.
        if (fsIterator == null) return Collections.emptyList();
        // Keep only the most recent lastN commit logs.
        BoundedList<AbstractBackupPath> bl = new BoundedList<>(lastN);
        while (fsIterator.hasNext()) {
            AbstractBackupPath temp = fsIterator.next();
            if (temp.getType() == BackupFileType.CL) {
                bl.add(temp);
            }
        }
        return download(bl.iterator(), waitForCompletion);
    }

    /** Forcefully stops the Cassandra process before wiping/replacing local data. */
    private void stopCassProcess() throws IOException {
        cassProcess.stop(true);
    }

    @Override
    public void execute() throws Exception {
        if (!isRestoreEnabled(config, instanceIdentity.getInstanceInfo())) return;
        logger.info("Starting restore for {}", config.getRestoreSnapshot());
        final DateUtil.DateRange dateRange = new DateUtil.DateRange(config.getRestoreSnapshot());
        // Retry the whole restore on failure via RetryableCallable.
        new RetryableCallable<Void>() {
            public Void retriableCall() throws Exception {
                logger.info("Attempting restore");
                restore(dateRange);
                logger.info("Restore completed");
                // Wait for other server init to complete
                sleeper.sleep(30000);
                return null;
            }
        }.call();
    }

    /**
     * Performs the restore: stops Cassandra, cleans local data, downloads the latest valid
     * snapshot (and optionally incrementals/commit logs) for the date range, runs the post-restore
     * hook, and restarts Cassandra unless manual start is configured. Restore progress is recorded
     * in {@link InstanceState}.
     *
     * @param dateRange time window to search for a snapshot meta file.
     * @throws Exception on any failure; status is set to FAILED before rethrowing.
     */
    public void restore(DateUtil.DateRange dateRange) throws Exception {
        // fail early if post restore hook has invalid parameters
        if (!postRestoreHook.hasValidParameters()) {
            throw new PostRestoreHookException("Invalid PostRestoreHook parameters");
        }
        Date endTime = new Date(dateRange.getEndTime().toEpochMilli());
        IMetaProxy metaProxy = metaV1Proxy;
        if (backupRestoreConfig.enableV2Restore()) metaProxy = metaV2Proxy;
        // Set the restore status.
        instanceState.getRestoreStatus().resetStatus();
        instanceState
                .getRestoreStatus()
                .setStartDateRange(
                        LocalDateTime.ofInstant(dateRange.getStartTime(), ZoneId.of("UTC")));
        instanceState.getRestoreStatus().setEndDateRange(DateUtil.convert(endTime));
        instanceState.getRestoreStatus().setExecutionStartTime(LocalDateTime.now());
        instanceState.setRestoreStatus(Status.STARTED);
        String origToken = instanceIdentity.getInstance().getToken();
        try {
            // Optionally adopt the token in the backup closest to ours before restoring.
            if (config.isRestoreClosestToken()) {
                restoreToken =
                        tokenSelector.getClosestToken(
                                new BigInteger(origToken),
                                new Date(dateRange.getStartTime().toEpochMilli()));
                instanceIdentity.getInstance().setToken(restoreToken.toString());
            }
            // Stop cassandra if its running
            stopCassProcess();
            // Cleanup local data
            File dataDir = new File(config.getDataFileLocation());
            if (dataDir.exists() && dataDir.isDirectory()) FileUtils.cleanDirectory(dataDir);
            // Find latest valid meta file.
            Optional<AbstractBackupPath> latestValidMetaFile =
                    BackupRestoreUtil.getLatestValidMetaPath(metaProxy, dateRange);
            if (!latestValidMetaFile.isPresent()) {
                logger.info("No valid snapshot meta file found, Restore Failed.");
                instanceState.getRestoreStatus().setExecutionEndTime(LocalDateTime.now());
                instanceState.setRestoreStatus(Status.FAILED);
                return;
            }
            logger.info(
                    "Snapshot Meta file for restore {}", latestValidMetaFile.get().getRemotePath());
            instanceState
                    .getRestoreStatus()
                    .setSnapshotMetaFile(latestValidMetaFile.get().getRemotePath());
            List<AbstractBackupPath> allFiles =
                    BackupRestoreUtil.getMostRecentSnapshotPaths(
                            latestValidMetaFile.get(), metaProxy, pathProvider);
            if (!config.skipIncrementalRestore()) {
                allFiles.addAll(
                        BackupRestoreUtil.getIncrementalPaths(
                                latestValidMetaFile.get(), dateRange, metaProxy));
            }
            // Download snapshot which is listed in the meta file.
            List<Future<Path>> futureList = new ArrayList<>();
            futureList.addAll(download(allFiles.iterator(), false));
            // Downloading CommitLogs
            // Note for Backup V2.0 we do not backup commit logs, as saving them is cost-expensive.
            if (config.isBackingUpCommitLogs()) {
                logger.info(
                        "Delete all backuped commitlog files in {}",
                        config.getBackupCommitLogLocation());
                SystemUtils.cleanupDir(config.getBackupCommitLogLocation(), null);
                logger.info("Delete all commitlog files in {}", config.getCommitLogLocation());
                SystemUtils.cleanupDir(config.getCommitLogLocation(), null);
                String prefix = fs.getPrefix().toString();
                Iterator<AbstractBackupPath> commitLogPathIterator =
                        fs.list(prefix, latestValidMetaFile.get().getTime(), endTime);
                futureList.addAll(
                        downloadCommitLogs(
                                commitLogPathIterator, config.maxCommitLogsRestore(), false));
            }
            // Wait for all the futures to finish.
            waitForCompletion(futureList);
            // Given that files are restored now, kick off post restore hook
            logger.info("Starting post restore hook");
            postRestoreHook.execute();
            logger.info("Completed executing post restore hook");
            // Declare restore as finished.
            instanceState.getRestoreStatus().setExecutionEndTime(LocalDateTime.now());
            instanceState.setRestoreStatus(Status.FINISHED);
            // Start cassandra if restore is successful.
            if (!config.doesCassandraStartManually()) cassProcess.start(true);
            else
                logger.info(
                        "config.doesCassandraStartManually() is set to True, hence Cassandra needs to be started manually ...");
        } catch (Exception e) {
            instanceState.setRestoreStatus(Status.FAILED);
            instanceState.getRestoreStatus().setExecutionEndTime(LocalDateTime.now());
            logger.error("Error while trying to restore: {}", e.getMessage(), e);
            throw e;
        } finally {
            // Always restore the original token, even on failure.
            instanceIdentity.getInstance().setToken(origToken);
        }
    }

    /**
     * Download file to the location specified. After downloading the file will be
     * decrypted(optionally) and decompressed before saving to final location.
     *
     * @param path - path of object to download from source S3/GCS.
     * @return Future of the job to track the progress of the job.
     * @throws Exception If there is any error in downloading file from the remote file system.
     */
    protected abstract Future<Path> downloadFile(final AbstractBackupPath path) throws Exception;

    /** A LinkedList that evicts from the head to keep at most {@code limit} elements. */
    final class BoundedList<E> extends LinkedList<E> {
        private final int limit;

        BoundedList(int limit) {
            this.limit = limit;
        }

        @Override
        public boolean add(E o) {
            super.add(o);
            while (size() > limit) {
                super.remove();
            }
            return true;
        }
    }

    /** @return number of download tasks currently queued in the backing file system. */
    public final int getDownloadTasksQueued() {
        return fs.getDownloadTasksQueued();
    }
}
| 3,367 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/Restore.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.restore;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.backup.MetaData;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.defaultimpl.ICassandraProcess;
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.Sleeper;
import java.nio.file.Path;
import java.util.concurrent.Future;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Main class for restoring data from backup. Backup restored using this way are not encrypted. */
@Singleton
public class Restore extends AbstractRestore {
    /** Job name under which this restore task is registered with the scheduler. */
    public static final String JOBNAME = "AUTO_RESTORE_JOB";

    private static final Logger logger = LoggerFactory.getLogger(Restore.class);

    @Inject
    public Restore(
            IConfiguration config,
            @Named("backup") IBackupFileSystem fs,
            Sleeper sleeper,
            ICassandraProcess cassProcess,
            Provider<AbstractBackupPath> pathProvider,
            InstanceIdentity instanceIdentity,
            RestoreTokenSelector tokenSelector,
            MetaData metaData,
            InstanceState instanceState,
            IPostRestoreHook postRestoreHook) {
        // All collaborators are handed straight to the base class; no state is kept here.
        super(
                config,
                fs,
                JOBNAME,
                sleeper,
                pathProvider,
                instanceIdentity,
                tokenSelector,
                cassProcess,
                metaData,
                instanceState,
                postRestoreHook);
    }

    /** Downloads a single backup object asynchronously, retrying up to 5 times. */
    @Override
    protected final Future<Path> downloadFile(final AbstractBackupPath path) throws Exception {
        return fs.asyncDownloadFile(path, 5 /* retries */);
    }

    /** @return a timer used by the scheduler to determine when "this" should be run. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME);
    }

    @Override
    public String getName() {
        return JOBNAME;
    }
}
| 3,368 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/PostRestoreHook.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.restore;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.scheduler.NamedThreadPoolExecutor;
import com.netflix.priam.utils.RetryableCallable;
import com.netflix.priam.utils.Sleeper;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An implementation of IPostRestoreHook. Kicks off a child process for post restore hook using
 * ProcessBuilder; uses heart beat monitor to monitor progress of the sub process and uses a file
 * lock to pass the active state to the sub process
 */
public class PostRestoreHook implements IPostRestoreHook {
    private static final Logger logger = LoggerFactory.getLogger(PostRestoreHook.class);
    private final IConfiguration config;
    private final Sleeper sleeper;
    // Separator between the configured hook command and the appended lock-file argument.
    private static final String PostRestoreHookCommandDelimiter = " ";
    // Prefix/suffix of the temp file whose lock signals "parent Priam is alive" to the child.
    private static final String PriamPostRestoreHookFilePrefix = "PriamFileForPostRestoreHook";
    private static final String PriamPostRestoreHookFileSuffix = ".tmp";
    // CLI option name used to pass the lock-file path to the child process.
    private static final String PriamPostRestoreHookFileOptionName = "--parentHookFilePath=";

    @Inject
    public PostRestoreHook(IConfiguration config, Sleeper sleeper) {
        this.config = config;
        this.sleeper = sleeper;
    }

    /**
     * Checks parameters to make sure none are blank.
     *
     * <p>When the hook is disabled, parameters are irrelevant and this always returns true.
     *
     * @return if all parameters are valid
     */
    public boolean hasValidParameters() {
        if (config.isPostRestoreHookEnabled()) {
            return !StringUtils.isBlank(config.getPostRestoreHook())
                    && !StringUtils.isBlank(config.getPostRestoreHookHeartbeatFileName())
                    && !StringUtils.isBlank(config.getPostRestoreHookDoneFileName());
        }
        return true;
    }

    /**
     * Executes a sub process as part of post restore hook, and waits for the completion of the
     * process. In case of lack of heart beat from the sub process, existing sub process is
     * terminated and new sub process is kicked off
     *
     * @throws Exception if the lock cannot be acquired or the sub process cannot be managed
     */
    public void execute() throws Exception {
        if (config.isPostRestoreHookEnabled()) {
            logger.debug("Started PostRestoreHook execution");
            // create a temp file to be used to indicate state of the current process, to the
            // sub-process
            File tempLockFile =
                    File.createTempFile(
                            PriamPostRestoreHookFilePrefix, PriamPostRestoreHookFileSuffix);
            RandomAccessFile raf = new RandomAccessFile(tempLockFile.getPath(), "rw");
            FileChannel fileChannel = raf.getChannel();
            // Exclusive lock held for the whole hook run: the child can probe this lock to
            // detect whether the parent Priam process is still alive.
            FileLock lock = fileChannel.lock();
            try {
                if (lock.isValid()) {
                    logger.info("Lock on RestoreHookFile acquired");
                    int countOfProcessStarts = 0;
                    // Loop: (re)start the hook until it exits 0 or the DONE file appears.
                    while (true) {
                        if (doneFileExists()) {
                            logger.info(
                                    "Not starting PostRestoreHook since DONE file already exists.");
                            break;
                        }
                        String postRestoreHook = config.getPostRestoreHook();
                        // add temp file path as parameter to the jar file
                        postRestoreHook =
                                postRestoreHook
                                        + PostRestoreHookCommandDelimiter
                                        + PriamPostRestoreHookFileOptionName
                                        + tempLockFile.getAbsolutePath();
                        String[] processCommandArguments =
                                postRestoreHook.split(PostRestoreHookCommandDelimiter);
                        ProcessBuilder processBuilder = new ProcessBuilder(processCommandArguments);
                        // start sub-process, inheriting stdout/stderr from this JVM
                        Process process = processBuilder.inheritIO().start();
                        logger.info(
                                "Started PostRestoreHook: {} - Attempt#{}",
                                postRestoreHook,
                                ++countOfProcessStarts);
                        // monitor progress of sub-process
                        monitorPostRestoreHookHeartBeat(process);
                        // block until sub-process completes or until the timeout
                        if (!process.waitFor(
                                config.getPostRestoreHookTimeOutInDays(), TimeUnit.DAYS)) {
                            logger.info(
                                    "PostRestoreHook process did not complete within {} days. Forcefully terminating the process.",
                                    config.getPostRestoreHookTimeOutInDays());
                            process.destroyForcibly();
                        }
                        if (process.exitValue() == 0) {
                            logger.info("PostRestoreHook process completed successfully");
                            break;
                        }
                        // Non-zero exit: loop around and retry the hook.
                        logger.warn("PostRestoreHook process exited unsuccessfully");
                    }
                    logger.debug("Completed PostRestoreHook execution");
                } else {
                    throw new PostRestoreHookException(
                            String.format(
                                    "Could not acquire lock on a temp file necessary for PostRestoreHook to execute. Path to temp file: %s",
                                    tempLockFile.getAbsolutePath()));
                }
            } finally {
                // close and delete temp file (release order: lock, channel, file handle)
                lock.release();
                fileChannel.close();
                raf.close();
                tempLockFile.delete();
            }
        }
    }

    /**
     * Monitors heart beat of the process. A background thread periodically checks the heartbeat
     * file's last-modified time and forcibly kills the sub process when it goes stale, which
     * causes the main loop in {@link #execute()} to restart the hook.
     *
     * @param process Process to be monitored
     * @throws InterruptedException if interrupted while scheduling the monitor
     * @throws IOException on I/O errors while inspecting the heartbeat file
     */
    private void monitorPostRestoreHookHeartBeat(Process process)
            throws InterruptedException, IOException {
        File heartBeatFile = new File(config.getPostRestoreHookHeartbeatFileName());
        ThreadPoolExecutor heartBeatPoolExecutor =
                new NamedThreadPoolExecutor(1, "PostRestoreHook_HeartBeatThreadPool");
        // Allow the single core thread to die once monitoring finishes.
        heartBeatPoolExecutor.allowCoreThreadTimeOut(true);
        heartBeatPoolExecutor.submit(
                new RetryableCallable<Integer>() {
                    @Override
                    public Integer retriableCall() throws Exception {
                        while (true) {
                            sleeper.sleep(config.getPostRestoreHookHeartbeatCheckFrequencyInMs());
                            if (System.currentTimeMillis() - heartBeatFile.lastModified()
                                    > config.getPostRestoreHookHeartBeatTimeoutInMs()) {
                                // kick off post restore hook process, since there is no heartbeat
                                logger.info(
                                        "No heartbeat for the last {} ms, killing the existing process.",
                                        config.getPostRestoreHookHeartBeatTimeoutInMs());
                                if (process.isAlive()) {
                                    process.destroyForcibly();
                                }
                                return 0;
                            }
                        }
                    }
                });
    }

    /**
     * Checks for presence of DONE file.
     *
     * @return if done file exists
     */
    private boolean doneFileExists() {
        File doneFile = new File(config.getPostRestoreHookDoneFileName());
        return doneFile.exists();
    }
}
| 3,369 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/restore/RestoreTokenSelector.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.restore;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.utils.ITokenManager;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Named;
/** Runs algorithms such as finding the closest token from a list of tokens (in a backup). */
public class RestoreTokenSelector {
    private final ITokenManager tokenManager;
    private final IBackupFileSystem fs;

    @Inject
    public RestoreTokenSelector(ITokenManager tokenManager, @Named("backup") IBackupFileSystem fs) {
        this.tokenManager = tokenManager;
        this.fs = fs;
    }

    /**
     * Get the closest token to current token from the list of tokens available in the backup.
     *
     * @param tokenToSearch Token to search for
     * @param startDate Date for which the backups are available
     * @return Token as BigInteger
     */
    public BigInteger getClosestToken(BigInteger tokenToSearch, Date startDate) {
        final List<BigInteger> candidateTokens = new ArrayList<>();
        fs.listPrefixes(startDate)
                .forEachRemaining(path -> candidateTokens.add(new BigInteger(path.getToken())));
        return tokenManager.findClosestToken(tokenToSearch, candidateTokens);
    }
}
| 3,370 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/merics/CassMonitorMetrics.java | /**
* Copyright 2018 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.merics;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.patterns.PolledMeter;
import java.util.concurrent.atomic.AtomicLong;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
 * Spectator metrics for the Cassandra process monitor: lifecycle gauges (start/stop/auto-start)
 * and monotonic counters for seed/token/ring lookups.
 *
 * @author vchella
 */
@Singleton
public class CassMonitorMetrics {
    private final Gauge cassStop, cassAutoStart, cassStart;
    private final AtomicLong getSeedsCnt, getTokenCnt, getReplacedIpCnt, doubleRingCnt;

    @Inject
    public CassMonitorMetrics(Registry registry) {
        cassStop = registry.gauge(Metrics.METRIC_PREFIX + "cass.stop");
        cassStart = registry.gauge(Metrics.METRIC_PREFIX + "cass.start");
        cassAutoStart = registry.gauge(Metrics.METRIC_PREFIX + "cass.auto.start");
        getSeedsCnt = monotonicCounter(registry, Metrics.METRIC_PREFIX + "cass.getSeedCnt");
        getTokenCnt = monotonicCounter(registry, Metrics.METRIC_PREFIX + "cass.getTokenCnt");
        getReplacedIpCnt =
                monotonicCounter(registry, Metrics.METRIC_PREFIX + "cass.getReplacedIpCnt");
        doubleRingCnt = monotonicCounter(registry, Metrics.METRIC_PREFIX + "cass.doubleRingCnt");
    }

    /** Registers a polled monotonic counter under {@code name} and returns its backing value. */
    private static AtomicLong monotonicCounter(Registry registry, String name) {
        return PolledMeter.using(registry)
                .withName(name)
                .monitorMonotonicCounter(new AtomicLong(0));
    }

    /** Increments the gauge tracking Cassandra stops. */
    public void incCassStop() {
        cassStop.set(cassStop.value() + 1);
    }

    /** Increments the gauge tracking automatic Cassandra starts. */
    public void incCassAutoStart() {
        cassAutoStart.set(cassAutoStart.value() + 1);
    }

    /** Increments the gauge tracking Cassandra starts. */
    public void incCassStart() {
        cassStart.set(cassStart.value() + 1);
    }

    /** Counts one getSeeds call. */
    public void incGetSeeds() {
        getSeedsCnt.incrementAndGet();
    }

    /** Counts one getToken call. */
    public void incGetToken() {
        getTokenCnt.incrementAndGet();
    }

    /** Counts one getReplacedIp call. */
    public void incGetReplacedIp() {
        getReplacedIpCnt.incrementAndGet();
    }

    /** Counts one double-ring event. */
    public void incDoubleRing() {
        doubleRingCnt.incrementAndGet();
    }
}
| 3,371 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/merics/IMeasurement.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.merics;
/**
 * Represents a specific measurement for publishing to a metric system
 *
 * <p>Created by vinhn on 10/14/16.
 */
public interface IMeasurement {
    /** Records one failed occurrence of the measured operation. */
    void incrementFailure();

    /** Records one successful occurrence of the measured operation. */
    void incrementSuccess();
}
| 3,372 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/merics/BackupMetrics.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.merics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Registry;
import javax.inject.Inject;
import javax.inject.Singleton;
/** Spectator-backed metrics for Priam backup and restore activity. Created by vinhn on 2/13/17. */
@Singleton
public class BackupMetrics {
    /** Metric id under which the upload queue size is reported. */
    public static final String uploadQueueSize = Metrics.METRIC_PREFIX + "upload.queue.size";
    /** Metric id under which the download queue size is reported. */
    public static final String downloadQueueSize = Metrics.METRIC_PREFIX + "download.queue.size";

    private final Registry registry;

    /**
     * Distribution summaries yield a count (how many transfers were made), the max number of
     * bytes moved in a single transfer, and the total bytes moved.
     */
    private final DistributionSummary uploadRate;

    private final DistributionSummary downloadRate;

    private final Counter validUploads;
    private final Counter validDownloads;
    private final Counter invalidUploads;
    private final Counter invalidDownloads;
    private final Counter snsNotificationSuccess;
    private final Counter snsNotificationFailure;
    private final Counter forgottenFiles;
    private final Counter backupVerificationFailure;

    @Inject
    public BackupMetrics(Registry registry) {
        this.registry = registry;
        validDownloads = newCounter("download.valid");
        invalidDownloads = newCounter("download.invalid");
        validUploads = newCounter("upload.valid");
        invalidUploads = newCounter("upload.invalid");
        uploadRate = registry.distributionSummary(Metrics.METRIC_PREFIX + "upload.rate");
        downloadRate = registry.distributionSummary(Metrics.METRIC_PREFIX + "download.rate");
        snsNotificationSuccess = newCounter("sns.notification.success");
        snsNotificationFailure = newCounter("sns.notification.failure");
        forgottenFiles = newCounter("forgotten.files");
        backupVerificationFailure = newCounter("backup.verification.failure");
    }

    /** Registers a counter whose name carries the shared Priam metric prefix. */
    private Counter newCounter(String name) {
        return registry.counter(Metrics.METRIC_PREFIX + name);
    }

    public DistributionSummary getUploadRate() {
        return uploadRate;
    }

    public DistributionSummary getDownloadRate() {
        return downloadRate;
    }

    public Counter getValidUploads() {
        return validUploads;
    }

    public Counter getValidDownloads() {
        return validDownloads;
    }

    public Counter getInvalidUploads() {
        return invalidUploads;
    }

    public Counter getInvalidDownloads() {
        return invalidDownloads;
    }

    public Counter getSnsNotificationSuccess() {
        return snsNotificationSuccess;
    }

    public Counter getSnsNotificationFailure() {
        return snsNotificationFailure;
    }

    public Registry getRegistry() {
        return registry;
    }

    public void incrementValidUploads() {
        validUploads.increment();
    }

    public void incrementValidDownloads() {
        validDownloads.increment();
    }

    public void incrementInvalidUploads() {
        invalidUploads.increment();
    }

    public void incrementInvalidDownloads() {
        invalidDownloads.increment();
    }

    public void incrementSnsNotificationSuccess() {
        snsNotificationSuccess.increment();
    }

    public void incrementSnsNotificationFailure() {
        snsNotificationFailure.increment();
    }

    public void incrementBackupVerificationFailure() {
        backupVerificationFailure.increment();
    }

    /** Adds the given number of newly detected forgotten files to the counter. */
    public void incrementForgottenFiles(long forgottenFilesVal) {
        forgottenFiles.increment(forgottenFilesVal);
    }

    /** Records the size in bytes of one completed upload. */
    public void recordUploadRate(long sizeInBytes) {
        uploadRate.record(sizeInBytes);
    }

    /** Records the size in bytes of one completed download. */
    public void recordDownloadRate(long sizeInBytes) {
        downloadRate.record(sizeInBytes);
    }
}
| 3,373 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/merics/NodeToolFlushMeasurement.java | /**
* Copyright 2017 Netflix, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.merics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* Represents the value to be publish to a telemetry endpoint
*
* <p>Created by vinhn on 10/14/16.
*/
@Singleton
public class NodeToolFlushMeasurement implements IMeasurement {
private final Counter failure, success;
@Inject
public NodeToolFlushMeasurement(Registry registry) {
failure = registry.counter(Metrics.METRIC_PREFIX + "flush.failure");
success = registry.counter(Metrics.METRIC_PREFIX + "flush.success");
}
public void incrementFailure() {
this.failure.increment();
}
public void incrementSuccess() {
success.increment();
}
}
| 3,374 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/merics/CompactionMeasurement.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.merics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import javax.inject.Inject;
import javax.inject.Singleton;
/** Measurement class for scheduled compactions Created by aagrawal on 2/28/18. */
@Singleton
public class CompactionMeasurement implements IMeasurement {
private final Counter failure, success;
@Inject
public CompactionMeasurement(Registry registry) {
failure = registry.counter(Metrics.METRIC_PREFIX + "compaction.failure");
success = registry.counter(Metrics.METRIC_PREFIX + "compaction.success");
}
public void incrementFailure() {
this.failure.increment();
}
public void incrementSuccess() {
success.increment();
}
}
| 3,375 |
0 | Create_ds/Priam/priam/src/main/java/com/netflix/priam | Create_ds/Priam/priam/src/main/java/com/netflix/priam/merics/Metrics.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.merics;
/**
 * Shared metric-name constants for Priam. Created by aagrawal on 8/15/18.
 *
 * <p>NOTE(review): this is the constant-interface pattern; a final class with a private
 * constructor would be more idiomatic, but converting it could break any type that
 * implements this interface elsewhere in the project — confirm before changing.
 */
public interface Metrics {
    // Prefix prepended to every Priam metric name.
    String METRIC_PREFIX = "priam.";
}
| 3,376 |
0 | Create_ds/Priam/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra | Create_ds/Priam/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/NFSeedProvider.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cassandra.extensions;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.locator.SeedProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Retrieves the list of Cassandra seed nodes from the local Priam REST endpoint. */
public class NFSeedProvider implements SeedProvider {
    private static final Logger logger = LoggerFactory.getLogger(NFSeedProvider.class);

    /** Cassandra instantiates seed providers reflectively with the yaml args map; none are used here. */
    public NFSeedProvider(Map<String, String> args) {}

    /**
     * Fetches the comma-separated seed list from Priam and resolves each entry to an address.
     *
     * @return the resolved seed addresses; empty (never null) if Priam is unreachable or a
     *     name fails to resolve
     */
    @Override
    public List<InetAddress> getSeeds() {
        List<InetAddress> seeds = new ArrayList<>();
        try {
            String priamSeeds =
                    DataFetcher.fetchData(
                            "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_seeds");
            for (String seed : priamSeeds.split(",")) {
                seeds.add(InetAddress.getByName(seed));
            }
        } catch (Exception e) {
            // Best-effort: Cassandra retries seed discovery, so log and return what we have.
            logger.error("Failed to load seed data", e);
        }
        return seeds;
    }
}
| 3,377 |
0 | Create_ds/Priam/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra | Create_ds/Priam/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/PriamStartupAgent.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cassandra.extensions;
import java.lang.instrument.Instrumentation;
import java.util.Iterator;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.commons.lang3.StringUtils;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
/**
 * A <a
 * href="http://docs.oracle.com/javase/6/docs/api/java/lang/instrument/package-summary.html">PreMain</a>
 * class to run inside of the cassandra process. Contacts Priam for essential cassandra startup
 * information like token and seeds, and publishes it via system properties.
 */
public class PriamStartupAgent {
    /** First Cassandra version that supports replace-by-address instead of replace-by-token. */
    public static String REPLACED_ADDRESS_MIN_VER = "1.2.11";

    public static void premain(String agentArgs, Instrumentation inst) {
        PriamStartupAgent agent = new PriamStartupAgent();
        agent.setPriamProperties();
    }

    /**
     * Polls the local Priam REST API until a token and seed list are available, then seeds the
     * corresponding {@code cassandra.*} system properties. Blocks (retrying every 5s) until
     * Priam responds — Cassandra must not start without this data.
     */
    private void setPriamProperties() {
        String token = null;
        String seeds = null;
        boolean isReplace = false;
        String replacedIp = "";
        String extraEnvParams = null;

        while (true) {
            try {
                token =
                        DataFetcher.fetchData(
                                "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_token");
                seeds =
                        DataFetcher.fetchData(
                                "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_seeds");
                isReplace =
                        Boolean.parseBoolean(
                                DataFetcher.fetchData(
                                        "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/is_replace_token"));
                replacedIp =
                        DataFetcher.fetchData(
                                "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_replaced_ip");
                extraEnvParams =
                        DataFetcher.fetchData(
                                "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_extra_env_params");
            } catch (Exception e) {
                System.out.println(
                        "Failed to obtain startup data from priam, can not start yet. will retry shortly");
                e.printStackTrace();
            }

            if (token != null && seeds != null) break;
            try {
                Thread.sleep(5 * 1000);
            } catch (InterruptedException e1) {
                // Restore the interrupt flag; the loop keeps retrying by design.
                Thread.currentThread().interrupt();
            }
        }

        System.setProperty("cassandra.initial_token", token);
        setExtraEnvParams(extraEnvParams);

        if (isReplace) {
            String version = FBUtilities.getReleaseVersionString();
            System.out.println("Detect cassandra version : " + version);
            // BUG FIX: this previously used String.compareTo, which orders versions
            // lexicographically ("1.2.9" > "1.2.11"), picking the wrong replace mechanism
            // for multi-digit version segments. Compare numerically instead.
            if (compareVersions(version, REPLACED_ADDRESS_MIN_VER) < 0) {
                System.setProperty("cassandra.replace_token", token);
            } else {
                System.setProperty("cassandra.replace_address_first_boot", replacedIp);
            }
        }
    }

    /**
     * Compares two dot-separated version strings numerically, segment by segment. Missing
     * segments count as 0; non-numeric trailing qualifiers within a segment are ignored.
     *
     * @return negative if {@code v1 < v2}, zero if equal, positive if {@code v1 > v2}
     */
    static int compareVersions(String v1, String v2) {
        String[] a = v1.split("\\.");
        String[] b = v2.split("\\.");
        int len = Math.max(a.length, b.length);
        for (int i = 0; i < len; i++) {
            int x = i < a.length ? parseLeadingDigits(a[i]) : 0;
            int y = i < b.length ? parseLeadingDigits(b[i]) : 0;
            if (x != y) {
                return Integer.compare(x, y);
            }
        }
        return 0;
    }

    /** Parses the leading digits of a version segment; returns 0 if it has none. */
    private static int parseLeadingDigits(String segment) {
        int end = 0;
        while (end < segment.length() && Character.isDigit(segment.charAt(end))) {
            end++;
        }
        return end == 0 ? 0 : Integer.parseInt(segment.substring(0, end));
    }

    /**
     * Parses the Priam-provided JSON map of extra environment parameters and exports each
     * non-blank key/value pair as a system property. Parse failures are logged and ignored.
     */
    private void setExtraEnvParams(String extraEnvParams) {
        try {
            if (null != extraEnvParams && extraEnvParams.length() > 0) {
                JSONParser parser = new JSONParser();
                JSONObject jsonObj = (JSONObject) parser.parse(extraEnvParams);
                for (Object keyObj : jsonObj.keySet()) {
                    String key = (String) keyObj;
                    String val = (String) jsonObj.get(key);
                    if (StringUtils.isNotBlank(key) && StringUtils.isNotBlank(val)) {
                        System.setProperty(key.trim(), val.trim());
                    }
                }
            }
        } catch (Exception e) {
            System.out.println(
                    "Failed to parse extra env params: "
                            + extraEnvParams
                            + ". However, ignoring the exception.");
            e.printStackTrace();
        }
    }
}
| 3,378 |
0 | Create_ds/Priam/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra | Create_ds/Priam/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/DataFetcher.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.priam.cassandra.extensions;
import com.google.common.base.Charsets;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.FilterInputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Small HTTP GET helper used to pull startup data from the local Priam REST API. */
public class DataFetcher {
    private static final Logger logger = LoggerFactory.getLogger(DataFetcher.class);

    /**
     * Performs a GET against the given URL and returns the response body as UTF-8 text.
     *
     * @param url endpoint to fetch
     * @return the full response body
     * @throws RuntimeException if the request fails or returns a non-200 status
     */
    public static String fetchData(String url) {
        HttpURLConnection conn = null;
        try {
            conn = (HttpURLConnection) new URL(url).openConnection();
            conn.setConnectTimeout(1000);
            conn.setReadTimeout(10000);
            conn.setRequestMethod("GET");
            if (conn.getResponseCode() != 200)
                throw new RuntimeException("Unable to get data for URL " + url);

            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            // BUG FIX: previously cast conn.getContent() to FilterInputStream, which is not
            // guaranteed by the API and leaked the stream on exception paths. Use
            // getInputStream() with try-with-resources instead.
            try (DataInputStream responseStream = new DataInputStream(conn.getInputStream())) {
                byte[] b = new byte[2048];
                int c;
                while ((c = responseStream.read(b, 0, b.length)) != -1) bos.write(b, 0, c);
            }

            String return_ = new String(bos.toByteArray(), Charsets.UTF_8);
            logger.info("Calling URL API: {} returns: {}", url, return_);
            return return_;
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        } finally {
            // Release the connection even when the request or read failed.
            if (conn != null) conn.disconnect();
        }
    }
}
| 3,379 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history/TraceJobHistoryLoader.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history;
import com.netflix.bdp.inviso.history.job.Job;
import com.netflix.bdp.inviso.history.job.Task;
import com.netflix.bdp.inviso.history.job.TaskAttempt;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEventHandler;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig.Feature;
/**
 * Basic handler for loading a MR2 job history file into a {@link Job} tree
 * (job -> tasks -> attempts) suitable for json serialization.
 *
 * @author dweeks
 */
public class TraceJobHistoryLoader implements HistoryEventHandler {
    private static final org.apache.log4j.Logger log = org.apache.log4j.Logger.getLogger(TraceJobHistoryLoader.class.getName());

    /** Maps counter-bearing datum field names to the event accessor that decodes them. */
    private static final Map<String,String> COUNTER_TAGS = new HashMap<>();

    static {
        COUNTER_TAGS.put("counters", "getCounters");
        COUNTER_TAGS.put("mapCounters", "getMapCounters");
        COUNTER_TAGS.put("reduceCounters", "getReduceCounters");
        COUNTER_TAGS.put("totalCounters", "getTotalCounters");
    }

    /** Datum field names excluded from the trace output (configured externally). */
    private Set<String> skipElements = new HashSet<>();

    private Job job = new Job();

    public TraceJobHistoryLoader(PropertiesConfiguration properties) {
        skipElements.addAll(properties.getList("inviso.trace.skip.elements"));
    }

    /**
     * Dispatches each history event to the job-, task-, or attempt-level handler.
     * Reflection failures are logged rather than propagated so one bad event does not
     * abort the whole parse.
     */
    @Override
    public void handleEvent(HistoryEvent event) throws IOException {
        try {
            switch (event.getEventType()) {
                case AM_STARTED:
                case JOB_SUBMITTED:
                case JOB_STATUS_CHANGED:
                case JOB_PRIORITY_CHANGED:
                case JOB_INITED:
                case JOB_INFO_CHANGED:
                case JOB_FINISHED:
                    /* History doesn't have an event for success, so we need to derive
                       the successful state. Intentional fall-through to handleJobEvent. */
                    job.put("jobStatus", JobStatus.State.SUCCEEDED.toString());
                case JOB_FAILED:
                case JOB_ERROR:
                case JOB_KILLED:
                    handleJobEvent(event);
                    break;

                //Task Events
                case TASK_STARTED:
                case TASK_UPDATED:
                case TASK_FINISHED:
                case TASK_FAILED:
                    handleTaskEvent(event);
                    break;

                case SETUP_ATTEMPT_STARTED:
                case SETUP_ATTEMPT_FAILED:
                case SETUP_ATTEMPT_FINISHED:
                case SETUP_ATTEMPT_KILLED:
                case CLEANUP_ATTEMPT_STARTED:
                case CLEANUP_ATTEMPT_FINISHED:
                case CLEANUP_ATTEMPT_KILLED:
                case CLEANUP_ATTEMPT_FAILED:
                case MAP_ATTEMPT_STARTED:
                case MAP_ATTEMPT_FINISHED:
                case MAP_ATTEMPT_KILLED:
                case MAP_ATTEMPT_FAILED:
                case REDUCE_ATTEMPT_STARTED:
                case REDUCE_ATTEMPT_FINISHED:
                case REDUCE_ATTEMPT_KILLED:
                case REDUCE_ATTEMPT_FAILED:
                    handleAttemptEvent(event);
                    break;

                default: log.info("Ignoring event: " + event);
            }
        } catch (IllegalArgumentException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) {
            log.error("", e);
        }
    }

    /** Merges the event's datum fields directly into the job-level map. */
    private void handleJobEvent(HistoryEvent event) throws IllegalArgumentException, IllegalAccessException, NoSuchMethodException, InvocationTargetException {
        copyDatumFields(event, job);
    }

    /** Builds a task record from the event and merges it under its task id. */
    private void handleTaskEvent(HistoryEvent event) throws IllegalArgumentException, IllegalAccessException, NoSuchMethodException, InvocationTargetException {
        Task task = new Task();
        copyDatumFields(event, task);

        String taskId = (String) task.get("taskid");
        job.getTask(taskId).merge(task);
    }

    /** Builds an attempt record from the event and merges it under its owning task. */
    private void handleAttemptEvent(HistoryEvent event) throws IllegalArgumentException, IllegalAccessException, NoSuchMethodException, InvocationTargetException {
        TaskAttempt attempt = new TaskAttempt();
        copyDatumFields(event, attempt);

        // BUG FIX: previously looked up the literal string "taskid" instead of the
        // attempt's actual task id, so every attempt was attached to one bogus task.
        Task task = job.getTask((String) attempt.get("taskid"));
        task.getAttempt((String) attempt.get("attemptId")).merge(attempt);
    }

    /**
     * Shared reflection walk over the Avro datum backing a history event: copies each
     * non-static, non-skipped, non-empty field into the target map, decoding counter
     * fields via the event's accessor methods.
     */
    private void copyDatumFields(HistoryEvent event, Map<String, Object> target) throws IllegalArgumentException, IllegalAccessException, NoSuchMethodException, InvocationTargetException {
        for(Field f : event.getDatum().getClass().getFields()) {
            f.setAccessible(true);

            if(Modifier.isStatic(f.getModifiers())) {
                continue;
            }

            String name = f.getName();
            Object value = f.get(event.getDatum());

            if(skipElements.contains(name)) {
                continue;
            }

            if(value instanceof CharSequence) {
                value = value.toString();
            }

            if(value == null || value.toString().trim().isEmpty()) {
                continue;
            }

            // Counter fields hold opaque Avro data; decode via the matching getter.
            if(COUNTER_TAGS.containsKey(name)) {
                Method m = event.getClass().getDeclaredMethod(COUNTER_TAGS.get(name), new Class[0]);
                m.setAccessible(true);

                Counters counters = (Counters) m.invoke(event, new Object[0]);

                value = handleCounterEntries(counters);
            }

            target.put(name, value);
        }
    }

    /** Flattens Hadoop counters into {group display name -> {counter display name -> value}}. */
    private Map<String, Map<String, Long>> handleCounterEntries(Counters counters) {
        Map<String, Map<String, Long>> result = new HashMap<>();

        for(CounterGroup group : counters) {
            Map<String,Long> cmap = new HashMap<>();

            for(Counter counter : group) {
                cmap.put(counter.getDisplayName(), counter.getValue());
            }

            result.put(group.getDisplayName(), cmap);
        }

        return result;
    }

    /**
     * Parses a string into a Long or Double when numeric, else returns it unchanged.
     * NOTE(review): currently unreferenced within this class; kept for compatibility.
     */
    private Object parseValue(String value) {
        Object result = value;

        if(NumberUtils.isDigits((String) value)) {
            result = Long.parseLong(value);
        } else if(NumberUtils.isNumber((String) value)) {
            result = Double.parseDouble(value);
        }

        return result;
    }

    /** Returns the accumulated job tree. */
    public Job getJob() {
        return job;
    }

    /** Ad-hoc local test harness: parses a local history file and dumps it as json. */
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.newInstanceLocal(new Configuration());

        JobHistoryParser parser = new JobHistoryParser(fs, "/tmp/job_1405808155709_124465.history");

        //JobInfo jobInfo = parser.parse();

        TraceJobHistoryLoader loader = new TraceJobHistoryLoader(new PropertiesConfiguration());
        parser.parse(loader);

        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(Feature.INDENT_OUTPUT, true);
        mapper.writeValue(new File("/tmp/mr2-hist.json"), loader.getJob());
    }
}
| 3,380 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history/TraceService.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.databind.ser.std.MapSerializer;
import com.fasterxml.jackson.databind.type.MapLikeType;
import com.fasterxml.jackson.databind.type.SimpleType;
import com.netflix.bdp.inviso.fs.WrappedCompressionInputStream;
import com.netflix.bdp.inviso.history.job.Job;
import com.netflix.bdp.inviso.history.job.Task;
import com.netflix.bdp.inviso.history.job.TaskAttempt;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import net.sf.ehcache.CacheManager;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.log4j.Logger;
/**
 * REST API to load a full job history file as a json object.
 *
 * @author dweeks
 */
@Path("trace")
public class TraceService implements ServletContextListener {
    private static final Logger log = Logger.getLogger(TraceService.class.getName());

    // NOTE(review): static state populated from an instance lifecycle callback; this works
    // only while a single TraceService context exists, which matches this deployment.
    private static Configuration config;
    private static HistoryLocator historyLocator;
    private static PropertiesConfiguration properties;

    /** Loads trace.properties and instantiates the configured {@link HistoryLocator}. */
    @Override
    public void contextInitialized(ServletContextEvent context) {
        log.info("Initializing Trace Service");

        config = new Configuration();
        properties = new PropertiesConfiguration();

        try {
            properties.load(TraceService.class.getClassLoader().getResourceAsStream("trace.properties"));

            Class<?> c = config.getClass("trace.history.locator.impl", com.netflix.bdp.inviso.history.impl.BucketedHistoryLocator.class);
            historyLocator = (HistoryLocator) c.newInstance();
            historyLocator.initialize(config);
        } catch (Exception e) {
            log.error("Failed to initialize trace service.",e);
        }
    }

    /** Shuts down the cache manager and the history locator. */
    @Override
    public void contextDestroyed(ServletContextEvent sce) {
        CacheManager.getInstance().shutdown();

        try {
            historyLocator.close();
        } catch (Exception e) {
            log.error("Failed to close properly.", e);
        }

        log.info("Trace Service Destroyed");
    }

    /**
     * Returns a json object representing the job history.
     *
     * @param jobId job to trace
     * @param path Use the given path as opposed to the history locator
     * @param summary Return just the top level details of the job
     * @param counters Include counters
     * @return Json string
     * @throws Exception on failure to locate, read, or serialize the history file
     */
    @Path("load/{jobId}")
    @GET
    @Produces("application/json")
    public String trace(@PathParam("jobId") final String jobId, @QueryParam("path") final String path, @QueryParam("summary") boolean summary, @QueryParam("counters") @DefaultValue("true") boolean counters) throws Exception {
        Pair<org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path> historyPath;

        if(path != null) {
            historyPath = new ImmutablePair<>(null, new org.apache.hadoop.fs.Path(path));
        } else {
            historyPath = historyLocator.locate(jobId);
        }

        if(historyPath == null) {
            throw new WebApplicationException(404);
        }

        TraceJobHistoryLoader loader = new TraceJobHistoryLoader(properties);

        FileSystem fs = FileSystem.get(historyPath.getRight().toUri(), config);
        CompressionCodec codec = new CompressionCodecFactory(config).getCodec(historyPath.getRight());

        // BUG FIX: the history stream was never closed, leaking one file handle per
        // request. Closing the (possibly wrapped) stream in finally covers both the plain
        // and the decompressing path, including failures while wrapping.
        FSDataInputStream fin = fs.open(historyPath.getRight());
        try {
            if(codec != null) {
                fin = new FSDataInputStream(new WrappedCompressionInputStream(codec.createInputStream(fin)));
            }
            new JobHistoryParser(fin).parse(loader);
        } finally {
            fin.close();
        }

        String [] ignore = { "counters" };

        ObjectMapper mapper = new ObjectMapper();
        SimpleModule module = new SimpleModule("MyModule", new Version(1, 0, 0, null));

        //Job
        JavaType jobMapType = MapLikeType.construct(Job.class, SimpleType.construct(String.class), SimpleType.construct(Object.class));
        module.addSerializer(Job.class, MapSerializer.construct(ignore, jobMapType, false, null, null, null, null));

        //Task
        JavaType taskMapType = MapLikeType.construct(Task.class, SimpleType.construct(String.class), SimpleType.construct(Object.class));
        module.addSerializer(Task.class, MapSerializer.construct(ignore, taskMapType, false, null, null, null, null));

        //Attempt
        JavaType attemptMapType = MapLikeType.construct(TaskAttempt.class, SimpleType.construct(String.class), SimpleType.construct(Object.class));
        module.addSerializer(TaskAttempt.class, MapSerializer.construct(ignore, attemptMapType, false, null, null, null, null));

        // Registering the module installs the counter-stripping serializers.
        if(!counters) {
            mapper.registerModule(module);
        }

        if(summary) {
            loader.getJob().clearTasks();
        }

        return mapper.writeValueAsString(loader.getJob());
    }
}
| 3,381 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history/HistoryLocator.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
 * Interface to abstract where job history files are located.
 *
 * @author dweeks
 */
public interface HistoryLocator {
    /** Prepares the locator from Hadoop configuration; called once before any locate(). */
    void initialize(Configuration config) throws Exception;

    /** Returns the (config path, history path) pair for the given job id. */
    Pair<Path, Path> locate(String jobId) throws Exception;

    /** Releases any resources held by the locator. */
    void close() throws Exception;
}
| 3,382 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history/impl/BucketedHistoryLocator.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history.impl;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import com.netflix.bdp.inviso.history.HistoryLocator;
/**
 * Locates job history/config files in bucketed directories, where the bucket name is the
 * trailing digits of the job id.
 */
public class BucketedHistoryLocator implements HistoryLocator {
    private Configuration conf;

    private Path bucketedPath;
    private String historyPostfix;
    private String configPostfix;

    @Override
    public void initialize(Configuration config) throws Exception {
        this.conf = config;

        bucketedPath = new Path(conf.get("inviso.history.location", "hdfs://tmp"));

        // BUG FIX: both postfixes previously read the SAME key
        // ("inviso.history.location.postfix"), so overriding it gave the config and
        // history files identical suffixes. Each now has its own key; the legacy key is
        // still honored as a fallback for backward compatibility.
        String legacyPostfix = conf.get("inviso.history.location.postfix");
        configPostfix = conf.get(
                "inviso.history.location.config.postfix",
                legacyPostfix != null ? legacyPostfix : ".conf.gz");
        historyPostfix = conf.get(
                "inviso.history.location.history.postfix",
                legacyPostfix != null ? legacyPostfix : ".history.gz");
    }

    /**
     * Returns the config and history locations.
     *
     * @param jobId job id whose trailing digits (bucket depth, default 3) pick the bucket
     * @return pair of (config path, history path)
     */
    @Override
    public Pair<Path, Path> locate(String jobId) {
        String bucket = jobId.substring(jobId.length() - conf.getInt("inviso.history.bucket.depth", 3));

        Path originConfig = new Path(bucketedPath +Path.SEPARATOR+ bucket, jobId + configPostfix);
        Path originHistory = new Path(bucketedPath +Path.SEPARATOR+ bucket, jobId + historyPostfix);

        return new ImmutablePair<>(originConfig, originHistory);
    }

    @Override
    public void close() throws Exception { }
}
| 3,383 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history/job/TaskAttempt.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history.job;
import java.util.HashMap;
/**
* Container for json marshalling.
*
* @author dweeks
*/
public class TaskAttempt extends HashMap<String, Object> {
public void merge(TaskAttempt other) {
this.putAll(other);
}
}
| 3,384 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history/job/Job.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history.job;
import java.util.HashMap;
import java.util.Map;
import org.codehaus.jackson.map.annotate.JsonSerialize;
/**
* Container for json marshalling.
*
* @author dweeks
*/
/**
 * Container for json marshalling.
 *
 * <p>A job is a free-form attribute map plus a nested map of tasks, which is
 * also exposed as the "tasks" entry of the map itself.
 *
 * @author dweeks
 */
public class Job extends HashMap<String, Object> {
    private static final String TASKS = "tasks";
    private Map<String, Task> tasks = new HashMap<>();

    public Job() {
        put(TASKS, tasks);
    }

    /**
     * Returns the task for {@code taskId}, lazily creating and registering an
     * empty one on first access.
     */
    public Task getTask(String taskId) {
        if (!tasks.containsKey(taskId)) {
            tasks.put(taskId, new Task());
        }
        return tasks.get(taskId);
    }

    /** Drops all task detail, leaving only job-level attributes. */
    public void clearTasks() {
        tasks.clear();
        remove(TASKS);
    }
}
| 3,385 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/history/job/Task.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history.job;
import java.util.HashMap;
import java.util.Map;
/**
* Container for json marshalling.
*
* @author dweeks
*/
/**
 * Container for json marshalling.
 *
 * <p>A task is a free-form attribute map plus a nested map of attempts, which
 * is also exposed as the "attempts" entry of the map itself.
 *
 * @author dweeks
 */
public class Task extends HashMap<String, Object> {
    private static final String ATTEMPTS = "attempts";
    private Map<String, TaskAttempt> attempts = new HashMap<>();

    public Task() {
        put(ATTEMPTS, attempts);
    }

    /**
     * Absorbs {@code other}: its attempts are folded into this task's attempt
     * map and its remaining attributes overwrite this task's.
     */
    @SuppressWarnings("unchecked")
    public void merge(Task other) {
        Map<String, TaskAttempt> otherAttempts = (Map<String, TaskAttempt>) other.remove(ATTEMPTS);
        attempts.putAll(otherAttempts);
        putAll(other);
    }

    /**
     * Returns the attempt for {@code attemptId}, lazily creating and
     * registering an empty one on first access.
     */
    public TaskAttempt getAttempt(String attemptId) {
        if (!attempts.containsKey(attemptId)) {
            attempts.put(attemptId, new TaskAttempt());
        }
        return attempts.get(attemptId);
    }
}
| 3,386 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/log/LogService.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.log;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import javax.ws.rs.GET;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
/**
* REST endpoint to retrieve the contents of a tasks log contents.
*
* @author dweeks
*/
@javax.ws.rs.Path("log")
public class LogService {
    /**
     * Streams the aggregated YARN log of a single container as plain text.
     *
     * @param owner user that owns the application (log dir is per-owner)
     * @param appId application id, e.g. "application_1400000000000_0001"
     * @param containerId container whose log entry should be returned
     * @param nodeId node manager the container ran on, e.g. "host:port"
     * @param fs optional filesystem URI overriding fs.default.name
     * @param root optional override of the remote app-log root directory
     * @return 200 response that lazily streams the log contents
     * @throws IOException if the aggregated log file cannot be opened
     * @throws WebApplicationException 404 when the container has no log entry
     */
    @javax.ws.rs.Path("load/{owner}/{appId}/{containerId}/{nodeId}")
    @GET
    @Produces("text/plain")
    public Response log(@PathParam("owner") String owner,
            @PathParam("appId") String appId,
            @PathParam("containerId") String containerId,
            @PathParam("nodeId") String nodeId,
            @QueryParam("fs") String fs,
            @QueryParam("root") String root) throws IOException {
        Configuration conf = new Configuration();
        if(fs != null) {
            conf.set("fs.default.name", fs);
        }
        // Default log root comes from the NM remote app-log dir setting.
        Path logRoot = new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
        if(root != null) {
            logRoot = new Path(root);
        }
        Path logPath = LogAggregationUtils.getRemoteNodeLogFileForApp(
                logRoot,
                ConverterUtils.toApplicationId(appId),
                owner,
                ConverterUtils.toNodeId(nodeId),
                LogAggregationUtils.getRemoteNodeLogDirSuffix(conf)
        );
        AggregatedLogFormat.LogReader reader = new AggregatedLogFormat.LogReader(conf, logPath);
        // Linear scan of the aggregated file for the entry keyed by this container id.
        LogKey key = new LogKey();
        DataInputStream in = reader.next(key);
        while(in != null && !key.toString().equals(containerId)) {
            key = new LogKey();
            in = reader.next(key);
        }
        if(in == null) {
            // Container not present in the aggregated log file.
            throw new WebApplicationException(404);
        }
        // NOTE(review): 'reader' is never closed — the stream must outlive
        // this method because the response body is written lazily below;
        // consider closing it at the end of StreamingOutput.write().
        final DataInputStream fin = in;
        StreamingOutput stream = new StreamingOutput() {
            @Override
            public void write(OutputStream os) throws IOException, WebApplicationException {
                PrintStream out = new PrintStream(os);
                // Copy every log-type section; EOF marks the end of the entry.
                while(true) {
                    try {
                        LogReader.readAContainerLogsForALogType(fin, out);
                        out.flush();
                    } catch (EOFException e) {
                        break;
                    }
                }
            }
        };
        return Response.ok(stream).build();
    }
}
| 3,387 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/fs/S3DelegateFS.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.fs;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
/**
* Delegate for NativeS3FileSystem
*
* @author dweeks
*/
/**
 * Delegate for NativeS3FileSystem.
 *
 * <p>Exposes the FileSystem-based native S3 implementation through Hadoop's
 * AbstractFileSystem API under the "s3" scheme.
 *
 * @author dweeks
 */
public class S3DelegateFS extends DelegateToFileSystem {
    /** Delegates the "s3" scheme (no authority required) to a fresh NativeS3FileSystem. */
    public S3DelegateFS(URI uri, Configuration conf) throws IOException, URISyntaxException {
        super(uri, new NativeS3FileSystem(), conf, "s3", false);
    }
    /** General-purpose constructor mirroring DelegateToFileSystem's signature. */
    public S3DelegateFS(URI theUri, FileSystem theFsImpl, Configuration conf, String supportedScheme, boolean authorityRequired) throws IOException, URISyntaxException {
        super(theUri, theFsImpl, conf, supportedScheme, authorityRequired);
    }
    @Override
    public void checkPath(Path path) {
        // Intentionally a no-op: skips the parent's scheme/authority
        // validation so paths from other authorities are accepted.
        //bypass
    }
}
| 3,388 |
0 | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr2/src/main/java/com/netflix/bdp/inviso/fs/WrappedCompressionInputStream.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.fs;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.compress.CompressionInputStream;
/**
* Wrapping CompressionInputStream so the job history loader can read a compressed stream.
*
* @author dweeks
*/
/**
 * Wrapping CompressionInputStream so the job history loader can read a compressed stream.
 *
 * <p>All reads delegate to the wrapped {@link CompressionInputStream};
 * {@link PositionedReadable} is implemented only nominally — the positioned
 * read methods throw because a compressed stream cannot be read at an
 * arbitrary offset.
 *
 * @author dweeks
 */
public class WrappedCompressionInputStream extends CompressionInputStream implements PositionedReadable {
    public WrappedCompressionInputStream(CompressionInputStream in) throws IOException {
        super(in);
    }
    /** Delegates to the wrapped stream's bulk read. */
    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        return ((CompressionInputStream)this.in).read(b, off, len);
    }
    /** Resets the wrapped stream's decompressor state. */
    @Override
    public void resetState() throws IOException {
        ((CompressionInputStream)this.in).resetState();
    }
    /** Delegates to the wrapped stream's single-byte read. */
    @Override
    public int read() throws IOException {
        return ((CompressionInputStream)this.in).read();
    }
    /** Unsupported: compressed input is not positionally readable. */
    @Override
    public int read(long position, byte[] buffer, int offset, int length) throws IOException {
        throw new UnsupportedOperationException("Not supported yet.");
    }
    /** Unsupported: compressed input is not positionally readable. */
    @Override
    public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
        throw new UnsupportedOperationException("Not supported yet.");
    }
    /** Unsupported: compressed input is not positionally readable. */
    @Override
    public void readFully(long position, byte[] buffer) throws IOException {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}
| 3,389 |
0 | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history/TraceJobHistoryLoader.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history;
import com.netflix.bdp.inviso.history.job.Job;
import com.netflix.bdp.inviso.history.job.Task;
import com.netflix.bdp.inviso.history.job.TaskAttempt;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.text.ParseException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;
import org.apache.hadoop.mapred.JobHistory;
import org.apache.hadoop.mapred.JobHistory.Keys;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectWriter;
/**
* Simple Job History Parser
*
* @author dweeks
*/
/**
 * Simple Job History Parser.
 *
 * <p>{@code JobHistory.Listener} implementation that folds parsed MR1 history
 * records into a single {@link Job} tree: job-level attributes, tasks, and
 * task attempts, with counter strings expanded into nested maps.
 *
 * @author dweeks
 */
public class TraceJobHistoryLoader implements JobHistory.Listener {
    private Job job = new Job();

    /**
     * Dispatches one parsed history record to the matching handler.
     *
     * @throws RuntimeException for record types this loader does not know
     */
    @Override
    public void handle(JobHistory.RecordTypes rt, Map<Keys, String> entries) throws IOException {
        switch(rt) {
            case Job: handleJobEntries(job, entries);
                break;
            case Task: handleTaskEntries(job, entries);
                break;
            case MapAttempt:
            case ReduceAttempt: handleAttemptEntries(job, entries);
                break;
            case Meta:
                break;
            case Jobtracker:
                break;
            default: throw new RuntimeException("Unknown Record Type: " + rt);
        }
    }

    /** Copies job-level attributes into {@code job}, expanding counters. */
    private void handleJobEntries(Job job, Map<Keys, String> entries) {
        for(Entry<Keys, String> e : entries.entrySet()) {
            switch(e.getKey()) {
                case COUNTERS:
                    handleCounterEntries(job, e.getValue());
                    break;
                case MAP_COUNTERS:
                case REDUCE_COUNTERS:
                    // Aggregate map/reduce counter sets are intentionally skipped.
                    break;
                default:
                    // Normalize "JOB_XXX" key names to lower-case "xxx".
                    String key = e.getKey().name().toLowerCase().trim();
                    key = key.replaceFirst("^job(_)?", "");
                    job.put(key, parseValue(e.getValue()));
            }
        }
    }

    /** Builds a Task from the record and merges it into the job by task id. */
    private void handleTaskEntries(Job job, Map<Keys, String> entries) {
        Task task = new Task();
        for(Entry<Keys, String> e : entries.entrySet()) {
            switch(e.getKey()) {
                case COUNTERS:
                    handleCounterEntries(task, e.getValue());
                    break;
                default:
                    String key = e.getKey().name().toLowerCase().trim();
                    task.put(key, parseValue(e.getValue()));
            }
        }
        String taskId = (String) task.get(Keys.TASKID.name().toLowerCase());
        job.getTask(taskId).merge(task);
    }

    /** Builds a TaskAttempt and merges it into its parent task. */
    private void handleAttemptEntries(Job job, Map<Keys, String> entries) {
        TaskAttempt attempt = new TaskAttempt();
        for(Entry<Keys, String> e : entries.entrySet()) {
            switch(e.getKey()) {
                case COUNTERS:
                    handleCounterEntries(attempt, e.getValue());
                    break;
                default:
                    String key = e.getKey().name().toLowerCase().trim();
                    attempt.put(key, parseValue(e.getValue()));
            }
        }
        Task task = job.getTask((String) attempt.get(Keys.TASKID.name().toLowerCase()));
        task.getAttempt((String) attempt.get(Keys.TASK_ATTEMPT_ID.name().toLowerCase())).merge(attempt);
    }

    /**
     * Expands an escaped compact counters string into nested maps
     * (group display name -> counter display name -> value) and stores the
     * result under the "counters" key of {@code map}.
     */
    private void handleCounterEntries(Map<String, Object> map, String compactString) {
        try {
            Map<String, Map<String, Long>> counters = new HashMap<>();
            for(Group group : Counters.fromEscapedCompactString(compactString)) {
                Map<String,Long> cmap = new HashMap<>();
                for(Counter counter : group) {
                    cmap.put(counter.getDisplayName(), counter.getCounter());
                }
                counters.put(group.getDisplayName(), cmap);
            }
            map.put("counters", counters);
        } catch (ParseException ex) {
            // Malformed counter strings are logged and skipped rather than
            // aborting the whole history load.
            Logger.getLogger(TraceJobHistoryLoader.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Promotes purely numeric attribute strings to Long (all digits) or
     * Double (other numerics) so the json output carries typed values;
     * everything else is returned unchanged as a String.
     */
    private Object parseValue(String value) {
        if(NumberUtils.isDigits(value)) {
            try {
                return Long.valueOf(value);
            } catch (NumberFormatException tooLarge) {
                // Digit strings beyond Long range used to throw out of the
                // parser; degrade to a Double instead of failing the load.
                return Double.valueOf(value);
            }
        }
        if(NumberUtils.isNumber(value)) {
            return Double.valueOf(value);
        }
        return value;
    }

    /** @return the accumulated job tree */
    public Job getJob() {
        return job;
    }
}
| 3,390 |
0 | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history/TraceService.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.databind.ser.std.MapSerializer;
import com.fasterxml.jackson.databind.type.MapLikeType;
import com.fasterxml.jackson.databind.type.SimpleType;
import com.netflix.bdp.inviso.history.job.Job;
import com.netflix.bdp.inviso.history.job.Task;
import com.netflix.bdp.inviso.history.job.TaskAttempt;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.mapred.JobHistory;
import org.apache.log4j.Logger;
/**
* REST API to load a full job history file as a json object.
*
* @author dweeks
*/
@Path("trace")
public class TraceService implements ServletContextListener {
    private static final Logger log = Logger.getLogger(TraceService.class.getName());
    // Shared across requests; populated once in contextInitialized().
    private static Configuration config;
    private static HistoryLocator historyLocator;
    private static PropertiesConfiguration properties;

    @Override
    public void contextInitialized(ServletContextEvent context) {
        log.info("Initializing Trace Service");
        config = new Configuration();
        properties = new PropertiesConfiguration();
        try {
            properties.load(TraceService.class.getClassLoader().getResourceAsStream("trace.properties"));
            // NOTE(review): the locator class is read from the Hadoop
            // Configuration, not from the trace.properties loaded just above
            // — confirm which source is meant to drive this setting.
            Class<?> c = config.getClass("trace.history.locator.impl", com.netflix.bdp.inviso.history.impl.BucketedHistoryLocator.class);
            historyLocator = (HistoryLocator) c.newInstance();
            historyLocator.initialize(config);
        } catch (Exception e) {
            log.error("Failed to initialize trace service.",e);
        }
    }

    @Override
    public void contextDestroyed(ServletContextEvent sce) {
        try {
            historyLocator.close();
        } catch (Exception e) {
            log.error("Failed to close properly.", e);
        }
        log.info("Trace Service Destroyed");
    }

    /**
     * Loads a full job history file and renders it as a json string.
     *
     * @param jobId job id to load
     * @param path optional explicit history-file path, bypassing the locator
     * @param summary when true, task/attempt detail is stripped from the result
     * @param counters when false, "counters" maps are omitted from the output
     * @return the job history serialized as json
     * @throws WebApplicationException 404 when no history can be located
     */
    @Path("load/{jobId}")
    @GET
    @Produces("application/json")
    public String trace(@PathParam("jobId") final String jobId, @QueryParam("path") final String path, @QueryParam("summary") boolean summary, @QueryParam("counters") @DefaultValue("true") boolean counters) throws Exception {
        Pair<org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path> historyPath;
        if(path != null) {
            // Explicit path given: skip the locator (config side left null).
            historyPath = new ImmutablePair<>(null, new org.apache.hadoop.fs.Path(path));
        } else {
            historyPath = historyLocator.locate(jobId);
        }
        if(historyPath == null) {
            throw new WebApplicationException(404);
        }
        TraceJobHistoryLoader loader = new TraceJobHistoryLoader();
        JobHistory.parseHistoryFromFS(historyPath.getRight().toString(), loader, historyPath.getRight().getFileSystem(config));
        // Serializers below drop the "counters" entry from each map type.
        String [] ignore = { "counters" };
        ObjectMapper mapper = new ObjectMapper();
        SimpleModule module = new SimpleModule("MyModule", new Version(1, 0, 0, null));
        //Job
        JavaType jobMapType = MapLikeType.construct(Job.class, SimpleType.construct(String.class), SimpleType.construct(Object.class));
        module.addSerializer(Job.class, MapSerializer.construct(ignore, jobMapType, false, null, null, null, null));
        //Task
        JavaType taskMapType = MapLikeType.construct(Task.class, SimpleType.construct(String.class), SimpleType.construct(Object.class));
        module.addSerializer(Task.class, MapSerializer.construct(ignore, taskMapType, false, null, null, null, null));
        //Attempt
        JavaType attemptMapType = MapLikeType.construct(TaskAttempt.class, SimpleType.construct(String.class), SimpleType.construct(Object.class));
        module.addSerializer(TaskAttempt.class, MapSerializer.construct(ignore, attemptMapType, false, null, null, null, null));
        if(!counters) {
            // Only install the counter-stripping serializers when asked.
            mapper.registerModule(module);
        }
        if(summary) {
            loader.getJob().clearTasks();
        }
        return mapper.writeValueAsString(loader.getJob());
    }
}
| 3,391 |
0 | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history/HistoryLocator.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Interface to abstract where job history files are located.
*
* @author dweeks
*/
public interface HistoryLocator {
    /** Prepares the locator; called once with the active Hadoop configuration. */
    void initialize(Configuration config) throws Exception;
    /**
     * Return a pair of job config file and job history file locations.
     *
     * @param jobId job whose files should be found
     * @return pair of (config path, history path), or null when unknown
     * @throws Exception if the lookup fails
     */
    Pair<Path, Path> locate(String jobId) throws Exception;
    /** Releases any resources held by the locator. */
    void close() throws Exception;
}
| 3,392 |
0 | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history/impl/BucketedHistoryLocator.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history.impl;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import com.netflix.bdp.inviso.history.HistoryLocator;
/**
 * {@link HistoryLocator} that resolves job history and config files from a
 * bucketed directory layout: files live under {@code <root>/<bucket>/}, where
 * the bucket is the trailing N digits of the job id.
 */
public class BucketedHistoryLocator implements HistoryLocator {
    // Hadoop configuration supplied via initialize(); consulted on every locate().
    private Configuration conf;
    // Root directory holding the per-bucket subdirectories.
    private Path bucketedPath;
    // Filename suffix of the job history file (e.g. ".history.gz").
    private String historyPostfix;
    // Filename suffix of the job configuration file (e.g. ".conf.gz").
    private String configPostfix;

    @Override
    public void initialize(Configuration config) throws Exception {
        this.conf = config;
        bucketedPath = new Path(conf.get("inviso.history.location", "hdfs://tmp"));
        // NOTE(review): both postfixes are read from the SAME key
        // "inviso.history.location.postfix"; if that property is set, the
        // config and history suffixes become identical. Looks like a
        // copy/paste bug — confirm intended key names before changing them.
        configPostfix = conf.get("inviso.history.location.postfix", ".conf.gz");
        historyPostfix = conf.get("inviso.history.location.postfix", ".history.gz");
    }

    /**
     * Builds the config and history file paths for the given job id.
     * (No copying or unzipping happens here — this only constructs paths.)
     *
     * @param jobId job id whose files should be located
     * @return pair of (config path, history path)
     */
    @Override
    public Pair<Path, Path> locate(String jobId) throws Exception {
        // Bucket = trailing digits of the job id; depth defaults to 3.
        String bucket = jobId.substring(jobId.length() - conf.getInt("inviso.history.bucket.depth", 3));
        Path originConfig = new Path(bucketedPath +Path.SEPARATOR+ bucket, jobId + configPostfix);
        Path originHistory = new Path(bucketedPath +Path.SEPARATOR+ bucket, jobId + historyPostfix);
        return new ImmutablePair<>(originConfig, originHistory);
    }

    @Override
    public void close() throws Exception { }
}
| 3,393 |
0 | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history/job/TaskAttempt.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history.job;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.mapred.JobHistory;
/**
*
* @author dweeks
*/
public class TaskAttempt extends HashMap<String, Object> {
public void merge(TaskAttempt other) {
this.putAll(other);
}
}
| 3,394 |
0 | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history/job/Job.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history.job;
import java.util.HashMap;
import java.util.Map;
import org.codehaus.jackson.map.annotate.JsonSerialize;
/**
*
* @author dweeks
*/
/**
 * Container for json marshalling: a job as a free-form attribute map plus a
 * nested map of tasks, also exposed as the "tasks" entry of the map itself.
 *
 * @author dweeks
 */
public class Job extends HashMap<String, Object> {
    private static final String TASKS = "tasks";
    private Map<String, Task> tasks = new HashMap<>();

    public Job() {
        put(TASKS, tasks);
    }

    /**
     * Returns the task for {@code taskId}, lazily creating and registering an
     * empty one on first access.
     */
    public Task getTask(String taskId) {
        if (!tasks.containsKey(taskId)) {
            tasks.put(taskId, new Task());
        }
        return tasks.get(taskId);
    }

    /** Drops all task detail, leaving only job-level attributes. */
    public void clearTasks() {
        tasks.clear();
        remove(TASKS);
    }
}
| 3,395 |
0 | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/history/job/Task.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.history.job;
import java.util.HashMap;
import java.util.Map;
/**
*
* @author dweeks
*/
/**
 * Container for json marshalling: a task as a free-form attribute map plus a
 * nested map of attempts, also exposed as the "attempts" entry of the map.
 *
 * @author dweeks
 */
public class Task extends HashMap<String, Object> {
    private static final String ATTEMPTS = "attempts";
    private Map<String, TaskAttempt> attempts = new HashMap<>();

    public Task() {
        put(ATTEMPTS, attempts);
    }

    /**
     * Absorbs {@code other}: its attempts are folded into this task's attempt
     * map and its remaining attributes overwrite this task's.
     */
    @SuppressWarnings("unchecked")
    public void merge(Task other) {
        Map<String, TaskAttempt> otherAttempts = (Map<String, TaskAttempt>) other.remove(ATTEMPTS);
        attempts.putAll(otherAttempts);
        putAll(other);
    }

    /**
     * Returns the attempt for {@code attemptId}, lazily creating and
     * registering an empty one on first access.
     */
    public TaskAttempt getAttempt(String attemptId) {
        if (!attempts.containsKey(attemptId)) {
            attempts.put(attemptId, new TaskAttempt());
        }
        return attempts.get(attemptId);
    }
}
| 3,396 |
0 | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso | Create_ds/inviso/trace-mr1/src/main/java/com/netflix/bdp/inviso/fs/WrappedCompressionInputStream.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.inviso.fs;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.compress.CompressionInputStream;
/**
* Wrapping CompressionInputStream so the job history loader can read a compressed stream.
*
* @author dweeks
*/
/**
 * Wrapping CompressionInputStream so the job history loader can read a compressed stream.
 *
 * <p>All reads delegate to the wrapped {@link CompressionInputStream};
 * {@link PositionedReadable} is implemented only nominally — the positioned
 * read methods throw because a compressed stream cannot be read at an
 * arbitrary offset.
 *
 * @author dweeks
 */
public class WrappedCompressionInputStream extends CompressionInputStream implements PositionedReadable {
    public WrappedCompressionInputStream(CompressionInputStream in) throws IOException {
        super(in);
    }
    /** Delegates to the wrapped stream's bulk read. */
    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        return ((CompressionInputStream)this.in).read(b, off, len);
    }
    /** Resets the wrapped stream's decompressor state. */
    @Override
    public void resetState() throws IOException {
        ((CompressionInputStream)this.in).resetState();
    }
    /** Delegates to the wrapped stream's single-byte read. */
    @Override
    public int read() throws IOException {
        return ((CompressionInputStream)this.in).read();
    }
    /** Unsupported: compressed input is not positionally readable. */
    @Override
    public int read(long position, byte[] buffer, int offset, int length) throws IOException {
        throw new UnsupportedOperationException("Not supported yet.");
    }
    /** Unsupported: compressed input is not positionally readable. */
    @Override
    public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
        throw new UnsupportedOperationException("Not supported yet.");
    }
    /** Unsupported: compressed input is not positionally readable. */
    @Override
    public void readFully(long position, byte[] buffer) throws IOException {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}
| 3,397 |
0 | Create_ds/chancery/src/test/java/com/airbnb | Create_ds/chancery/src/test/java/com/airbnb/chancery/StupidRateLimitTest.java | package com.airbnb.chancery;
import com.airbnb.chancery.github.GithubClient;
import com.airbnb.chancery.model.RateLimitStats;
import com.sun.jersey.api.client.Client;
import lombok.extern.slf4j.Slf4j;
import org.junit.Assert;
import org.junit.Test;
import java.net.URISyntaxException;
@Slf4j
public class StupidRateLimitTest {
    /**
     * Smoke test for {@code GithubClient.getRateLimitData()}.
     *
     * <p>NOTE(review): constructs a real Jersey {@code Client} with no
     * stubbing, so this appears to hit the live GitHub API — an integration
     * test that will fail without network access; confirm before running in CI.
     */
    @Test
    public void testRateLimit() throws URISyntaxException, GithubFailure.forRateLimit {
        final GithubClient client = new GithubClient(new Client(), null);
        final RateLimitStats data = client.getRateLimitData();
        StupidRateLimitTest.log.info("Rate limiting data: {}", data);
        // Sanity bounds only: exact numbers depend on current API usage.
        Assert.assertTrue(data.getLimit() > 10);
        Assert.assertTrue(data.getRemaining() > 10);
        Assert.assertTrue(data.getRemaining() <= data.getLimit());
    }
}
| 3,398 |
0 | Create_ds/chancery/src/test/java/com/airbnb | Create_ds/chancery/src/test/java/com/airbnb/chancery/StupidDeserializationTest.java | package com.airbnb.chancery;
import com.airbnb.chancery.model.CallbackPayload;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.junit.Test;
import java.io.IOException;
import java.io.InputStream;
@Slf4j
public class StupidDeserializationTest {
    /**
     * Verifies that the bundled example.json test resource deserializes into
     * {@code CallbackPayload} without error (passes by not throwing; there
     * are no value assertions).
     */
    @Test
    public final void testDeserialization() throws IOException {
        final InputStream stream = ClassLoader.getSystemResourceAsStream("example.json");
        final ObjectMapper mapper = new ObjectMapper();
        final CallbackPayload payload = mapper.readValue(stream, CallbackPayload.class);
        log.info("Payload: {}", payload);
    }
}
| 3,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.